1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap};
11use collections::{BTreeMap, HashSet};
12use futures::{Stream, StreamExt};
13use fuzzy::CharBag;
14use gpui::{
15 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
16 Task, UpgradeModelHandle, WeakModelHandle,
17};
18use language::{
19 Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
20 Operation, PointUtf16, Rope,
21};
22use lazy_static::lazy_static;
23use lsp::LanguageServer;
24use parking_lot::Mutex;
25use postage::{
26 prelude::{Sink as _, Stream as _},
27 watch,
28};
29use serde::Deserialize;
30use smol::channel::{self, Sender};
31use std::{
32 any::Any,
33 cmp::{self, Ordering},
34 convert::{TryFrom, TryInto},
35 ffi::{OsStr, OsString},
36 fmt,
37 future::Future,
38 mem,
39 ops::{Deref, Range},
40 path::{Path, PathBuf},
41 sync::{
42 atomic::{AtomicUsize, Ordering::SeqCst},
43 Arc,
44 },
45 time::{Duration, SystemTime},
46};
47use sum_tree::Bias;
48use sum_tree::{Edit, SeekTarget, SumTree};
49use util::{post_inc, ResultExt, TryFutureExt};
50
lazy_static! {
    // Cached `OsStr` for ".gitignore" so scans can compare file names
    // without re-constructing the string on every directory entry.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
54
/// State reported by the background filesystem scanner of a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan in progress.
    Idle,
    /// A scan is running; the foreground snapshot may be stale.
    Scanning,
    /// The scanner failed with the contained error.
    Err(Arc<anyhow::Error>),
}
61
/// Identifier for a worktree. Locally it is derived from the model handle's
/// id (see `from_usize`); remotely it comes from the server-assigned id
/// (see `from_proto`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct WorktreeId(usize);
64
/// A worktree is either backed by the local filesystem, or mirrors a remote
/// collaborator's worktree over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
69
/// Events emitted by the `Worktree` model.
#[derive(Debug)]
pub enum Event {
    /// A pass of disk-based diagnostics (e.g. from a compiler) completed.
    DiskBasedDiagnosticsUpdated,
    /// Diagnostics changed for the file at this worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
75
76impl Entity for Worktree {
77 type Event = Event;
78
79 fn app_will_quit(
80 &mut self,
81 _: &mut MutableAppContext,
82 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
83 use futures::FutureExt;
84
85 if let Self::Local(worktree) = self {
86 let shutdown_futures = worktree
87 .language_servers
88 .drain()
89 .filter_map(|(_, server)| server.shutdown())
90 .collect::<Vec<_>>();
91 Some(
92 async move {
93 futures::future::join_all(shutdown_futures).await;
94 }
95 .boxed(),
96 )
97 } else {
98 None
99 }
100 }
101}
102
103impl Worktree {
    /// Create a local worktree rooted at `path` and start the background
    /// filesystem scanner that keeps its snapshot up to date.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // The scanner owns the fs watcher, reports progress through
            // `scan_states_tx`, and mutates `background_snapshot` in place.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
128
    /// Create a worktree that mirrors a remote collaborator's worktree.
    ///
    /// Deserializes the initial entries off the main thread, then sets up two
    /// background flows: one applies incoming `UpdateWorktree` messages to a
    /// watch-channel snapshot, and one polls the model whenever that snapshot
    /// changes.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Build the entry trees on a background thread; entries that fail to
        // deserialize are logged and skipped rather than failing the open.
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Apply incoming updates to the shared snapshot off the main
                // thread; failures are logged and the update dropped.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    // Re-poll the model whenever the snapshot changes; stop
                    // once the model has been dropped.
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    diagnostic_summaries: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                })
            })
        });

        Ok(worktree)
    }
236
237 pub fn as_local(&self) -> Option<&LocalWorktree> {
238 if let Worktree::Local(worktree) = self {
239 Some(worktree)
240 } else {
241 None
242 }
243 }
244
245 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
246 if let Worktree::Remote(worktree) = self {
247 Some(worktree)
248 } else {
249 None
250 }
251 }
252
253 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
254 if let Worktree::Local(worktree) = self {
255 Some(worktree)
256 } else {
257 None
258 }
259 }
260
261 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
262 if let Worktree::Remote(worktree) = self {
263 Some(worktree)
264 } else {
265 None
266 }
267 }
268
269 pub fn snapshot(&self) -> Snapshot {
270 match self {
271 Worktree::Local(worktree) => worktree.snapshot(),
272 Worktree::Remote(worktree) => worktree.snapshot(),
273 }
274 }
275
276 pub fn replica_id(&self) -> ReplicaId {
277 match self {
278 Worktree::Local(_) => 0,
279 Worktree::Remote(worktree) => worktree.replica_id,
280 }
281 }
282
283 pub fn remove_collaborator(
284 &mut self,
285 peer_id: PeerId,
286 replica_id: ReplicaId,
287 cx: &mut ModelContext<Self>,
288 ) {
289 match self {
290 Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
291 Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
292 }
293 }
294
295 pub fn languages(&self) -> &Arc<LanguageRegistry> {
296 match self {
297 Worktree::Local(worktree) => &worktree.language_registry,
298 Worktree::Remote(worktree) => &worktree.languages,
299 }
300 }
301
302 pub fn user_store(&self) -> &ModelHandle<UserStore> {
303 match self {
304 Worktree::Local(worktree) => &worktree.user_store,
305 Worktree::Remote(worktree) => &worktree.user_store,
306 }
307 }
308
    /// RPC handler: a guest asked to open a buffer. Load it on the host and
    /// respond in the background; errors are logged, not propagated.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        // Only the host (local worktree) receives this message.
        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
334
335 pub fn handle_close_buffer(
336 &mut self,
337 envelope: TypedEnvelope<proto::CloseBuffer>,
338 _: Arc<Client>,
339 cx: &mut ModelContext<Self>,
340 ) -> anyhow::Result<()> {
341 self.as_local_mut()
342 .unwrap()
343 .close_remote_buffer(envelope, cx)
344 }
345
346 pub fn diagnostic_summaries<'a>(
347 &'a self,
348 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
349 match self {
350 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
351 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
352 }
353 .iter()
354 .map(|(path, summary)| (path.clone(), summary.clone()))
355 }
356
357 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
358 match self {
359 Worktree::Local(worktree) => &mut worktree.loading_buffers,
360 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
361 }
362 }
363
364 pub fn open_buffer(
365 &mut self,
366 path: impl AsRef<Path>,
367 cx: &mut ModelContext<Self>,
368 ) -> Task<Result<ModelHandle<Buffer>>> {
369 let path = path.as_ref();
370
371 // If there is already a buffer for the given path, then return it.
372 let existing_buffer = match self {
373 Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
374 Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
375 };
376 if let Some(existing_buffer) = existing_buffer {
377 return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
378 }
379
380 let path: Arc<Path> = Arc::from(path);
381 let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
382 // If the given path is already being loaded, then wait for that existing
383 // task to complete and return the same buffer.
384 hash_map::Entry::Occupied(e) => e.get().clone(),
385
386 // Otherwise, record the fact that this path is now being loaded.
387 hash_map::Entry::Vacant(entry) => {
388 let (mut tx, rx) = postage::watch::channel();
389 entry.insert(rx.clone());
390
391 let load_buffer = match self {
392 Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
393 Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
394 };
395 cx.spawn(move |this, mut cx| async move {
396 let result = load_buffer.await;
397
398 // After the buffer loads, record the fact that it is no longer
399 // loading.
400 this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
401 *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
402 })
403 .detach();
404 rx
405 }
406 };
407
408 cx.spawn(|_, _| async move {
409 loop {
410 if let Some(result) = loading_watch.borrow().as_ref() {
411 return result.clone().map_err(|e| anyhow!("{}", e));
412 }
413 loading_watch.recv().await;
414 }
415 })
416 }
417
418 #[cfg(feature = "test-support")]
419 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
420 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
421 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
422 Worktree::Remote(worktree) => {
423 Box::new(worktree.open_buffers.values().filter_map(|buf| {
424 if let RemoteBuffer::Loaded(buf) = buf {
425 Some(buf)
426 } else {
427 None
428 }
429 }))
430 }
431 };
432
433 let path = path.as_ref();
434 open_buffers
435 .find(|buffer| {
436 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
437 file.path().as_ref() == path
438 } else {
439 false
440 }
441 })
442 .is_some()
443 }
444
445 pub fn handle_update_buffer(
446 &mut self,
447 envelope: TypedEnvelope<proto::UpdateBuffer>,
448 cx: &mut ModelContext<Self>,
449 ) -> Result<()> {
450 let payload = envelope.payload.clone();
451 let buffer_id = payload.buffer_id as usize;
452 let ops = payload
453 .operations
454 .into_iter()
455 .map(|op| language::proto::deserialize_operation(op))
456 .collect::<Result<Vec<_>, _>>()?;
457
458 match self {
459 Worktree::Local(worktree) => {
460 let buffer = worktree
461 .open_buffers
462 .get(&buffer_id)
463 .and_then(|buf| buf.upgrade(cx))
464 .ok_or_else(|| {
465 anyhow!("invalid buffer {} in update buffer message", buffer_id)
466 })?;
467 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
468 }
469 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
470 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
471 Some(RemoteBuffer::Loaded(buffer)) => {
472 if let Some(buffer) = buffer.upgrade(cx) {
473 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
474 } else {
475 worktree
476 .open_buffers
477 .insert(buffer_id, RemoteBuffer::Operations(ops));
478 }
479 }
480 None => {
481 worktree
482 .open_buffers
483 .insert(buffer_id, RemoteBuffer::Operations(ops));
484 }
485 },
486 }
487
488 Ok(())
489 }
490
    /// RPC handler: a guest requested a save of a shared buffer. Perform the
    /// save on the host and respond with the saved version and mtime; the
    /// response is sent in the background and errors are logged.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        // Saving on behalf of a guest only makes sense while shared.
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        // Look up the buffer in the set shared with this particular peer.
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
543
544 pub fn handle_buffer_saved(
545 &mut self,
546 envelope: TypedEnvelope<proto::BufferSaved>,
547 cx: &mut ModelContext<Self>,
548 ) -> Result<()> {
549 let payload = envelope.payload.clone();
550 let worktree = self.as_remote_mut().unwrap();
551 if let Some(buffer) = worktree
552 .open_buffers
553 .get(&(payload.buffer_id as usize))
554 .and_then(|buf| buf.upgrade(cx))
555 {
556 buffer.update(cx, |buffer, cx| {
557 let version = payload.version.try_into()?;
558 let mtime = payload
559 .mtime
560 .ok_or_else(|| anyhow!("missing mtime"))?
561 .into();
562 buffer.did_save(version, mtime, None, cx);
563 Result::<_, anyhow::Error>::Ok(())
564 })?;
565 }
566 Ok(())
567 }
568
    /// Refresh the foreground snapshot from the background state.
    ///
    /// While a local worktree is still scanning, schedules itself to run
    /// again (immediately on a fake fs, otherwise after 100ms). Once the
    /// scan settles — and always for remote worktrees — open buffers are
    /// reconciled against the new snapshot.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // Only one re-poll may be pending at a time.
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
601
    /// Reconcile every open buffer's `File` with the latest snapshot.
    ///
    /// Each live buffer's file is re-resolved first by entry id, then by
    /// path; if neither matches, the entry is gone and the file keeps its old
    /// path and mtime with `entry_id: None`. Dead weak handles are removed.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Remote buffers represented only by queued operations have
                // no file to reconcile and are skipped.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        // Resolve by entry id first, then fall back to path.
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            // Entry no longer exists in the snapshot; keep
                            // the previous identity but clear the entry id.
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        // Prune buffers whose models have been dropped.
        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
677
678 pub fn update_diagnostics(
679 &mut self,
680 mut params: lsp::PublishDiagnosticsParams,
681 disk_based_sources: &HashSet<String>,
682 cx: &mut ModelContext<Worktree>,
683 ) -> Result<()> {
684 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
685 let abs_path = params
686 .uri
687 .to_file_path()
688 .map_err(|_| anyhow!("URI is not a file"))?;
689 let worktree_path = Arc::from(
690 abs_path
691 .strip_prefix(&this.abs_path)
692 .context("path is not within worktree")?,
693 );
694
695 let mut group_ids_by_diagnostic_range = HashMap::default();
696 let mut diagnostics_by_group_id = HashMap::default();
697 let mut next_group_id = 0;
698 for diagnostic in &mut params.diagnostics {
699 let source = diagnostic.source.as_ref();
700 let code = diagnostic.code.as_ref();
701 let group_id = diagnostic_ranges(&diagnostic, &abs_path)
702 .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
703 .copied()
704 .unwrap_or_else(|| {
705 let group_id = post_inc(&mut next_group_id);
706 for range in diagnostic_ranges(&diagnostic, &abs_path) {
707 group_ids_by_diagnostic_range.insert((source, code, range), group_id);
708 }
709 group_id
710 });
711
712 diagnostics_by_group_id
713 .entry(group_id)
714 .or_insert(Vec::new())
715 .push(DiagnosticEntry {
716 range: diagnostic.range.start.to_point_utf16()
717 ..diagnostic.range.end.to_point_utf16(),
718 diagnostic: Diagnostic {
719 code: diagnostic.code.clone().map(|code| match code {
720 lsp::NumberOrString::Number(code) => code.to_string(),
721 lsp::NumberOrString::String(code) => code,
722 }),
723 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
724 message: mem::take(&mut diagnostic.message),
725 group_id,
726 is_primary: false,
727 is_valid: true,
728 is_disk_based: diagnostic
729 .source
730 .as_ref()
731 .map_or(false, |source| disk_based_sources.contains(source)),
732 },
733 });
734 }
735
736 let diagnostics = diagnostics_by_group_id
737 .into_values()
738 .flat_map(|mut diagnostics| {
739 let primary = diagnostics
740 .iter_mut()
741 .min_by_key(|entry| entry.diagnostic.severity)
742 .unwrap();
743 primary.diagnostic.is_primary = true;
744 diagnostics
745 })
746 .collect::<Vec<_>>();
747
748 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
749 Ok(())
750 }
751
    /// Record new diagnostics for `worktree_path`: push them into the
    /// matching open buffer (if any), then update the stored per-path
    /// diagnostics and summaries, and emit `DiagnosticsUpdated`.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    // Broadcast the resulting operation to collaborators.
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        // Re-borrow as local: `send_buffer_update` above required `&mut self`.
        let this = self.as_local_mut().unwrap();
        this.diagnostic_summaries
            .insert(worktree_path.clone(), DiagnosticSummary::new(&diagnostics));
        this.diagnostics.insert(worktree_path.clone(), diagnostics);
        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));
        Ok(())
    }
786
    /// Send a buffer operation to the remote peer, queueing it for retry on
    /// failure. Does nothing for a local worktree that isn't shared.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            // A local worktree can only send updates while shared.
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    // Stash the failed operation for a later retry.
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
827}
828
829impl WorktreeId {
830 pub fn from_usize(handle_id: usize) -> Self {
831 Self(handle_id)
832 }
833
834 pub(crate) fn from_proto(id: u64) -> Self {
835 Self(id as usize)
836 }
837
838 pub fn to_proto(&self) -> u64 {
839 self.0 as u64
840 }
841
842 pub fn to_usize(&self) -> usize {
843 self.0
844 }
845}
846
847impl fmt::Display for WorktreeId {
848 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
849 self.0.fmt(f)
850 }
851}
852
/// A view of a worktree's entries, cheap enough to clone and share between
/// the foreground model and the background scanner.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Bumped as the background scanner makes passes over the tree.
    scan_id: usize,
    // Absolute path of the worktree root on disk.
    abs_path: Arc<Path>,
    // Name of the root entry, used for fuzzy matching.
    root_name: String,
    // Lowercased character bag of the root name, for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed .gitignore per directory, paired with a scan id.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path.
    entries_by_path: SumTree<Entry>,
    // Entries indexed by id.
    entries_by_id: SumTree<PathEntry>,
    // NOTE(review): appears to track ids of removed entries keyed by some
    // u64 (inode?) for reuse — confirm against the scanner implementation.
    removed_entry_ids: HashMap<u64, usize>,
    // Source of fresh entry ids, shared with the background scanner.
    next_entry_id: Arc<AtomicUsize>,
}
866
/// A worktree backed by the local filesystem.
pub struct LocalWorktree {
    // Foreground copy of the scanner's snapshot; refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Settings parsed from `.zed.toml`, if present.
    config: WorktreeConfig,
    // Snapshot mutated in place by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Held only to keep the background scanner task alive.
    _background_scanner_task: Option<Task<()>>,
    // Pending re-poll scheduled while a scan is in progress.
    poll_task: Option<Task<()>>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Weak handles to open buffers, keyed by buffer model id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers opened on behalf of each guest peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Latest diagnostics per path; taken by `open_buffer` when a buffer opens.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    // Operations that failed to send and await retry.
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Languages registered with this worktree so far.
    languages: Vec<Arc<Language>>,
    // Running language servers, keyed by language name.
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
888
/// State that exists while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Channel used to push fresh snapshots once a scan settles.
    snapshots_tx: Sender<Snapshot>,
}
893
/// A worktree mirroring a remote collaborator's worktree over RPC.
pub struct RemoteWorktree {
    project_id: u64,
    // Local copy of the latest snapshot; refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Incoming worktree updates are queued here and applied in the background.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Loaded buffers, or queued operations for buffers not yet loaded.
    open_buffers: HashMap<usize, RemoteBuffer>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Operations that failed to send and await retry.
    queued_operations: Vec<(u64, Operation)>,
}
908
/// In-flight buffer loads, keyed by worktree-relative path. Watchers see
/// `None` until the load completes, then the shared result (errors wrapped
/// in `Arc` so they can be cloned to every waiter).
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
913
/// Optional per-worktree configuration, deserialized from `.zed.toml`.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins permitted to collaborate on this worktree.
    collaborators: Vec<String>,
}
918
919impl LocalWorktree {
    /// Construct a local worktree model rooted at `path`.
    ///
    /// Reads root metadata and optional `.zed.toml` config, builds the
    /// initial snapshot, and spawns a task that reacts to scan-state changes
    /// from the background scanner. Returns the model plus the sender the
    /// scanner should use to report its state.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is always empty.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // Best-effort config load: parse errors fall back to the default.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                languages: Default::default(),
                language_servers: Default::default(),
            };

            // Forward scan-state changes into the model: refresh the
            // snapshot, and when a scan finishes while the worktree is
            // shared, push the new snapshot to the share task.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The model was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1028
1029 pub fn authorized_logins(&self) -> Vec<String> {
1030 self.config.collaborators.clone()
1031 }
1032
    /// The registry used to detect languages and start language servers.
    pub fn language_registry(&self) -> &LanguageRegistry {
        &self.language_registry
    }
1036
    /// Languages that have been registered with this worktree so far.
    pub fn languages(&self) -> &[Arc<Language>] {
        &self.languages
    }
1040
    /// Ensure a language server is running for `language`, starting one and
    /// wiring up its diagnostics notifications if needed. Returns the
    /// running server, or `None` if one could not be started.
    pub fn register_language(
        &mut self,
        language: &Arc<Language>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        // Track the language itself (by pointer identity) exactly once.
        if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
            self.languages.push(language.clone());
        }

        // Reuse an already-running server for this language, if any.
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            let disk_based_sources = language
                .disk_based_diagnostic_sources()
                .cloned()
                .unwrap_or_default();
            let disk_based_diagnostics_progress_token =
                language.disk_based_diagnostics_progress_token().cloned();
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            // Forward `publishDiagnostics` notifications onto a channel so
            // they can be processed on the model side.
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| {
                let has_disk_based_diagnostic_progress_token =
                    disk_based_diagnostics_progress_token.is_some();
                async move {
                    while let Ok(diagnostics) = diagnostics_rx.recv().await {
                        if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                            handle.update(&mut cx, |this, cx| {
                                this.update_diagnostics(diagnostics, &disk_based_sources, cx)
                                    .log_err();
                                // With no progress token there is no explicit
                                // end-of-pass signal, so emit per batch.
                                if !has_disk_based_diagnostic_progress_token {
                                    cx.emit(Event::DiskBasedDiagnosticsUpdated);
                                }
                            });
                        } else {
                            break;
                        }
                    }
                }
            })
            .detach();

            let (mut disk_based_diagnostics_done_tx, mut disk_based_diagnostics_done_rx) =
                watch::channel_with(());
            // Watch LSP progress notifications for the token that marks the
            // end of a disk-based diagnostics pass.
            language_server
                .on_notification::<lsp::notification::Progress, _>(move |params| {
                    let token = match params.token {
                        lsp::NumberOrString::Number(_) => None,
                        lsp::NumberOrString::String(token) => Some(token),
                    };

                    if token == disk_based_diagnostics_progress_token {
                        match params.value {
                            lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                                lsp::WorkDoneProgress::End(_) => {
                                    smol::block_on(disk_based_diagnostics_done_tx.send(())).ok();
                                }
                                _ => {}
                            },
                        }
                    }
                })
                .detach();
            cx.spawn_weak(|this, mut cx| async move {
                while let Some(()) = disk_based_diagnostics_done_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        handle.update(&mut cx, |_, cx| {
                            cx.emit(Event::DiskBasedDiagnosticsUpdated);
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }
1133
    /// Find an open buffer whose file matches `path` in this worktree.
    ///
    /// Doubles as garbage collection: weak handles whose buffers have been
    /// dropped are removed from `open_buffers` during the scan.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut result = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    // Match both the worktree and the relative path.
                    if file.worktree == handle && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                // Dead handle: drop it from the map.
                false
            }
        });
        result
    }
1155
    /// Loads the file at `path` from disk and constructs a new `Buffer` for it,
    /// wiring up language support, a language server (if one is registered for
    /// the language), and any diagnostics that arrived before the file was opened.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Read the file's contents; this also refreshes the snapshot entry.
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Take any diagnostics that were queued for this path, and resolve
            // the language (and its server) from the file's full path.
            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.remove(&path);
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                // Replica id 0: this is the host's local copy of the buffer.
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                // Broadcast the diagnostic operations to collaborators, then
                // track the buffer so subsequent opens can reuse it.
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1202
    /// Handles a guest's request to open a buffer: opens it locally, records it
    /// in `shared_buffers` for the requesting peer, and returns its serialized
    /// state so the guest can reconstruct it.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                // Keep a strong handle per peer so the buffer stays alive for
                // the duration of the collaboration.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1228
1229 pub fn close_remote_buffer(
1230 &mut self,
1231 envelope: TypedEnvelope<proto::CloseBuffer>,
1232 cx: &mut ModelContext<Worktree>,
1233 ) -> Result<()> {
1234 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1235 shared_buffers.remove(&envelope.payload.buffer_id);
1236 cx.notify();
1237 }
1238
1239 Ok(())
1240 }
1241
1242 pub fn remove_collaborator(
1243 &mut self,
1244 peer_id: PeerId,
1245 replica_id: ReplicaId,
1246 cx: &mut ModelContext<Worktree>,
1247 ) {
1248 self.shared_buffers.remove(&peer_id);
1249 for (_, buffer) in &self.open_buffers {
1250 if let Some(buffer) = buffer.upgrade(cx) {
1251 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1252 }
1253 }
1254 cx.notify();
1255 }
1256
1257 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1258 let mut scan_state_rx = self.last_scan_state_rx.clone();
1259 async move {
1260 let mut scan_state = Some(scan_state_rx.borrow().clone());
1261 while let Some(ScanState::Scanning) = scan_state {
1262 scan_state = scan_state_rx.recv().await;
1263 }
1264 }
1265 }
1266
1267 fn is_scanning(&self) -> bool {
1268 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1269 true
1270 } else {
1271 false
1272 }
1273 }
1274
    /// Returns a clone of the current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1278
    /// The absolute path of this worktree's root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1282
    /// Whether `path` lies inside this worktree (component-wise prefix check).
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1286
    /// Converts a worktree-relative path into an absolute path. An empty
    /// relative path (no final component) refers to the worktree root itself.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.snapshot.abs_path.join(path)
        } else {
            self.snapshot.abs_path.to_path_buf()
        }
    }
1294
    /// Reads the file at worktree-relative `path` from disk, returning a `File`
    /// handle plus the file's text contents.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Make the refreshed entry visible in the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1320
    /// Saves `text` to `path` ("save as"), registers `buffer` under its id so
    /// future opens of the path find it, and returns the new `File` handle.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer under the (possibly new) path's entry.
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1345
    /// Writes `text` to disk at worktree-relative `path` and returns the
    /// refreshed snapshot entry for the saved file.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Perform the write and snapshot refresh off the main thread.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Surface the refreshed entry in the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1367
    /// Starts sharing this worktree with collaborators on `project_id`:
    /// sends the initial snapshot, then streams incremental diffs as the
    /// snapshot changes. Idempotent — returns immediately if already shared.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
        });

        // Background task: diff each queued snapshot against the last one we
        // successfully sent, and push the delta to the server.
        cx.background()
            .spawn({
                let rpc = rpc.clone();
                let snapshot = snapshot.clone();
                async move {
                    let mut prev_snapshot = snapshot;
                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                        let message =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                        match rpc.send(message).await {
                            // Only advance the baseline on success so a failed
                            // diff is retried implicitly by the next update.
                            Ok(()) => prev_snapshot = snapshot,
                            Err(err) => log::error!("error sending snapshot diff {}", err),
                        }
                    }
                }
            })
            .detach();

        // Serializing the full snapshot can be expensive; do it off-thread.
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto()),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1416}
1417
1418fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1419 let contents = smol::block_on(fs.load(&abs_path))?;
1420 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1421 let mut builder = GitignoreBuilder::new(parent);
1422 for line in contents.lines() {
1423 builder.add_line(Some(abs_path.into()), line)?;
1424 }
1425 Ok(builder.build()?)
1426}
1427
1428impl Deref for Worktree {
1429 type Target = Snapshot;
1430
1431 fn deref(&self) -> &Self::Target {
1432 match self {
1433 Worktree::Local(worktree) => &worktree.snapshot,
1434 Worktree::Remote(worktree) => &worktree.snapshot,
1435 }
1436 }
1437}
1438
impl Deref for LocalWorktree {
    type Target = Snapshot;

    // Delegate to the foreground snapshot so snapshot accessors can be called
    // directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1446
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // Delegate to the snapshot, mirroring the `LocalWorktree` impl.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1454
impl fmt::Debug for LocalWorktree {
    // Debug output is just the snapshot's entry tree.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1460
impl RemoteWorktree {
    /// Returns an already-open buffer for `path` in this worktree, if any,
    /// pruning entries for buffers that have been dropped.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                // Buffer was dropped; discard the stale weak handle.
                false
            }
        });
        existing_buffer
    }

    /// Requests the buffer for `path` from the host and reconstructs it
    /// locally, applying any operations that arrived before the response.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            // The entry must already exist in our replicated snapshot.
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            // Re-check liveness: the worktree may have closed during the RPC.
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Any operations that raced ahead of the open response were
                // queued under `RemoteBuffer::Operations`; apply them now.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Closes every open buffer, e.g. when the host disconnects.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// Returns a clone of the current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues a worktree update from the host for asynchronous application.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Removes a departed collaborator's replica state from all open buffers.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1584
/// State of a buffer replicated from the host.
enum RemoteBuffer {
    /// Operations received before the buffer finished opening; applied once it loads.
    Operations(Vec<Operation>),
    /// The buffer is open; held weakly so closing it locally drops it.
    Loaded(WeakModelHandle<Buffer>),
}
1589
1590impl RemoteBuffer {
1591 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1592 match self {
1593 Self::Operations(_) => None,
1594 Self::Loaded(buffer) => buffer.upgrade(cx),
1595 }
1596 }
1597}
1598
impl Snapshot {
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Serializes the full snapshot for transmission to guests. Ignored
    /// entries are omitted.
    pub fn to_proto(&self) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id.0 as u64,
            root_name,
            entries: self
                .entries_by_path
                .cursor::<()>()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
        }
    }

    /// Diffs `self` against an older snapshot `other`, producing an update
    /// message listing added/changed entries and removed entry ids. Both
    /// id-ordered entry sequences are merged pairwise.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Entry only in `self`: it was added.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // Entry in both: changed iff its scan id advanced.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Entry only in `other`: it was removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies an incremental update received from the host, keeping the
    /// path-keyed and id-keyed trees in sync.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, remove its old path entry before inserting.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of files excluding ignored ones.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal at the entry whose (filtered) ordinal position is
    /// `start_offset`.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal at the first entry at or after `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates all entry paths, skipping the root's empty path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates the immediate children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        // Bias::Right positions the cursor just past the parent entry itself.
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root (stored under the empty path).
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry at exactly `path`, if present.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal lands at-or-after `path`; require an exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id, going through the id→path index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts or replaces a single entry, keeping both trees in sync. If the
    /// entry is a `.gitignore` file, (re)loads its ignore rules.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // Best effort: a malformed .gitignore shouldn't fail the scan.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Inserts the scanned children of `parent_path`, marking the parent as a
    /// fully-scanned `Dir` and registering its `.gitignore` rules if present.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Only a directory awaiting its first scan may be populated.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Preserves a stable id for an entry that was removed and re-created with
    /// the same inode, or that already exists at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything under it from both trees, remembering the
    /// removed ids by inode so renames can keep stable entry ids.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`] + [subtree at `path`] + [after].
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest id seen per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates its cached rules for this scan.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of `.gitignore` rules that applies at `path`, walking
    /// from the worktree root down to the path's parent.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // Apply rules root-first; once an ancestor dir is ignored, everything
        // beneath it is ignored and no further rules matter.
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1959
1960impl fmt::Debug for Snapshot {
1961 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1962 for entry in self.entries_by_path.cursor::<()>() {
1963 for _ in entry.path.ancestors().skip(1) {
1964 write!(f, " ")?;
1965 }
1966 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
1967 }
1968 Ok(())
1969 }
1970}
1971
/// A handle to a file within a worktree, shared with `Buffer`s.
#[derive(Clone, PartialEq)]
pub struct File {
    /// Snapshot entry id; `None` once the file has been deleted.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    /// Absolute path of the worktree root containing this file.
    worktree_path: Arc<Path>,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    /// `true` when the file lives on this machine's disk (host side).
    is_local: bool,
}
1981
impl language::File for File {
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// Path relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Absolute path on disk; `None` for files on a remote host.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// Worktree-name-prefixed path, used e.g. for language detection and display.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Saves the buffer's text. On the host this writes to disk and notifies
    /// guests; on a guest it asks the host to perform the save via RPC.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Only broadcast when the worktree is actually shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Reads the file's current contents from disk; `None` on a guest.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Forwards a buffer operation to collaborators via the worktree.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// On a guest, tells the host we closed the buffer (best effort).
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.id().to_proto();
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    // Enables downcasting back to the concrete `File` type.
    fn as_any(&self) -> &dyn Any {
        self
    }
}
2115
2116impl File {
2117 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2118 file.and_then(|f| f.as_any().downcast_ref())
2119 }
2120
2121 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2122 self.worktree.read(cx).id()
2123 }
2124}
2125
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    /// Stable id, preserved across renames when the inode matches.
    pub id: usize,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    pub is_ignored: bool,
}
2136
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying a char bag of its path for fuzzy matching.
    File(CharBag),
}
2143
2144impl Entry {
2145 fn new(
2146 path: Arc<Path>,
2147 metadata: &fs::Metadata,
2148 next_entry_id: &AtomicUsize,
2149 root_char_bag: CharBag,
2150 ) -> Self {
2151 Self {
2152 id: next_entry_id.fetch_add(1, SeqCst),
2153 kind: if metadata.is_dir {
2154 EntryKind::PendingDir
2155 } else {
2156 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2157 },
2158 path,
2159 inode: metadata.inode,
2160 mtime: metadata.mtime,
2161 is_symlink: metadata.is_symlink,
2162 is_ignored: false,
2163 }
2164 }
2165
2166 pub fn is_dir(&self) -> bool {
2167 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2168 }
2169
2170 pub fn is_file(&self) -> bool {
2171 matches!(self.kind, EntryKind::File(_))
2172 }
2173}
2174
2175impl sum_tree::Item for Entry {
2176 type Summary = EntrySummary;
2177
2178 fn summary(&self) -> Self::Summary {
2179 let visible_count = if self.is_ignored { 0 } else { 1 };
2180 let file_count;
2181 let visible_file_count;
2182 if self.is_file() {
2183 file_count = 1;
2184 visible_file_count = visible_count;
2185 } else {
2186 file_count = 0;
2187 visible_file_count = 0;
2188 }
2189
2190 EntrySummary {
2191 max_path: self.path.clone(),
2192 count: 1,
2193 visible_count,
2194 file_count,
2195 visible_file_count,
2196 }
2197 }
2198}
2199
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are ordered by path within the tree.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2207
/// Aggregate data for a subtree of `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// Greatest (rightmost) path in the subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are not gitignored.
    visible_count: usize,
    /// Entries that are files.
    file_count: usize,
    /// Entries that are files and not gitignored.
    visible_file_count: usize,
}
2216
2217impl Default for EntrySummary {
2218 fn default() -> Self {
2219 Self {
2220 max_path: Arc::from(Path::new("")),
2221 count: 0,
2222 visible_count: 0,
2223 file_count: 0,
2224 visible_file_count: 0,
2225 }
2226 }
2227}
2228
2229impl sum_tree::Summary for EntrySummary {
2230 type Context = ();
2231
2232 fn add_summary(&mut self, rhs: &Self, _: &()) {
2233 self.max_path = rhs.max_path.clone();
2234 self.visible_count += rhs.visible_count;
2235 self.file_count += rhs.file_count;
2236 self.visible_file_count += rhs.visible_file_count;
2237 }
2238}
2239
/// Id-keyed index record pointing back to an entry's path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    /// Scan in which this entry was last touched; used to diff snapshots.
    scan_id: usize,
}
2247
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The summary is just the item's id; combined with id-ordering this
    // enables id-based seeks.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2255
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    // Path entries are ordered by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2263
/// Summary for `PathEntry`: the largest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2268
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // Items are id-ordered, so the rightmost summary holds the max id.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2276
// Allows seeking through the id-keyed tree by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2282
/// Ordering key for path-keyed entry trees (lexicographic by components).
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2285
2286impl Default for PathKey {
2287 fn default() -> Self {
2288 Self(Path::new("").into())
2289 }
2290}
2291
// Allows seeking through the path-keyed tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2297
/// Scans the worktree on a background executor, updating the shared snapshot
/// and reporting progress through `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    /// Snapshot shared with the foreground; mutated as scanning progresses.
    snapshot: Arc<Mutex<Snapshot>>,
    /// Channel for `ScanState` transitions (Scanning / Idle / Err).
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2304
impl BackgroundScanner {
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }

    /// The worktree's root absolute path, read from the shared snapshot.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// A point-in-time clone of the shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Main loop: performs the initial recursive scan, then processes batches
    /// of file-system events until the event stream ends or the notify
    /// channel closes. Each phase is bracketed by `Scanning`/`Idle`
    /// notifications; a failed send means the receiver was dropped, so we
    /// stop.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        // Report initial-scan errors but keep running: the event loop below
        // may still make progress on later file-system events.
        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }

    /// Performs the initial recursive scan of the worktree root, fanning
    /// directory jobs out over one worker task per CPU.
    async fn scan_dirs(&mut self) -> Result<()> {
        // Copy what we need out of the snapshot up front so the lock is not
        // held across any await point.
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        // Single-file worktrees have nothing to recurse into.
        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            // Seed the queue with the root directory; each scanned directory
            // enqueues its subdirectories via the `scan_queue` sender carried
            // in its job.
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Drop our sender so the channel closes once every in-flight job
            // (each holding its own clone) has completed.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }

    /// Scans a single directory: records its children in the snapshot and
    /// enqueues scan jobs for any subdirectories.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // A child that vanished between readdir and stat is skipped.
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed, to reflect the ignore file in the current
                // directory. Because `.gitignore` starts with a `.`, it sorts
                // near the front of the listing, so there should rarely be
                // many such entries. Update the ignore stack associated with
                // any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        // Jobs were pushed in the same order as directory
                        // entries, so this pairs one job per directory entry.
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Once a directory is ignored, everything beneath it is
                    // ignored too, so its job carries the "all" stack.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }

    /// Applies a batch of file-system events to the snapshot, rescanning any
    /// directories that were created or modified. Returns `false` if the
    /// root path can no longer be canonicalized (e.g. the root was removed).
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Work on a local copy of the snapshot; it is published back to the
        // shared state once the whole batch has been applied.
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path, then drop events whose path is a duplicate of — or a
        // descendant of — an earlier event's path: rescanning the ancestor
        // covers them.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove every affected path from the snapshot; paths
        // that still exist are re-inserted below.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // Second pass: re-stat each path and re-insert it, queueing a rescan
        // for each directory.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // Path no longer exists; the removal pass above covered it.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }

    /// Re-evaluates ignore statuses for directories whose cached `.gitignore`
    /// was touched by the current scan, and drops cached gitignores whose
    /// file no longer exists.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // A gitignore recorded with the current scan id was (re)loaded by
            // this scan, so its subtree's ignore statuses may have changed.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip paths nested beneath `parent_path`: the job for the
            // ancestor re-walks its entire subtree anyway.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes `is_ignored` for the direct children of `job.path`,
    /// enqueueing a follow-up job for each child directory.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
}
2708
2709async fn refresh_entry(
2710 fs: &dyn Fs,
2711 snapshot: &Mutex<Snapshot>,
2712 path: Arc<Path>,
2713 abs_path: &Path,
2714) -> Result<Entry> {
2715 let root_char_bag;
2716 let next_entry_id;
2717 {
2718 let snapshot = snapshot.lock();
2719 root_char_bag = snapshot.root_char_bag;
2720 next_entry_id = snapshot.next_entry_id.clone();
2721 }
2722 let entry = Entry::new(
2723 path,
2724 &fs.metadata(abs_path)
2725 .await?
2726 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2727 &next_entry_id,
2728 root_char_bag,
2729 );
2730 Ok(snapshot.lock().insert_entry(entry, fs))
2731}
2732
2733fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2734 let mut result = root_char_bag;
2735 result.extend(
2736 path.to_string_lossy()
2737 .chars()
2738 .map(|c| c.to_ascii_lowercase()),
2739 );
2740 result
2741}
2742
/// Unit of work for the directory-scanning worker pool: one directory to
/// read, plus the ignore context it inherits from its ancestors.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: PathBuf,
    // Worktree-relative path of the same directory.
    path: Arc<Path>,
    // Gitignore stack accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue jobs for subdirectories; the channel closes
    // once every outstanding job (and its sender clone) is done.
    scan_queue: Sender<ScanJob>,
}
2749
/// Unit of work for re-evaluating ignore statuses beneath one directory after
/// a `.gitignore` change.
struct UpdateIgnoreStatusJob {
    // Worktree-relative path of the directory whose children to update.
    path: Arc<Path>,
    // Gitignore stack in effect at this directory (excluding its own file).
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue follow-up jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2755
/// Test-support extensions on a worktree model handle.
pub trait WorktreeHandle {
    // Waits until all pending FS events for the worktree have been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2763
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create then delete a sentinel file, waiting for each change to
            // be observed; any earlier events must have been drained first.
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2796
/// Cursor dimension accumulated while walking the entry tree: the rightmost
/// path seen plus running entry counts under the various include filters.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Largest (rightmost) path traversed so far.
    max_path: &'a Path,
    // All entries traversed.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // Non-directory entries.
    file_count: usize,
    // Non-directory entries that are not gitignored.
    visible_file_count: usize,
}
2805
2806impl<'a> TraversalProgress<'a> {
2807 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2808 match (include_ignored, include_dirs) {
2809 (true, true) => self.count,
2810 (true, false) => self.file_count,
2811 (false, true) => self.visible_count,
2812 (false, false) => self.visible_file_count,
2813 }
2814 }
2815}
2816
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Folds one subtree summary into the running progress: the rightmost
    /// path replaces `max_path`, and all counts accumulate.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2826
impl<'a> Default for TraversalProgress<'a> {
    /// Progress at the start of a traversal: positioned at the empty path
    /// with all counts at zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2838
/// Iterator-like cursor over worktree entries in path order, optionally
/// filtering out directories and/or gitignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether gitignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2844
impl<'a> Traversal<'a> {
    /// Steps to the next entry matching the traversal's filters. Returns the
    /// underlying cursor seek result.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` entries (counted per the include flags)
    /// have been passed.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, stopping at the next
    /// entry that is not a descendant and matches the include flags. Returns
    /// `false` if the traversal is exhausted first.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // Keep seeking if the landing entry is filtered out by the
            // include flags; the loop then skips its subtree too.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry currently under the cursor, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of entries (per the include flags) preceding the cursor.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2890
2891impl<'a> Iterator for Traversal<'a> {
2892 type Item = &'a Entry;
2893
2894 fn next(&mut self) -> Option<Self::Item> {
2895 if let Some(item) = self.entry() {
2896 self.advance();
2897 Some(item)
2898 } else {
2899 None
2900 }
2901 }
2902}
2903
/// Seek targets for a `Traversal` cursor.
#[derive(Debug)]
enum TraversalTarget {
    // Seek to a specific worktree-relative path.
    Path(&'a Path),
    // Seek to the first entry after the subtree rooted at this path.
    PathSuccessor(&'a Path),
    // Seek until the filtered entry count reaches `count`.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2914
2915impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2916 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2917 match self {
2918 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2919 TraversalTarget::PathSuccessor(path) => {
2920 if !cursor_location.max_path.starts_with(path) {
2921 Ordering::Equal
2922 } else {
2923 Ordering::Greater
2924 }
2925 }
2926 TraversalTarget::Count {
2927 count,
2928 include_dirs,
2929 include_ignored,
2930 } => Ord::cmp(
2931 count,
2932 &cursor_location.count(*include_dirs, *include_ignored),
2933 ),
2934 }
2935 }
2936}
2937
/// Iterator over the direct children of one directory, built on `Traversal`.
struct ChildEntriesIter<'a> {
    // Directory whose children are yielded.
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2942
2943impl<'a> Iterator for ChildEntriesIter<'a> {
2944 type Item = &'a Entry;
2945
2946 fn next(&mut self) -> Option<Self::Item> {
2947 if let Some(item) = self.traversal.entry() {
2948 if item.path.starts_with(&self.parent_path) {
2949 self.traversal.advance_to_sibling();
2950 return Some(item);
2951 }
2952 }
2953 None
2954 }
2955}
2956
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local entry into its protobuf representation.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            // Paths cross the wire as strings; lossy conversion may alter
            // non-UTF-8 paths.
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
2970
2971impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2972 type Error = anyhow::Error;
2973
2974 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2975 if let Some(mtime) = entry.mtime {
2976 let kind = if entry.is_dir {
2977 EntryKind::Dir
2978 } else {
2979 let mut char_bag = root_char_bag.clone();
2980 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2981 EntryKind::File(char_bag)
2982 };
2983 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2984 Ok(Entry {
2985 id: entry.id as usize,
2986 kind,
2987 path: path.clone(),
2988 inode: entry.inode,
2989 mtime: mtime.into(),
2990 is_symlink: entry.is_symlink,
2991 is_ignored: entry.is_ignored,
2992 })
2993 } else {
2994 Err(anyhow!(
2995 "missing mtime in remote worktree entry {:?}",
2996 entry.path
2997 ))
2998 }
2999 }
3000}
3001
/// Conversion from LSP positions into the editor's UTF-16 point type.
trait ToPointUtf16 {
    fn to_point_utf16(self) -> PointUtf16;
}
3005
impl ToPointUtf16 for lsp::Position {
    /// Maps the LSP `line`/`character` pair directly onto `PointUtf16`
    /// (LSP character offsets are UTF-16 code units per the protocol spec).
    fn to_point_utf16(self) -> PointUtf16 {
        PointUtf16::new(self.line, self.character)
    }
}
3011
3012fn diagnostic_ranges<'a>(
3013 diagnostic: &'a lsp::Diagnostic,
3014 abs_path: &'a Path,
3015) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
3016 diagnostic
3017 .related_information
3018 .iter()
3019 .flatten()
3020 .filter_map(move |info| {
3021 if info.location.uri.to_file_path().ok()? == abs_path {
3022 let info_start = PointUtf16::new(
3023 info.location.range.start.line,
3024 info.location.range.start.character,
3025 );
3026 let info_end = PointUtf16::new(
3027 info.location.range.end.line,
3028 info.location.range.end.character,
3029 );
3030 Some(info_start..info_end)
3031 } else {
3032 None
3033 }
3034 })
3035 .chain(Some(
3036 diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
3037 ))
3038}
3039
3040#[cfg(test)]
3041mod tests {
3042 use super::*;
3043 use crate::fs::FakeFs;
3044 use anyhow::Result;
3045 use client::test::{FakeHttpClient, FakeServer};
3046 use fs::RealFs;
3047 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3048 use language::{Diagnostic, LanguageConfig};
3049 use lsp::Url;
3050 use rand::prelude::*;
3051 use serde_json::json;
3052 use std::{cell::RefCell, rc::Rc};
3053 use std::{
3054 env,
3055 fmt::Write,
3056 time::{SystemTime, UNIX_EPOCH},
3057 };
3058 use text::Point;
3059 use unindent::Unindent as _;
3060 use util::test::temp_tree;
3061
    // A scanned worktree yields entries in path order, and `entries(false)`
    // excludes gitignored entries ("a/b" here).
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            // "a/b" is ignored, so only "a/c" appears beneath "a".
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3108
    // Editing a buffer and saving it writes the buffer's contents to disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Large edit so the save is non-trivial in size.
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3142
    // A worktree rooted at a single file exposes that file as the root entry
    // (opened via the empty path) and can save it.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        // The empty relative path refers to the worktree root itself.
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3181
    // After renames and deletions on disk: entry ids survive renames, open
    // buffers track their files' new paths (or are marked deleted), and a
    // remote snapshot can be brought up to date via build_update/apply_update.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client.clone());
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(),
            Client::new(http_client.clone()),
            user_store,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Renamed/moved entries keep their original ids.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            // Only the file that was actually deleted is marked as such.
            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3337
    // Files created after the initial scan inherit the correct ignore status
    // from the existing .gitignore, and `.git` itself is treated as ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Create new files after the initial scan and verify they pick up
        // the correct ignore status on rescan.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3389
    // Opening the same path multiple times (even concurrently, or after
    // dropping one handle) yields the same underlying buffer.
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        let user_id = 100;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client);
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap();
        let buffer_a_2 = buffer_a_2.await.unwrap();
        let buffer_b = buffer_b.await.unwrap();
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap();

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3448
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        // Exercises the buffer dirty-state lifecycle against a real
        // filesystem: editing marks a buffer dirty, saving clears it, and
        // deleting the underlying file dirties it again. Also checks the
        // exact sequence of `language::Event`s emitted along the way.
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Accumulates every event the buffer emits so exact sequences can be
        // asserted below.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            // Simulate a save by reporting the current version as persisted.
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        // Drain pending fs events first so only the deletion below is
        // observed after the edit.
        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3588
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        // Verifies how a buffer reacts to its backing file changing on disk:
        // - a clean buffer silently reloads to match the new contents;
        // - a dirty buffer keeps its edits and is flagged as conflicted.
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3691
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // End-to-end check that diagnostics published by a (fake) language
        // server for a file that is not yet open are retained by the worktree
        // and applied to the buffer when that file is subsequently opened.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        // Publish a diagnostic for `a.rs` before any buffer for it exists.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // The newly-opened buffer carries the previously-published diagnostic.
        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
3773
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Feeds one `PublishDiagnosticsParams` containing two logical errors
        // (each accompanied by HINT-severity entries derived from its
        // `related_information`) into the worktree, then verifies that:
        // - all resulting entries come back sorted by position,
        // - entries are grouped via `group_id`, with the original diagnostic
        //   of each group marked `is_primary`,
        // - `diagnostic_group` returns exactly the members of one group.
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Two interlinked diagnostics: "error 1" with one related hint, and
        // "error 2" with two related hints. Each hint also appears as its own
        // HINT-severity diagnostic pointing back at the original.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All diagnostics, sorted by position; group 0 is "error 1" and its
        // hint, group 1 is "error 2" and its two hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched in isolation.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
4038
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Randomized test: build a random directory tree, scan it with a
        // `BackgroundScanner`, interleave further random FS mutations with
        // batched event delivery, and verify that incremental scanning
        // converges to the same snapshot as a fresh scan. The `OPERATIONS`
        // and `INITIAL_ENTRIES` env vars override the default workload size.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        // Seed the snapshot with only the root entry; the scanner discovers
        // everything else.
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave mutations with delivery of random-sized prefixes of the
        // pending events, checking invariants after every delivery.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            // Occasionally capture a snapshot so `build_update` can be
            // exercised against historical states below.
            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Deliver all remaining events so the scanner reaches quiescence.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan from the same initial snapshot must agree with the
        // incrementally-maintained state.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Each captured historical snapshot can be caught up to the final
        // state via `build_update` / `apply_update`, with or without ignored
        // entries included.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot, mirroring a
                // client that was never sent ignored entries.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4155
4156 fn randomly_mutate_tree(
4157 root_path: &Path,
4158 insertion_probability: f64,
4159 rng: &mut impl Rng,
4160 ) -> Result<Vec<fsevent::Event>> {
4161 let root_path = root_path.canonicalize().unwrap();
4162 let (dirs, files) = read_dir_recursive(root_path.clone());
4163
4164 let mut events = Vec::new();
4165 let mut record_event = |path: PathBuf| {
4166 events.push(fsevent::Event {
4167 event_id: SystemTime::now()
4168 .duration_since(UNIX_EPOCH)
4169 .unwrap()
4170 .as_secs(),
4171 flags: fsevent::StreamFlags::empty(),
4172 path,
4173 });
4174 };
4175
4176 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4177 let path = dirs.choose(rng).unwrap();
4178 let new_path = path.join(gen_name(rng));
4179
4180 if rng.gen() {
4181 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4182 std::fs::create_dir(&new_path)?;
4183 } else {
4184 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4185 std::fs::write(&new_path, "")?;
4186 }
4187 record_event(new_path);
4188 } else if rng.gen_bool(0.05) {
4189 let ignore_dir_path = dirs.choose(rng).unwrap();
4190 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4191
4192 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4193 let files_to_ignore = {
4194 let len = rng.gen_range(0..=subfiles.len());
4195 subfiles.choose_multiple(rng, len)
4196 };
4197 let dirs_to_ignore = {
4198 let len = rng.gen_range(0..subdirs.len());
4199 subdirs.choose_multiple(rng, len)
4200 };
4201
4202 let mut ignore_contents = String::new();
4203 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4204 write!(
4205 ignore_contents,
4206 "{}\n",
4207 path_to_ignore
4208 .strip_prefix(&ignore_dir_path)?
4209 .to_str()
4210 .unwrap()
4211 )
4212 .unwrap();
4213 }
4214 log::info!(
4215 "Creating {:?} with contents:\n{}",
4216 ignore_path.strip_prefix(&root_path)?,
4217 ignore_contents
4218 );
4219 std::fs::write(&ignore_path, ignore_contents).unwrap();
4220 record_event(ignore_path);
4221 } else {
4222 let old_path = {
4223 let file_path = files.choose(rng);
4224 let dir_path = dirs[1..].choose(rng);
4225 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4226 };
4227
4228 let is_rename = rng.gen();
4229 if is_rename {
4230 let new_path_parent = dirs
4231 .iter()
4232 .filter(|d| !d.starts_with(old_path))
4233 .choose(rng)
4234 .unwrap();
4235
4236 let overwrite_existing_dir =
4237 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4238 let new_path = if overwrite_existing_dir {
4239 std::fs::remove_dir_all(&new_path_parent).ok();
4240 new_path_parent.to_path_buf()
4241 } else {
4242 new_path_parent.join(gen_name(rng))
4243 };
4244
4245 log::info!(
4246 "Renaming {:?} to {}{:?}",
4247 old_path.strip_prefix(&root_path)?,
4248 if overwrite_existing_dir {
4249 "overwrite "
4250 } else {
4251 ""
4252 },
4253 new_path.strip_prefix(&root_path)?
4254 );
4255 std::fs::rename(&old_path, &new_path)?;
4256 record_event(old_path.clone());
4257 record_event(new_path);
4258 } else if old_path.is_dir() {
4259 let (dirs, files) = read_dir_recursive(old_path.clone());
4260
4261 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4262 std::fs::remove_dir_all(&old_path).unwrap();
4263 for file in files {
4264 record_event(file);
4265 }
4266 for dir in dirs {
4267 record_event(dir);
4268 }
4269 } else {
4270 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4271 std::fs::remove_file(old_path).unwrap();
4272 record_event(old_path.clone());
4273 }
4274 }
4275
4276 Ok(events)
4277 }
4278
4279 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4280 let child_entries = std::fs::read_dir(&path).unwrap();
4281 let mut dirs = vec![path];
4282 let mut files = Vec::new();
4283 for child_entry in child_entries {
4284 let child_path = child_entry.unwrap().path();
4285 if child_path.is_dir() {
4286 let (child_dirs, child_files) = read_dir_recursive(child_path);
4287 dirs.extend(child_dirs);
4288 files.extend(child_files);
4289 } else {
4290 files.push(child_path);
4291 }
4292 }
4293 (dirs, files)
4294 }
4295
4296 fn gen_name(rng: &mut impl Rng) -> String {
4297 (0..6)
4298 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4299 .map(char::from)
4300 .collect()
4301 }
4302
    impl Snapshot {
        /// Test-only consistency checks on a snapshot: the file iterators,
        /// the path-ordered entry map, and the ignore table must all agree.
        fn check_invariants(&self) {
            // Walk every entry in path order, verifying that `files(true, ..)`
            // yields exactly the file entries and `files(false, ..)` yields
            // exactly the non-ignored ones, in the same order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            // Both iterators must be fully consumed — no extra files.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Re-derive the entry order by walking `child_entries` from the
            // root with an explicit stack. Inserting each child at the
            // pre-loop stack length (`ix`) means the first child ends up on
            // top, so pops descend into each child before its later siblings
            // — a preorder walk that must match the cursor order below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every tracked ignore must correspond to a directory and a
            // `.gitignore` file that still exist in the snapshot.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples
        /// sorted by path, optionally omitting ignored entries. Used to
        /// compare snapshots structurally in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4354}