1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap};
11use collections::{BTreeMap, HashSet};
12use futures::{Stream, StreamExt};
13use fuzzy::CharBag;
14use gpui::{
15 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
16 Task, UpgradeModelHandle, WeakModelHandle,
17};
18use language::{
19 Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
20 Operation, PointUtf16, Rope,
21};
22use lazy_static::lazy_static;
23use lsp::LanguageServer;
24use parking_lot::Mutex;
25use postage::{
26 prelude::{Sink as _, Stream as _},
27 watch,
28};
29use serde::Deserialize;
30use smol::channel::{self, Sender};
31use std::{
32 any::Any,
33 cmp::{self, Ordering},
34 convert::{TryFrom, TryInto},
35 ffi::{OsStr, OsString},
36 fmt,
37 future::Future,
38 mem,
39 ops::{Deref, Range},
40 path::{Path, PathBuf},
41 sync::{
42 atomic::{AtomicUsize, Ordering::SeqCst},
43 Arc,
44 },
45 time::{Duration, SystemTime},
46};
47use sum_tree::Bias;
48use sum_tree::{Edit, SeekTarget, SumTree};
49use util::{post_inc, ResultExt, TryFutureExt};
50
lazy_static! {
    // Interned ".gitignore" file name, compared against directory entry
    // names during scanning to avoid re-allocating the OsStr on every check.
    // NOTE(review): `std::sync::LazyLock` could replace `lazy_static` once
    // the project's toolchain allows it.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
54
/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently running.
    Idle,
    /// The background scanner is actively traversing the tree.
    Scanning,
    /// The scan failed; the error is wrapped in `Arc` so the state stays `Clone`.
    Err(Arc<anyhow::Error>),
}
61
/// A directory tree being edited: either rooted on this machine (`Local`)
/// or mirrored from a collaborator's machine over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
66
/// Events emitted by a `Worktree` model.
#[derive(Debug)]
pub enum Event {
    /// Diagnostics changed for the file at this worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
71
72impl Entity for Worktree {
73 type Event = Event;
74
75 fn app_will_quit(
76 &mut self,
77 _: &mut MutableAppContext,
78 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
79 use futures::FutureExt;
80
81 if let Self::Local(worktree) = self {
82 let shutdown_futures = worktree
83 .language_servers
84 .drain()
85 .filter_map(|(_, server)| server.shutdown())
86 .collect::<Vec<_>>();
87 Some(
88 async move {
89 futures::future::join_all(shutdown_futures).await;
90 }
91 .boxed(),
92 )
93 } else {
94 None
95 }
96 }
97}
98
99impl Worktree {
    /// Creates a local worktree rooted at `path` and starts its background
    /// scanner, which watches the filesystem (debounced at 100ms) and keeps
    /// `background_snapshot` up to date.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // The scanner task lives as long as the worktree; dropping the
            // stored task stops the scan.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
124
    /// Builds a `Worktree::Remote` from a `proto::Worktree` received from the
    /// host: entries are deserialized on the background executor, and two
    /// tasks keep the local snapshot in sync with incoming updates.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        // Lower-cased character bag of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Deserializing every entry can be expensive for large trees, so do
        // it off the main thread.
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Skip malformed entries rather than failing the
                        // whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: remote_id as usize,
                    scan_id: 0,
                    // Remote trees have no local filesystem path.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Task 1: apply queued `UpdateWorktree` messages to the
                // latest snapshot on the background executor and publish
                // the result through the watch channel.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    // Task 2: whenever a new snapshot is published, poll it
                    // on the foreground so open buffers observe the change.
                    // The weak handle lets the task exit once the model drops.
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    diagnostic_summaries: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                })
            })
        });

        Ok(worktree)
    }
233
234 pub fn as_local(&self) -> Option<&LocalWorktree> {
235 if let Worktree::Local(worktree) = self {
236 Some(worktree)
237 } else {
238 None
239 }
240 }
241
242 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
243 if let Worktree::Remote(worktree) = self {
244 Some(worktree)
245 } else {
246 None
247 }
248 }
249
250 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
251 if let Worktree::Local(worktree) = self {
252 Some(worktree)
253 } else {
254 None
255 }
256 }
257
258 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
259 if let Worktree::Remote(worktree) = self {
260 Some(worktree)
261 } else {
262 None
263 }
264 }
265
266 pub fn snapshot(&self) -> Snapshot {
267 match self {
268 Worktree::Local(worktree) => worktree.snapshot(),
269 Worktree::Remote(worktree) => worktree.snapshot(),
270 }
271 }
272
273 pub fn replica_id(&self) -> ReplicaId {
274 match self {
275 Worktree::Local(_) => 0,
276 Worktree::Remote(worktree) => worktree.replica_id,
277 }
278 }
279
280 pub fn remove_collaborator(
281 &mut self,
282 peer_id: PeerId,
283 replica_id: ReplicaId,
284 cx: &mut ModelContext<Self>,
285 ) {
286 match self {
287 Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
288 Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
289 }
290 }
291
292 pub fn languages(&self) -> &Arc<LanguageRegistry> {
293 match self {
294 Worktree::Local(worktree) => &worktree.language_registry,
295 Worktree::Remote(worktree) => &worktree.languages,
296 }
297 }
298
299 pub fn user_store(&self) -> &ModelHandle<UserStore> {
300 match self {
301 Worktree::Local(worktree) => &worktree.user_store,
302 Worktree::Remote(worktree) => &worktree.user_store,
303 }
304 }
305
    /// Responds to a peer's request to open one of our buffers, replying
    /// over RPC once the buffer has loaded.
    ///
    /// Panics if called on a remote worktree (`as_local_mut().unwrap()`).
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        // Await the open and send the response off the main thread; failures
        // are logged (`log_err`) rather than propagated to the caller.
        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
331
    /// Handles a peer closing one of our shared buffers.
    ///
    /// Panics if called on a remote worktree (`as_local_mut().unwrap()`).
    pub fn handle_close_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        self.as_local_mut()
            .unwrap()
            .close_remote_buffer(envelope, cx)
    }
342
343 pub fn diagnostic_summaries<'a>(
344 &'a self,
345 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
346 match self {
347 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
348 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
349 }
350 .iter()
351 .map(|(path, summary)| (path.clone(), summary.clone()))
352 }
353
354 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
355 match self {
356 Worktree::Local(worktree) => &mut worktree.loading_buffers,
357 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
358 }
359 }
360
    /// Opens (or returns an already-open / already-loading) buffer for
    /// `path`. Concurrent requests for the same path are deduplicated via a
    /// per-path watch channel in `loading_buffers`.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    // Publish the result (or a shared error) to all waiters.
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        // Wait until the load task publishes a result; the watch starts out
        // holding `None`.
        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }
414
415 #[cfg(feature = "test-support")]
416 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
417 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
418 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
419 Worktree::Remote(worktree) => {
420 Box::new(worktree.open_buffers.values().filter_map(|buf| {
421 if let RemoteBuffer::Loaded(buf) = buf {
422 Some(buf)
423 } else {
424 None
425 }
426 }))
427 }
428 };
429
430 let path = path.as_ref();
431 open_buffers
432 .find(|buffer| {
433 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
434 file.path().as_ref() == path
435 } else {
436 false
437 }
438 })
439 .is_some()
440 }
441
442 pub fn handle_update_buffer(
443 &mut self,
444 envelope: TypedEnvelope<proto::UpdateBuffer>,
445 cx: &mut ModelContext<Self>,
446 ) -> Result<()> {
447 let payload = envelope.payload.clone();
448 let buffer_id = payload.buffer_id as usize;
449 let ops = payload
450 .operations
451 .into_iter()
452 .map(|op| language::proto::deserialize_operation(op))
453 .collect::<Result<Vec<_>, _>>()?;
454
455 match self {
456 Worktree::Local(worktree) => {
457 let buffer = worktree
458 .open_buffers
459 .get(&buffer_id)
460 .and_then(|buf| buf.upgrade(cx))
461 .ok_or_else(|| {
462 anyhow!("invalid buffer {} in update buffer message", buffer_id)
463 })?;
464 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
465 }
466 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
467 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
468 Some(RemoteBuffer::Loaded(buffer)) => {
469 if let Some(buffer) = buffer.upgrade(cx) {
470 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
471 } else {
472 worktree
473 .open_buffers
474 .insert(buffer_id, RemoteBuffer::Operations(ops));
475 }
476 }
477 None => {
478 worktree
479 .open_buffers
480 .insert(buffer_id, RemoteBuffer::Operations(ops));
481 }
482 },
483 }
484
485 Ok(())
486 }
487
    /// Saves a shared buffer on behalf of a guest and responds with the
    /// resulting version and mtime.
    ///
    /// Panics if called on a remote worktree (`as_local().unwrap()`); fails
    /// if the project is not currently shared or the buffer id is unknown.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        // Look up the strong buffer handle we retained when sharing it with
        // this peer.
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        // The save itself must run on the foreground (it updates the model);
        // only the RPC response is sent from the background.
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
540
541 pub fn handle_buffer_saved(
542 &mut self,
543 envelope: TypedEnvelope<proto::BufferSaved>,
544 cx: &mut ModelContext<Self>,
545 ) -> Result<()> {
546 let payload = envelope.payload.clone();
547 let worktree = self.as_remote_mut().unwrap();
548 if let Some(buffer) = worktree
549 .open_buffers
550 .get(&(payload.buffer_id as usize))
551 .and_then(|buf| buf.upgrade(cx))
552 {
553 buffer.update(cx, |buffer, cx| {
554 let version = payload.version.try_into()?;
555 let mtime = payload
556 .mtime
557 .ok_or_else(|| anyhow!("missing mtime"))?
558 .into();
559 buffer.did_save(version, mtime, None, cx);
560 Result::<_, anyhow::Error>::Ok(())
561 })?;
562 }
563 Ok(())
564 }
565
    /// Refreshes `self.snapshot` from its authoritative source — the
    /// background scanner's snapshot for local trees, the watch channel for
    /// remote trees — and propagates file changes to open buffers.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        // While a scan is in flight, defer buffer updates and
                        // re-poll later — yielding immediately under the fake
                        // fs so tests run fast, otherwise after 100ms.
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    // Scan finished: cancel any pending re-poll and update
                    // open buffers against the final snapshot.
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
598
    /// Reconciles every open buffer's `File` with the current snapshot:
    /// matching by entry id first, then by path, and finally marking the
    /// file as deleted (no entry id) when neither matches. Buffers whose
    /// models have been dropped are pruned from the map afterwards.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Remote entries may be queued operations without a buffer;
                // only loaded buffers can be updated.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = buffer.file() {
                        // Prefer the stable entry id (survives renames) ...
                        let new_file = if let Some(entry) = old_file
                            .entry_id()
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        // ... then fall back to a path match ...
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        // ... otherwise the file no longer exists on disk.
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        // Drop map entries for buffers that no longer exist.
        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
674
675 pub fn update_lsp_diagnostics(
676 &mut self,
677 mut params: lsp::PublishDiagnosticsParams,
678 disk_based_sources: &HashSet<String>,
679 cx: &mut ModelContext<Worktree>,
680 ) -> Result<()> {
681 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
682 let abs_path = params
683 .uri
684 .to_file_path()
685 .map_err(|_| anyhow!("URI is not a file"))?;
686 let worktree_path = Arc::from(
687 abs_path
688 .strip_prefix(&this.abs_path)
689 .context("path is not within worktree")?,
690 );
691
692 let mut group_ids_by_diagnostic_range = HashMap::default();
693 let mut diagnostics_by_group_id = HashMap::default();
694 let mut next_group_id = 0;
695 for diagnostic in &mut params.diagnostics {
696 let source = diagnostic.source.as_ref();
697 let code = diagnostic.code.as_ref();
698 let group_id = diagnostic_ranges(&diagnostic, &abs_path)
699 .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
700 .copied()
701 .unwrap_or_else(|| {
702 let group_id = post_inc(&mut next_group_id);
703 for range in diagnostic_ranges(&diagnostic, &abs_path) {
704 group_ids_by_diagnostic_range.insert((source, code, range), group_id);
705 }
706 group_id
707 });
708
709 diagnostics_by_group_id
710 .entry(group_id)
711 .or_insert(Vec::new())
712 .push(DiagnosticEntry {
713 range: diagnostic.range.start.to_point_utf16()
714 ..diagnostic.range.end.to_point_utf16(),
715 diagnostic: Diagnostic {
716 code: diagnostic.code.clone().map(|code| match code {
717 lsp::NumberOrString::Number(code) => code.to_string(),
718 lsp::NumberOrString::String(code) => code,
719 }),
720 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
721 message: mem::take(&mut diagnostic.message),
722 group_id,
723 is_primary: false,
724 is_valid: true,
725 is_disk_based: diagnostic
726 .source
727 .as_ref()
728 .map_or(false, |source| disk_based_sources.contains(source)),
729 },
730 });
731 }
732
733 let diagnostics = diagnostics_by_group_id
734 .into_values()
735 .flat_map(|mut diagnostics| {
736 let primary = diagnostics
737 .iter_mut()
738 .min_by_key(|entry| entry.diagnostic.severity)
739 .unwrap();
740 primary.diagnostic.is_primary = true;
741 diagnostics
742 })
743 .collect::<Vec<_>>();
744
745 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)
746 }
747
    /// Converts an LSP `publishDiagnostics` notification into diagnostic
    /// entries for the corresponding file in this (local) worktree.
    ///
    /// Diagnostics sharing a `(source, code, range)` key — as enumerated by
    /// `diagnostic_ranges` (helper not shown in this chunk) — are assigned
    /// the same group id, and the most severe entry in each group is marked
    /// primary. Fails if `self` is remote or the URI is outside the tree.
    pub fn update_diagnostics(
        &mut self,
        mut params: lsp::PublishDiagnosticsParams,
        disk_based_sources: &HashSet<String>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
        let abs_path = params
            .uri
            .to_file_path()
            .map_err(|_| anyhow!("URI is not a file"))?;
        // Store diagnostics under the worktree-relative path.
        let worktree_path = Arc::from(
            abs_path
                .strip_prefix(&this.abs_path)
                .context("path is not within worktree")?,
        );

        let mut group_ids_by_diagnostic_range = HashMap::default();
        let mut diagnostics_by_group_id = HashMap::default();
        let mut next_group_id = 0;
        for diagnostic in &mut params.diagnostics {
            let source = diagnostic.source.as_ref();
            let code = diagnostic.code.as_ref();
            // Reuse the group id of any previously-seen overlapping
            // diagnostic with the same source and code; otherwise allocate
            // a fresh group and register all of this diagnostic's ranges.
            let group_id = diagnostic_ranges(&diagnostic, &abs_path)
                .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
                .copied()
                .unwrap_or_else(|| {
                    let group_id = post_inc(&mut next_group_id);
                    for range in diagnostic_ranges(&diagnostic, &abs_path) {
                        group_ids_by_diagnostic_range.insert((source, code, range), group_id);
                    }
                    group_id
                });

            diagnostics_by_group_id
                .entry(group_id)
                .or_insert(Vec::new())
                .push(DiagnosticEntry {
                    range: diagnostic.range.start.to_point_utf16()
                        ..diagnostic.range.end.to_point_utf16(),
                    diagnostic: Diagnostic {
                        code: diagnostic.code.clone().map(|code| match code {
                            lsp::NumberOrString::Number(code) => code.to_string(),
                            lsp::NumberOrString::String(code) => code,
                        }),
                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
                        // The message is taken (not cloned); `params` is
                        // consumed by this call.
                        message: mem::take(&mut diagnostic.message),
                        group_id,
                        is_primary: false,
                        is_valid: true,
                        is_disk_based: diagnostic
                            .source
                            .as_ref()
                            .map_or(false, |source| disk_based_sources.contains(source)),
                    },
                });
        }

        // Flatten the groups, marking the most severe entry of each group
        // as primary (lower `DiagnosticSeverity` value = more severe).
        let diagnostics = diagnostics_by_group_id
            .into_values()
            .flat_map(|mut diagnostics| {
                let primary = diagnostics
                    .iter_mut()
                    .min_by_key(|entry| entry.diagnostic.severity)
                    .unwrap();
                primary.diagnostic.is_primary = true;
                diagnostics
            })
            .collect::<Vec<_>>();

        self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)
    }
820
    /// Stores `diagnostics` for `path`: pushes them into the matching open
    /// buffer (if any) and broadcasts the resulting operation, then caches
    /// the entries and their summary and emits `DiagnosticsUpdated`.
    ///
    /// Panics if called on a remote worktree.
    pub fn update_diagnostic_entries(
        &mut self,
        path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        // At most one open buffer can match the path; stop after it.
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        // Re-borrow as local: `send_buffer_update` above needed `&mut self`.
        let this = self.as_local_mut().unwrap();
        this.diagnostic_summaries
            .insert(path.clone(), DiagnosticSummary::new(&diagnostics));
        this.diagnostics.insert(path.clone(), diagnostics);
        cx.emit(Event::DiagnosticsUpdated(path.clone()));
        Ok(())
    }
855
    /// Sends a buffer operation to the peer(s) over RPC. Does nothing for an
    /// unshared local worktree; on failure the operation is queued in
    /// `queued_operations` for later retransmission.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        // Resolve routing information: local trees only send while shared;
        // remote trees always have a host to send to.
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree.share.as_ref().map(|share| {
                (
                    share.project_id,
                    worktree.id() as u64,
                    worktree.client.clone(),
                )
            }),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.remote_id,
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    // Keep the operation so it isn't lost on a transient
                    // network failure.
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
899}
900
/// A point-in-time view of a worktree's file entries, cheap to clone and
/// safe to read off the main thread.
#[derive(Clone)]
pub struct Snapshot {
    id: usize,
    // Monotonically increasing id of the scan that produced this snapshot.
    scan_id: usize,
    abs_path: Arc<Path>,
    root_name: String,
    // Lower-cased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed .gitignore per directory, paired with the scan_id it was read at.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Entry ids removed during a scan, keyed by id → scan_id of removal.
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}
914
/// A worktree rooted on this machine, backed by a background filesystem
/// scanner and optionally shared with remote collaborators.
pub struct LocalWorktree {
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Parsed `.zed.toml` configuration.
    config: WorktreeConfig,
    // Snapshot mutated by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the background scanner alive; dropping it stops the scan.
    _background_scanner_task: Option<Task<()>>,
    // Pending re-poll scheduled while a scan is in progress.
    poll_task: Option<Task<()>>,
    // Present while the worktree is shared with collaborators.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers opened by each remote peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Diagnostics for files that aren't currently open in a buffer.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    // Operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Languages in use in this worktree.
    languages: Vec<Arc<Language>>,
    // Running language servers, keyed by language name.
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
936
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Channel used to forward completed snapshots to remote peers.
    snapshots_tx: Sender<Snapshot>,
}
941
/// A worktree mirrored from a collaborator's machine; its snapshot is kept
/// current by applying `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    project_id: u64,
    remote_id: u64,
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Watch channel publishing snapshots produced by the update task.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Queue of incoming worktree updates to apply in order.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Buffers may be fully loaded, or a queue of ops awaiting the open.
    open_buffers: HashMap<usize, RemoteBuffer>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
}
957
// Per-path watch channels for in-flight buffer loads: `None` while loading,
// then the shared result. Lets concurrent `open_buffer` calls for the same
// path await a single load.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
962
/// Per-worktree configuration parsed from `.zed.toml` at the tree root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins authorized to join this worktree when shared.
    collaborators: Vec<String>,
}
967
968impl LocalWorktree {
    /// Constructs the `Worktree::Local` model for `path`: reads the root
    /// metadata and `.zed.toml`, builds the initial snapshot, and spawns the
    /// task that reacts to scan-state changes. Returns the model together
    /// with the sender the background scanner will report its state on
    /// (the scanner itself is started by `Worktree::open_local`).
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is always empty.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // A missing or malformed .zed.toml silently falls back to defaults.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: cx.model_id(),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                languages: Default::default(),
                language_servers: Default::default(),
            };

            // Forward scan-state changes: record the latest state, poll the
            // snapshot, and — once a scan completes while shared — queue the
            // finished snapshot for transmission to collaborators.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree model was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1077
1078 pub fn authorized_logins(&self) -> Vec<String> {
1079 self.config.collaborators.clone()
1080 }
1081
1082 pub fn language_registry(&self) -> &LanguageRegistry {
1083 &self.language_registry
1084 }
1085
1086 pub fn languages(&self) -> &[Arc<Language>] {
1087 &self.languages
1088 }
1089
    /// Registers `language` with this worktree and returns its language
    /// server, starting one — and wiring its diagnostics notifications back
    /// into the worktree — if none is running yet. Returns `None` when the
    /// language has no server or it fails to start.
    pub fn register_language(
        &mut self,
        language: &Arc<Language>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        // Track the language itself (by pointer identity) at most once.
        if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
            self.languages.push(language.clone());
        }

        // Reuse an already-running server for this language.
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            let disk_based_sources = language
                .disk_based_diagnostic_sources()
                .cloned()
                .unwrap_or_default();
            // Bridge the server's notification callback (sync) to an async
            // channel consumed on the foreground.
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(diagnostics) = diagnostics_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        handle.update(&mut cx, |this, cx| {
                            this.update_lsp_diagnostics(diagnostics, &disk_based_sources, cx)
                                .log_err();
                        });
                    } else {
                        // The worktree was dropped; stop processing.
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }
1139
1140 fn get_open_buffer(
1141 &mut self,
1142 path: &Path,
1143 cx: &mut ModelContext<Worktree>,
1144 ) -> Option<ModelHandle<Buffer>> {
1145 let worktree_id = self.id();
1146 let mut result = None;
1147 self.open_buffers.retain(|_buffer_id, buffer| {
1148 if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
1149 if let Some(file) = buffer.read(cx.as_ref()).file() {
1150 if file.worktree_id() == worktree_id && file.path().as_ref() == path {
1151 result = Some(buffer);
1152 }
1153 }
1154 true
1155 } else {
1156 false
1157 }
1158 });
1159 result
1160 }
1161
    /// Loads the file at `path` from disk and opens it as a new buffer,
    /// wiring up any pending diagnostics, detected language, and language
    /// server before registering the buffer in `open_buffers`.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Read the file's contents and refresh its worktree entry.
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Take any diagnostics that arrived before the buffer was opened,
            // and resolve the language + server for this file.
            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.remove(&path);
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                }
                buffer
            });

            // Track the buffer weakly so it can be found by later opens.
            this.update(&mut cx, |this, _| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1203
    /// Handles a remote peer's request to open a buffer: opens it locally,
    /// records it as shared with that peer, and replies with the buffer's
    /// serialized state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                // Keep a strong handle per peer so the buffer outlives this
                // request for as long as the peer has it open.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1229
1230 pub fn close_remote_buffer(
1231 &mut self,
1232 envelope: TypedEnvelope<proto::CloseBuffer>,
1233 cx: &mut ModelContext<Worktree>,
1234 ) -> Result<()> {
1235 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1236 shared_buffers.remove(&envelope.payload.buffer_id);
1237 cx.notify();
1238 }
1239
1240 Ok(())
1241 }
1242
1243 pub fn remove_collaborator(
1244 &mut self,
1245 peer_id: PeerId,
1246 replica_id: ReplicaId,
1247 cx: &mut ModelContext<Worktree>,
1248 ) {
1249 self.shared_buffers.remove(&peer_id);
1250 for (_, buffer) in &self.open_buffers {
1251 if let Some(buffer) = buffer.upgrade(cx) {
1252 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1253 }
1254 }
1255 cx.notify();
1256 }
1257
    /// Returns a future that resolves once the background scanner is no
    /// longer in the `Scanning` state.
    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            // Seed with the current state, then follow the watch channel
            // until it reports anything other than `Scanning`.
            // A `None` from `recv` means the sender was dropped; treat that
            // as completion too.
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Scanning) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }
1267
1268 fn is_scanning(&self) -> bool {
1269 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1270 true
1271 } else {
1272 false
1273 }
1274 }
1275
    /// Returns a clone of the current snapshot of this worktree's entries.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1279
    /// Returns the absolute path of this worktree's root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1283
    /// Returns whether `path` lies within this worktree's root directory.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1287
1288 fn absolutize(&self, path: &Path) -> PathBuf {
1289 if path.file_name().is_some() {
1290 self.snapshot.abs_path.join(path)
1291 } else {
1292 self.snapshot.abs_path.to_path_buf()
1293 }
1294 }
1295
    /// Reads the file at `path` from disk, returning a `File` handle for it
    /// along with its text contents.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1321
    /// Saves `buffer`'s contents to a new `path` in this worktree, registers
    /// the buffer under that path, and returns the resulting `File` handle.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer weakly so later opens of this path find it.
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1346
    /// Writes `text` to `path` on disk, refreshes that path's entry in the
    /// background snapshot, and returns the refreshed entry.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Do the disk write and entry refresh off the main thread.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Pick up the refreshed entry in the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1368
    /// Starts sharing this worktree under `project_id`: sends the initial
    /// snapshot to the server and spawns a task that streams incremental
    /// snapshot diffs for as long as the share is active.
    ///
    /// Idempotent — returns immediately if the worktree is already shared.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
        });

        // Stream diffs between consecutive snapshots in the background.
        // The loop ends when the sender in `ShareState` is dropped.
        cx.background()
            .spawn({
                let rpc = rpc.clone();
                let snapshot = snapshot.clone();
                async move {
                    let mut prev_snapshot = snapshot;
                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                        let message =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                        match rpc.send(message).await {
                            // Only advance the baseline on a successful send,
                            // so a failed diff is retried against the old base.
                            Ok(()) => prev_snapshot = snapshot,
                            Err(err) => log::error!("error sending snapshot diff {}", err),
                        }
                    }
                }
            })
            .detach();

        // Serializing the full snapshot can be expensive; do it off-thread.
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto()),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1417}
1418
1419fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1420 let contents = smol::block_on(fs.load(&abs_path))?;
1421 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1422 let mut builder = GitignoreBuilder::new(parent);
1423 for line in contents.lines() {
1424 builder.add_line(Some(abs_path.into()), line)?;
1425 }
1426 Ok(builder.build()?)
1427}
1428
/// Both worktree variants expose their `Snapshot` directly, so `Worktree`
/// dereferences to whichever variant's snapshot is active.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1439
/// A `LocalWorktree` dereferences to its current snapshot, giving callers
/// direct access to entry-traversal methods.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1447
/// Debug output for a local worktree is just its snapshot's entry listing.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1453
impl RemoteWorktree {
    /// Returns the server-assigned id of this worktree.
    pub fn remote_id(&self) -> u64 {
        self.remote_id
    }

    /// Returns the already-open buffer for `path`, if any, pruning
    /// `open_buffers` entries whose weak handles are dead as a side effect.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        existing_buffer
    }

    /// Opens the buffer for `path` by requesting its contents from the host
    /// over RPC, then reconstructing it locally with the appropriate
    /// language. Any operations that arrived for the buffer before it
    /// finished loading are applied once it exists.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.remote_id;
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        // Weak handle: the worktree may be closed while the request is in flight.
        cx.spawn_weak(move |this, mut cx| async move {
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id as u64,
                    path: path_string,
                })
                .await?;

            // Re-check liveness after awaiting the RPC round trip.
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // If operations were queued while loading, replay them now.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Closes every open buffer, e.g. when the remote worktree is torn down.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// Returns a clone of the current snapshot of this worktree's entries.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues an `UpdateWorktree` message from the host to be applied by the
    /// background task that owns the receiving end of `updates_tx`.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Strips a departed collaborator's replica state from every open buffer.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1581
/// State of a remotely-hosted buffer: either still loading (with operations
/// buffered until it arrives) or loaded and tracked via a weak handle.
enum RemoteBuffer {
    // Operations received before the buffer finished loading.
    Operations(Vec<Operation>),
    // The buffer has been constructed locally.
    Loaded(WeakModelHandle<Buffer>),
}
1586
1587impl RemoteBuffer {
1588 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1589 match self {
1590 Self::Operations(_) => None,
1591 Self::Loaded(buffer) => buffer.upgrade(cx),
1592 }
1593 }
1594}
1595
impl Snapshot {
    /// Returns this snapshot's worktree id.
    pub fn id(&self) -> usize {
        self.id
    }

    /// Serializes the snapshot for the wire, omitting ignored entries.
    pub fn to_proto(&self) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id as u64,
            root_name,
            entries: self
                .entries_by_path
                .cursor::<()>()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
        }
    }

    /// Computes the diff between this snapshot and an older one (`other`) as
    /// an `UpdateWorktree` message, by merging the two id-ordered entry
    /// sequences and comparing scan ids.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        // Classic sorted-merge: both cursors yield entries in id order.
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Present here but not in `other`: newly added.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // Present in both: changed only if rescanned since.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Present only in `other`: removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies a remote `UpdateWorktree` diff to this snapshot, editing both
    /// the path-ordered and id-ordered entry trees in lockstep.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, remove its stale path-keyed record first.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Returns the number of file entries (including ignored ones).
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Returns the number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    // Builds a traversal positioned at the `start_offset`-th entry, counting
    // only the kinds of entries selected by the two `include_*` flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    // Builds a traversal positioned at `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories).
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over every entry's path, excluding the root entry itself
    /// (whose path is the empty string).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    // Iterates over the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Bias::Right positions the cursor just past the parent entry itself.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// Returns the worktree's root entry, if it has been scanned yet.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// Returns the file name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry at exactly `path`, if one exists.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal lands on the nearest entry at-or-after
                // `path`; only an exact match counts.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by its stable id.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// Returns the inode of the entry at `path`, if one exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    // Inserts (or replaces) a single entry in both trees. If the entry is a
    // `.gitignore` file, its rules are (re)loaded into `self.ignores`.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A broken .gitignore shouldn't abort the scan.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    // Records the scanned children of `parent_path`, transitioning the
    // parent from `PendingDir` to `Dir` and storing its gitignore, if any.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            // populate_dir is only ever called on a pending directory.
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    // Keeps entry ids stable across rescans: prefer the id recorded when an
    // entry with the same inode was removed, else the id of the existing
    // entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    // Removes `path` and everything beneath it from both trees, remembering
    // removed ids by inode so a re-created entry can reuse its old id.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`] + [subtree at `path`] +
            // [after `path`], keeping the outer two halves.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Mark the containing directory's ignore record as stale so its
        // rules are reloaded on the next pass.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    // Builds the stack of gitignore rules that apply to `path` by walking
    // its ancestors from the root downward.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            // If an ancestor directory is itself ignored, everything under
            // it is ignored — no need to consult deeper rules.
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1956
/// Debug-prints the snapshot as an indented tree: one line per entry,
/// indented by its depth, with the entry's inode.
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for entry in self.entries_by_path.cursor::<()>() {
            // One indent step per ancestor directory.
            for _ in entry.path.ancestors().skip(1) {
                write!(f, " ")?;
            }
            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
        }
        Ok(())
    }
}
1968
/// A handle to a file within a worktree, shared between buffers and the
/// worktree that owns them.
#[derive(Clone, PartialEq)]
pub struct File {
    // `None` when the underlying entry has been deleted from disk.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    // Absolute path of the worktree root containing this file.
    worktree_path: Arc<Path>,
    // Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    // Whether the file lives on this machine (vs. a remote collaborator's).
    is_local: bool,
}
1978
impl language::File for File {
    /// Returns the id of the worktree this file belongs to.
    fn worktree_id(&self) -> usize {
        self.worktree.id()
    }

    /// Returns the file's worktree entry id, or `None` if it was deleted.
    fn entry_id(&self) -> Option<usize> {
        self.entry_id
    }

    /// Returns the file's last-known modification time.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// Returns the file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Returns the file's absolute path, or `None` for remote files.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// Returns the file's path prefixed with the worktree's root name,
    /// suitable for display and language detection.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    /// Returns whether the file has been deleted from disk.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Saves the buffer's contents: writes to disk for local worktrees
    /// (notifying collaborators if shared), or asks the host to save over
    /// RPC for remote worktrees. Resolves to the saved version and mtime.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id() as u64;
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Only broadcast the save when the worktree is shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Reads the file's contents from disk; `None` for remote worktrees.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Forwards a buffer operation to the worktree for broadcast to peers.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// Notifies the host that this remote buffer was closed, letting it drop
    /// its shared handle. No-op for local worktrees.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.remote_id;
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    /// Clones this handle behind the `language::File` trait object.
    fn boxed_clone(&self) -> Box<dyn language::File> {
        Box::new(self.clone())
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
2124
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    // Stable id, preserved across rescans (see `Snapshot::reuse_entry_id`).
    pub id: usize,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    pub is_ignored: bool,
}
2135
/// What kind of filesystem object an `Entry` represents.
#[derive(Clone, Debug)]
pub enum EntryKind {
    // A directory whose children have not been scanned yet.
    PendingDir,
    Dir,
    // A file, with its precomputed fuzzy-matching character bag.
    File(CharBag),
}
2142
2143impl Entry {
2144 fn new(
2145 path: Arc<Path>,
2146 metadata: &fs::Metadata,
2147 next_entry_id: &AtomicUsize,
2148 root_char_bag: CharBag,
2149 ) -> Self {
2150 Self {
2151 id: next_entry_id.fetch_add(1, SeqCst),
2152 kind: if metadata.is_dir {
2153 EntryKind::PendingDir
2154 } else {
2155 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2156 },
2157 path,
2158 inode: metadata.inode,
2159 mtime: metadata.mtime,
2160 is_symlink: metadata.is_symlink,
2161 is_ignored: false,
2162 }
2163 }
2164
2165 pub fn is_dir(&self) -> bool {
2166 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2167 }
2168
2169 pub fn is_file(&self) -> bool {
2170 matches!(self.kind, EntryKind::File(_))
2171 }
2172}
2173
2174impl sum_tree::Item for Entry {
2175 type Summary = EntrySummary;
2176
2177 fn summary(&self) -> Self::Summary {
2178 let visible_count = if self.is_ignored { 0 } else { 1 };
2179 let file_count;
2180 let visible_file_count;
2181 if self.is_file() {
2182 file_count = 1;
2183 visible_file_count = visible_count;
2184 } else {
2185 file_count = 0;
2186 visible_file_count = 0;
2187 }
2188
2189 EntrySummary {
2190 max_path: self.path.clone(),
2191 count: 1,
2192 visible_count,
2193 file_count,
2194 visible_file_count,
2195 }
2196 }
2197}
2198
/// Entries are keyed (and therefore ordered) by their worktree-relative path.
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2206
/// Aggregated statistics over a subtree of entries in `entries_by_path`.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest (rightmost) path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are not ignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are non-ignored files.
    visible_file_count: usize,
}
2215
/// The empty summary: zero entries and an empty maximum path.
impl Default for EntrySummary {
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2227
2228impl sum_tree::Summary for EntrySummary {
2229 type Context = ();
2230
2231 fn add_summary(&mut self, rhs: &Self, _: &()) {
2232 self.max_path = rhs.max_path.clone();
2233 self.visible_count += rhs.visible_count;
2234 self.file_count += rhs.file_count;
2235 self.visible_file_count += rhs.visible_file_count;
2236 }
2237}
2238
/// An id-ordered record of an entry, stored in `entries_by_id` so entries
/// can be found by id and diffed by scan generation.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // Generation of the scan that last touched this entry.
    scan_id: usize,
}
2246
/// A single `PathEntry` summarizes to its own id as the maximum.
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2254
/// `PathEntry` records are keyed (and ordered) by entry id.
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    fn key(&self) -> Self::Key {
        self.id
    }
}
2262
/// Summary for the id-ordered tree: just the greatest id in the range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2267
/// Combining summaries keeps the rightmost (greatest) id, since entries are
/// ordered by id.
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2275
/// Allows seeking the id-ordered tree by a plain `usize` id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2281
/// Ordering key for the path-ordered entry tree: a worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2284
/// The default key is the empty path, which sorts before all other paths.
impl Default for PathKey {
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2290
/// Allows seeking the path-ordered tree by `PathKey`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2296
/// Scans a worktree's directory tree on background threads, keeping the
/// shared snapshot up to date and reporting progress over `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the foreground worktree; updated in place.
    snapshot: Arc<Mutex<Snapshot>>,
    // Channel for broadcasting Scanning/Idle/Err state transitions.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2303
2304impl BackgroundScanner {
    /// Creates a scanner over `snapshot`, reporting state changes on `notify`
    /// and running scan jobs on `executor`.
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }
2318
2319 fn abs_path(&self) -> Arc<Path> {
2320 self.snapshot.lock().abs_path.clone()
2321 }
2322
2323 fn snapshot(&self) -> Snapshot {
2324 self.snapshot.lock().clone()
2325 }
2326
2327 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2328 if self.notify.send(ScanState::Scanning).await.is_err() {
2329 return;
2330 }
2331
2332 if let Err(err) = self.scan_dirs().await {
2333 if self
2334 .notify
2335 .send(ScanState::Err(Arc::new(err)))
2336 .await
2337 .is_err()
2338 {
2339 return;
2340 }
2341 }
2342
2343 if self.notify.send(ScanState::Idle).await.is_err() {
2344 return;
2345 }
2346
2347 futures::pin_mut!(events_rx);
2348 while let Some(events) = events_rx.next().await {
2349 if self.notify.send(ScanState::Scanning).await.is_err() {
2350 break;
2351 }
2352
2353 if !self.process_events(events).await {
2354 break;
2355 }
2356
2357 if self.notify.send(ScanState::Idle).await.is_err() {
2358 break;
2359 }
2360 }
2361 }
2362
2363 async fn scan_dirs(&mut self) -> Result<()> {
2364 let root_char_bag;
2365 let next_entry_id;
2366 let is_dir;
2367 {
2368 let snapshot = self.snapshot.lock();
2369 root_char_bag = snapshot.root_char_bag;
2370 next_entry_id = snapshot.next_entry_id.clone();
2371 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
2372 };
2373
2374 if is_dir {
2375 let path: Arc<Path> = Arc::from(Path::new(""));
2376 let abs_path = self.abs_path();
2377 let (tx, rx) = channel::unbounded();
2378 tx.send(ScanJob {
2379 abs_path: abs_path.to_path_buf(),
2380 path,
2381 ignore_stack: IgnoreStack::none(),
2382 scan_queue: tx.clone(),
2383 })
2384 .await
2385 .unwrap();
2386 drop(tx);
2387
2388 self.executor
2389 .scoped(|scope| {
2390 for _ in 0..self.executor.num_cpus() {
2391 scope.spawn(async {
2392 while let Ok(job) = rx.recv().await {
2393 if let Err(err) = self
2394 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2395 .await
2396 {
2397 log::error!("error scanning {:?}: {}", job.abs_path, err);
2398 }
2399 }
2400 });
2401 }
2402 })
2403 .await;
2404 }
2405
2406 Ok(())
2407 }
2408
2409 async fn scan_dir(
2410 &self,
2411 root_char_bag: CharBag,
2412 next_entry_id: Arc<AtomicUsize>,
2413 job: &ScanJob,
2414 ) -> Result<()> {
2415 let mut new_entries: Vec<Entry> = Vec::new();
2416 let mut new_jobs: Vec<ScanJob> = Vec::new();
2417 let mut ignore_stack = job.ignore_stack.clone();
2418 let mut new_ignore = None;
2419
2420 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2421 while let Some(child_abs_path) = child_paths.next().await {
2422 let child_abs_path = match child_abs_path {
2423 Ok(child_abs_path) => child_abs_path,
2424 Err(error) => {
2425 log::error!("error processing entry {:?}", error);
2426 continue;
2427 }
2428 };
2429 let child_name = child_abs_path.file_name().unwrap();
2430 let child_path: Arc<Path> = job.path.join(child_name).into();
2431 let child_metadata = match self.fs.metadata(&child_abs_path).await? {
2432 Some(metadata) => metadata,
2433 None => continue,
2434 };
2435
2436 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2437 if child_name == *GITIGNORE {
2438 match build_gitignore(&child_abs_path, self.fs.as_ref()) {
2439 Ok(ignore) => {
2440 let ignore = Arc::new(ignore);
2441 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2442 new_ignore = Some(ignore);
2443 }
2444 Err(error) => {
2445 log::error!(
2446 "error loading .gitignore file {:?} - {:?}",
2447 child_name,
2448 error
2449 );
2450 }
2451 }
2452
2453 // Update ignore status of any child entries we've already processed to reflect the
2454 // ignore file in the current directory. Because `.gitignore` starts with a `.`,
2455 // there should rarely be too numerous. Update the ignore stack associated with any
2456 // new jobs as well.
2457 let mut new_jobs = new_jobs.iter_mut();
2458 for entry in &mut new_entries {
2459 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2460 if entry.is_dir() {
2461 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2462 IgnoreStack::all()
2463 } else {
2464 ignore_stack.clone()
2465 };
2466 }
2467 }
2468 }
2469
2470 let mut child_entry = Entry::new(
2471 child_path.clone(),
2472 &child_metadata,
2473 &next_entry_id,
2474 root_char_bag,
2475 );
2476
2477 if child_metadata.is_dir {
2478 let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
2479 child_entry.is_ignored = is_ignored;
2480 new_entries.push(child_entry);
2481 new_jobs.push(ScanJob {
2482 abs_path: child_abs_path,
2483 path: child_path,
2484 ignore_stack: if is_ignored {
2485 IgnoreStack::all()
2486 } else {
2487 ignore_stack.clone()
2488 },
2489 scan_queue: job.scan_queue.clone(),
2490 });
2491 } else {
2492 child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
2493 new_entries.push(child_entry);
2494 };
2495 }
2496
2497 self.snapshot
2498 .lock()
2499 .populate_dir(job.path.clone(), new_entries, new_ignore);
2500 for new_job in new_jobs {
2501 job.scan_queue.send(new_job).await.unwrap();
2502 }
2503
2504 Ok(())
2505 }
2506
2507 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2508 let mut snapshot = self.snapshot();
2509 snapshot.scan_id += 1;
2510
2511 let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
2512 abs_path
2513 } else {
2514 return false;
2515 };
2516 let root_char_bag = snapshot.root_char_bag;
2517 let next_entry_id = snapshot.next_entry_id.clone();
2518
2519 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2520 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2521
2522 for event in &events {
2523 match event.path.strip_prefix(&root_abs_path) {
2524 Ok(path) => snapshot.remove_path(&path),
2525 Err(_) => {
2526 log::error!(
2527 "unexpected event {:?} for root path {:?}",
2528 event.path,
2529 root_abs_path
2530 );
2531 continue;
2532 }
2533 }
2534 }
2535
2536 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2537 for event in events {
2538 let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
2539 Ok(path) => Arc::from(path.to_path_buf()),
2540 Err(_) => {
2541 log::error!(
2542 "unexpected event {:?} for root path {:?}",
2543 event.path,
2544 root_abs_path
2545 );
2546 continue;
2547 }
2548 };
2549
2550 match self.fs.metadata(&event.path).await {
2551 Ok(Some(metadata)) => {
2552 let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
2553 let mut fs_entry = Entry::new(
2554 path.clone(),
2555 &metadata,
2556 snapshot.next_entry_id.as_ref(),
2557 snapshot.root_char_bag,
2558 );
2559 fs_entry.is_ignored = ignore_stack.is_all();
2560 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2561 if metadata.is_dir {
2562 scan_queue_tx
2563 .send(ScanJob {
2564 abs_path: event.path,
2565 path,
2566 ignore_stack,
2567 scan_queue: scan_queue_tx.clone(),
2568 })
2569 .await
2570 .unwrap();
2571 }
2572 }
2573 Ok(None) => {}
2574 Err(err) => {
2575 // TODO - create a special 'error' entry in the entries tree to mark this
2576 log::error!("error reading file on event {:?}", err);
2577 }
2578 }
2579 }
2580
2581 *self.snapshot.lock() = snapshot;
2582
2583 // Scan any directories that were created as part of this event batch.
2584 drop(scan_queue_tx);
2585 self.executor
2586 .scoped(|scope| {
2587 for _ in 0..self.executor.num_cpus() {
2588 scope.spawn(async {
2589 while let Ok(job) = scan_queue_rx.recv().await {
2590 if let Err(err) = self
2591 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2592 .await
2593 {
2594 log::error!("error scanning {:?}: {}", job.abs_path, err);
2595 }
2596 }
2597 });
2598 }
2599 })
2600 .await;
2601
2602 // Attempt to detect renames only over a single batch of file-system events.
2603 self.snapshot.lock().removed_entry_ids.clear();
2604
2605 self.update_ignore_statuses().await;
2606 true
2607 }
2608
2609 async fn update_ignore_statuses(&self) {
2610 let mut snapshot = self.snapshot();
2611
2612 let mut ignores_to_update = Vec::new();
2613 let mut ignores_to_delete = Vec::new();
2614 for (parent_path, (_, scan_id)) in &snapshot.ignores {
2615 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2616 ignores_to_update.push(parent_path.clone());
2617 }
2618
2619 let ignore_path = parent_path.join(&*GITIGNORE);
2620 if snapshot.entry_for_path(ignore_path).is_none() {
2621 ignores_to_delete.push(parent_path.clone());
2622 }
2623 }
2624
2625 for parent_path in ignores_to_delete {
2626 snapshot.ignores.remove(&parent_path);
2627 self.snapshot.lock().ignores.remove(&parent_path);
2628 }
2629
2630 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2631 ignores_to_update.sort_unstable();
2632 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2633 while let Some(parent_path) = ignores_to_update.next() {
2634 while ignores_to_update
2635 .peek()
2636 .map_or(false, |p| p.starts_with(&parent_path))
2637 {
2638 ignores_to_update.next().unwrap();
2639 }
2640
2641 let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2642 ignore_queue_tx
2643 .send(UpdateIgnoreStatusJob {
2644 path: parent_path,
2645 ignore_stack,
2646 ignore_queue: ignore_queue_tx.clone(),
2647 })
2648 .await
2649 .unwrap();
2650 }
2651 drop(ignore_queue_tx);
2652
2653 self.executor
2654 .scoped(|scope| {
2655 for _ in 0..self.executor.num_cpus() {
2656 scope.spawn(async {
2657 while let Ok(job) = ignore_queue_rx.recv().await {
2658 self.update_ignore_status(job, &snapshot).await;
2659 }
2660 });
2661 }
2662 })
2663 .await;
2664 }
2665
2666 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
2667 let mut ignore_stack = job.ignore_stack;
2668 if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2669 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2670 }
2671
2672 let mut entries_by_id_edits = Vec::new();
2673 let mut entries_by_path_edits = Vec::new();
2674 for mut entry in snapshot.child_entries(&job.path).cloned() {
2675 let was_ignored = entry.is_ignored;
2676 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2677 if entry.is_dir() {
2678 let child_ignore_stack = if entry.is_ignored {
2679 IgnoreStack::all()
2680 } else {
2681 ignore_stack.clone()
2682 };
2683 job.ignore_queue
2684 .send(UpdateIgnoreStatusJob {
2685 path: entry.path.clone(),
2686 ignore_stack: child_ignore_stack,
2687 ignore_queue: job.ignore_queue.clone(),
2688 })
2689 .await
2690 .unwrap();
2691 }
2692
2693 if entry.is_ignored != was_ignored {
2694 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2695 path_entry.scan_id = snapshot.scan_id;
2696 path_entry.is_ignored = entry.is_ignored;
2697 entries_by_id_edits.push(Edit::Insert(path_entry));
2698 entries_by_path_edits.push(Edit::Insert(entry));
2699 }
2700 }
2701
2702 let mut snapshot = self.snapshot.lock();
2703 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2704 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2705 }
2706}
2707
2708async fn refresh_entry(
2709 fs: &dyn Fs,
2710 snapshot: &Mutex<Snapshot>,
2711 path: Arc<Path>,
2712 abs_path: &Path,
2713) -> Result<Entry> {
2714 let root_char_bag;
2715 let next_entry_id;
2716 {
2717 let snapshot = snapshot.lock();
2718 root_char_bag = snapshot.root_char_bag;
2719 next_entry_id = snapshot.next_entry_id.clone();
2720 }
2721 let entry = Entry::new(
2722 path,
2723 &fs.metadata(abs_path)
2724 .await?
2725 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2726 &next_entry_id,
2727 root_char_bag,
2728 );
2729 Ok(snapshot.lock().insert_entry(entry, fs))
2730}
2731
2732fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2733 let mut result = root_char_bag;
2734 result.extend(
2735 path.to_string_lossy()
2736 .chars()
2737 .map(|c| c.to_ascii_lowercase()),
2738 );
2739 result
2740}
2741
/// Unit of work for the directory-scanning workers: one directory to read.
struct ScanJob {
    abs_path: PathBuf,
    // Path relative to the worktree root.
    path: Arc<Path>,
    // Gitignore state accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Clone of the queue sender, used to enqueue jobs for subdirectories;
    // the channel closes once every outstanding job (and its sender) is done.
    scan_queue: Sender<ScanJob>,
}
2748
/// Unit of work for the ignore-status workers: one directory whose children
/// need their ignore status recomputed.
struct UpdateIgnoreStatusJob {
    // Path (relative to the worktree root) of the directory to process.
    path: Arc<Path>,
    // Gitignore state accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue recursive jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2754
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending file-system events for the worktree's directory
    /// have been observed and processed by the background scanner.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2762
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create a sentinel file and wait for the scanner to notice it...
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for that to be noticed too.
            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2795
/// Seek dimension for `Traversal`: tracks the rightmost path seen plus running
/// entry counts under each combination of the dir/ignored filters.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    // All entries.
    count: usize,
    // Non-ignored entries (files and dirs).
    visible_count: usize,
    // All file entries.
    file_count: usize,
    // Non-ignored file entries.
    visible_file_count: usize,
}
2804
2805impl<'a> TraversalProgress<'a> {
2806 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2807 match (include_ignored, include_dirs) {
2808 (true, true) => self.count,
2809 (true, false) => self.file_count,
2810 (false, true) => self.visible_count,
2811 (false, false) => self.visible_file_count,
2812 }
2813 }
2814}
2815
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates a subtree's summary: counts add up, and the subtree's
    /// maximum path becomes the new rightmost path seen.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2825
impl<'a> Default for TraversalProgress<'a> {
    /// Starting state for a traversal: zero counts, positioned at the minimum
    /// (empty) path.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2837
/// Filtered, ordered walk over a snapshot's entries, backed by a sum-tree
/// cursor. The flags select whether directories and/or ignored entries are
/// yielded.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
2843
impl<'a> Traversal<'a> {
    /// Advances to the next entry included by this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until the traversal's position (counted in entries that
    /// pass the filters) reaches `offset`.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the current entry's entire subtree, stopping at the next entry
    /// that is not a descendant of it and that passes the filters; keeps
    /// skipping subtrees until such an entry is found or the tree ends.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position, counted in entries that pass the filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2889
2890impl<'a> Iterator for Traversal<'a> {
2891 type Item = &'a Entry;
2892
2893 fn next(&mut self) -> Option<Self::Item> {
2894 if let Some(item) = self.entry() {
2895 self.advance();
2896 Some(item)
2897 } else {
2898 None
2899 }
2900 }
2901}
2902
/// Seek targets for a `Traversal` cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first entry *after* the given path's subtree.
    PathSuccessor(&'a Path),
    // Seek to a numeric offset, counted under the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2913
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Direct lexicographic comparison against the rightmost path seen.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            // Report `Greater` while the cursor is still inside `path`'s
            // subtree (so seeking keeps moving forward), and `Equal` as soon
            // as it has escaped it — landing on the subtree's successor.
            TraversalTarget::PathSuccessor(path) => {
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Compare the target offset with the filtered running count.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
2936
/// Iterator over the direct children of one directory, implemented as a
/// traversal that skips each child's subtree and stops once entries no longer
/// live under `parent_path`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2941
2942impl<'a> Iterator for ChildEntriesIter<'a> {
2943 type Item = &'a Entry;
2944
2945 fn next(&mut self) -> Option<Self::Item> {
2946 if let Some(item) = self.traversal.entry() {
2947 if item.path.starts_with(&self.parent_path) {
2948 self.traversal.advance_to_sibling();
2949 return Some(item);
2950 }
2951 }
2952 None
2953 }
2954}
2955
/// Converts a local entry into its protobuf representation for sending to
/// remote collaborators. Paths are lossily converted to UTF-8 strings.
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
2969
2970impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2971 type Error = anyhow::Error;
2972
2973 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2974 if let Some(mtime) = entry.mtime {
2975 let kind = if entry.is_dir {
2976 EntryKind::Dir
2977 } else {
2978 let mut char_bag = root_char_bag.clone();
2979 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2980 EntryKind::File(char_bag)
2981 };
2982 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2983 Ok(Entry {
2984 id: entry.id as usize,
2985 kind,
2986 path: path.clone(),
2987 inode: entry.inode,
2988 mtime: mtime.into(),
2989 is_symlink: entry.is_symlink,
2990 is_ignored: entry.is_ignored,
2991 })
2992 } else {
2993 Err(anyhow!(
2994 "missing mtime in remote worktree entry {:?}",
2995 entry.path
2996 ))
2997 }
2998 }
2999}
3000
/// Conversion from an LSP position to this crate's UTF-16 point type.
trait ToPointUtf16 {
    fn to_point_utf16(self) -> PointUtf16;
}
3004
impl ToPointUtf16 for lsp::Position {
    /// LSP positions are already zero-based line/character (UTF-16) pairs,
    /// so this is a field-for-field mapping.
    fn to_point_utf16(self) -> PointUtf16 {
        PointUtf16::new(self.line, self.character)
    }
}
3010
3011fn diagnostic_ranges<'a>(
3012 diagnostic: &'a lsp::Diagnostic,
3013 abs_path: &'a Path,
3014) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
3015 diagnostic
3016 .related_information
3017 .iter()
3018 .flatten()
3019 .filter_map(move |info| {
3020 if info.location.uri.to_file_path().ok()? == abs_path {
3021 let info_start = PointUtf16::new(
3022 info.location.range.start.line,
3023 info.location.range.start.character,
3024 );
3025 let info_end = PointUtf16::new(
3026 info.location.range.end.line,
3027 info.location.range.end.character,
3028 );
3029 Some(info_start..info_end)
3030 } else {
3031 None
3032 }
3033 })
3034 .chain(Some(
3035 diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
3036 ))
3037}
3038
3039#[cfg(test)]
3040mod tests {
3041 use super::*;
3042 use crate::fs::FakeFs;
3043 use anyhow::Result;
3044 use client::test::{FakeHttpClient, FakeServer};
3045 use fs::RealFs;
3046 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3047 use language::{Diagnostic, LanguageConfig};
3048 use lsp::Url;
3049 use rand::prelude::*;
3050 use serde_json::json;
3051 use std::{cell::RefCell, rc::Rc};
3052 use std::{
3053 env,
3054 fmt::Write,
3055 time::{SystemTime, UNIX_EPOCH},
3056 };
3057 use text::Point;
3058 use unindent::Unindent as _;
3059 use util::test::temp_tree;
3060
3061 #[gpui::test]
3062 async fn test_traversal(mut cx: gpui::TestAppContext) {
3063 let fs = FakeFs::new();
3064 fs.insert_tree(
3065 "/root",
3066 json!({
3067 ".gitignore": "a/b\n",
3068 "a": {
3069 "b": "",
3070 "c": "",
3071 }
3072 }),
3073 )
3074 .await;
3075
3076 let client = Client::new();
3077 let http_client = FakeHttpClient::with_404_response();
3078 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3079
3080 let tree = Worktree::open_local(
3081 client,
3082 user_store,
3083 Arc::from(Path::new("/root")),
3084 Arc::new(fs),
3085 Default::default(),
3086 &mut cx.to_async(),
3087 )
3088 .await
3089 .unwrap();
3090 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3091 .await;
3092
3093 tree.read_with(&cx, |tree, _| {
3094 assert_eq!(
3095 tree.entries(false)
3096 .map(|entry| entry.path.as_ref())
3097 .collect::<Vec<_>>(),
3098 vec![
3099 Path::new(""),
3100 Path::new(".gitignore"),
3101 Path::new("a"),
3102 Path::new("a/c"),
3103 ]
3104 );
3105 })
3106 }
3107
3108 #[gpui::test]
3109 async fn test_save_file(mut cx: gpui::TestAppContext) {
3110 let dir = temp_tree(json!({
3111 "file1": "the old contents",
3112 }));
3113
3114 let client = Client::new();
3115 let http_client = FakeHttpClient::with_404_response();
3116 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3117
3118 let tree = Worktree::open_local(
3119 client,
3120 user_store,
3121 dir.path(),
3122 Arc::new(RealFs),
3123 Default::default(),
3124 &mut cx.to_async(),
3125 )
3126 .await
3127 .unwrap();
3128 let buffer = tree
3129 .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3130 .await
3131 .unwrap();
3132 let save = buffer.update(&mut cx, |buffer, cx| {
3133 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3134 buffer.save(cx).unwrap()
3135 });
3136 save.await.unwrap();
3137
3138 let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
3139 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3140 }
3141
3142 #[gpui::test]
3143 async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
3144 let dir = temp_tree(json!({
3145 "file1": "the old contents",
3146 }));
3147 let file_path = dir.path().join("file1");
3148
3149 let client = Client::new();
3150 let http_client = FakeHttpClient::with_404_response();
3151 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3152
3153 let tree = Worktree::open_local(
3154 client,
3155 user_store,
3156 file_path.clone(),
3157 Arc::new(RealFs),
3158 Default::default(),
3159 &mut cx.to_async(),
3160 )
3161 .await
3162 .unwrap();
3163 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3164 .await;
3165 cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));
3166
3167 let buffer = tree
3168 .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
3169 .await
3170 .unwrap();
3171 let save = buffer.update(&mut cx, |buffer, cx| {
3172 buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3173 buffer.save(cx).unwrap()
3174 });
3175 save.await.unwrap();
3176
3177 let new_text = std::fs::read_to_string(file_path).unwrap();
3178 assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3179 }
3180
3181 #[gpui::test]
3182 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3183 let dir = temp_tree(json!({
3184 "a": {
3185 "file1": "",
3186 "file2": "",
3187 "file3": "",
3188 },
3189 "b": {
3190 "c": {
3191 "file4": "",
3192 "file5": "",
3193 }
3194 }
3195 }));
3196
3197 let user_id = 5;
3198 let mut client = Client::new();
3199 let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3200 let user_store = server.build_user_store(client.clone(), &mut cx).await;
3201 let tree = Worktree::open_local(
3202 client,
3203 user_store.clone(),
3204 dir.path(),
3205 Arc::new(RealFs),
3206 Default::default(),
3207 &mut cx.to_async(),
3208 )
3209 .await
3210 .unwrap();
3211
3212 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3213 let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3214 async move { buffer.await.unwrap() }
3215 };
3216 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3217 tree.read_with(cx, |tree, _| {
3218 tree.entry_for_path(path)
3219 .expect(&format!("no entry for path {}", path))
3220 .id
3221 })
3222 };
3223
3224 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3225 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3226 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3227 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3228
3229 let file2_id = id_for_path("a/file2", &cx);
3230 let file3_id = id_for_path("a/file3", &cx);
3231 let file4_id = id_for_path("b/c/file4", &cx);
3232
3233 // Wait for the initial scan.
3234 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3235 .await;
3236
3237 // Create a remote copy of this worktree.
3238 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3239 let remote = Worktree::remote(
3240 1,
3241 1,
3242 initial_snapshot.to_proto(),
3243 Client::new(),
3244 user_store,
3245 Default::default(),
3246 &mut cx.to_async(),
3247 )
3248 .await
3249 .unwrap();
3250
3251 cx.read(|cx| {
3252 assert!(!buffer2.read(cx).is_dirty());
3253 assert!(!buffer3.read(cx).is_dirty());
3254 assert!(!buffer4.read(cx).is_dirty());
3255 assert!(!buffer5.read(cx).is_dirty());
3256 });
3257
3258 // Rename and delete files and directories.
3259 tree.flush_fs_events(&cx).await;
3260 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3261 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3262 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3263 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3264 tree.flush_fs_events(&cx).await;
3265
3266 let expected_paths = vec![
3267 "a",
3268 "a/file1",
3269 "a/file2.new",
3270 "b",
3271 "d",
3272 "d/file3",
3273 "d/file4",
3274 ];
3275
3276 cx.read(|app| {
3277 assert_eq!(
3278 tree.read(app)
3279 .paths()
3280 .map(|p| p.to_str().unwrap())
3281 .collect::<Vec<_>>(),
3282 expected_paths
3283 );
3284
3285 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3286 assert_eq!(id_for_path("d/file3", &cx), file3_id);
3287 assert_eq!(id_for_path("d/file4", &cx), file4_id);
3288
3289 assert_eq!(
3290 buffer2.read(app).file().unwrap().path().as_ref(),
3291 Path::new("a/file2.new")
3292 );
3293 assert_eq!(
3294 buffer3.read(app).file().unwrap().path().as_ref(),
3295 Path::new("d/file3")
3296 );
3297 assert_eq!(
3298 buffer4.read(app).file().unwrap().path().as_ref(),
3299 Path::new("d/file4")
3300 );
3301 assert_eq!(
3302 buffer5.read(app).file().unwrap().path().as_ref(),
3303 Path::new("b/c/file5")
3304 );
3305
3306 assert!(!buffer2.read(app).file().unwrap().is_deleted());
3307 assert!(!buffer3.read(app).file().unwrap().is_deleted());
3308 assert!(!buffer4.read(app).file().unwrap().is_deleted());
3309 assert!(buffer5.read(app).file().unwrap().is_deleted());
3310 });
3311
3312 // Update the remote worktree. Check that it becomes consistent with the
3313 // local worktree.
3314 remote.update(&mut cx, |remote, cx| {
3315 let update_message =
3316 tree.read(cx)
3317 .snapshot()
3318 .build_update(&initial_snapshot, 1, 1, true);
3319 remote
3320 .as_remote_mut()
3321 .unwrap()
3322 .snapshot
3323 .apply_update(update_message)
3324 .unwrap();
3325
3326 assert_eq!(
3327 remote
3328 .paths()
3329 .map(|p| p.to_str().unwrap())
3330 .collect::<Vec<_>>(),
3331 expected_paths
3332 );
3333 });
3334 }
3335
3336 #[gpui::test]
3337 async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3338 let dir = temp_tree(json!({
3339 ".git": {},
3340 ".gitignore": "ignored-dir\n",
3341 "tracked-dir": {
3342 "tracked-file1": "tracked contents",
3343 },
3344 "ignored-dir": {
3345 "ignored-file1": "ignored contents",
3346 }
3347 }));
3348
3349 let client = Client::new();
3350 let http_client = FakeHttpClient::with_404_response();
3351 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3352
3353 let tree = Worktree::open_local(
3354 client,
3355 user_store,
3356 dir.path(),
3357 Arc::new(RealFs),
3358 Default::default(),
3359 &mut cx.to_async(),
3360 )
3361 .await
3362 .unwrap();
3363 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3364 .await;
3365 tree.flush_fs_events(&cx).await;
3366 cx.read(|cx| {
3367 let tree = tree.read(cx);
3368 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3369 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3370 assert_eq!(tracked.is_ignored, false);
3371 assert_eq!(ignored.is_ignored, true);
3372 });
3373
3374 std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3375 std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3376 tree.flush_fs_events(&cx).await;
3377 cx.read(|cx| {
3378 let tree = tree.read(cx);
3379 let dot_git = tree.entry_for_path(".git").unwrap();
3380 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3381 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3382 assert_eq!(tracked.is_ignored, false);
3383 assert_eq!(ignored.is_ignored, true);
3384 assert_eq!(dot_git.is_ignored, true);
3385 });
3386 }
3387
3388 #[gpui::test]
3389 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3390 let user_id = 100;
3391 let mut client = Client::new();
3392 let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3393 let user_store = server.build_user_store(client.clone(), &mut cx).await;
3394
3395 let fs = Arc::new(FakeFs::new());
3396 fs.insert_tree(
3397 "/the-dir",
3398 json!({
3399 "a.txt": "a-contents",
3400 "b.txt": "b-contents",
3401 }),
3402 )
3403 .await;
3404
3405 let worktree = Worktree::open_local(
3406 client.clone(),
3407 user_store,
3408 "/the-dir".as_ref(),
3409 fs,
3410 Default::default(),
3411 &mut cx.to_async(),
3412 )
3413 .await
3414 .unwrap();
3415
3416 // Spawn multiple tasks to open paths, repeating some paths.
3417 let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3418 (
3419 worktree.open_buffer("a.txt", cx),
3420 worktree.open_buffer("b.txt", cx),
3421 worktree.open_buffer("a.txt", cx),
3422 )
3423 });
3424
3425 let buffer_a_1 = buffer_a_1.await.unwrap();
3426 let buffer_a_2 = buffer_a_2.await.unwrap();
3427 let buffer_b = buffer_b.await.unwrap();
3428 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3429 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3430
3431 // There is only one buffer per path.
3432 let buffer_a_id = buffer_a_1.id();
3433 assert_eq!(buffer_a_2.id(), buffer_a_id);
3434
3435 // Open the same path again while it is still open.
3436 drop(buffer_a_1);
3437 let buffer_a_3 = worktree
3438 .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3439 .await
3440 .unwrap();
3441
3442 // There's still only one buffer per path.
3443 assert_eq!(buffer_a_3.id(), buffer_a_id);
3444 }
3445
    // Verifies dirty-state tracking: edits mark a buffer dirty and emit
    // Dirtied, saving clears the flag and emits Saved, and deleting the
    // backing file dirties a clean buffer (but not an already-dirty one).
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Drain pending fs events and wait for the initial scan so later
        // file deletions are observed against a settled worktree.
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Records every buffer event so assertions can check both the kind
        // and the order of emitted events.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        // Dirty the buffer first, then delete the file: only the
        // FileHandleChanged event should follow.
        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3585
    // Verifies disk-change handling: a clean buffer reloads when its file
    // changes on disk, while a dirty buffer keeps its in-memory edits and is
    // flagged as conflicted instead.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan so the file entry exists before opening.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // The reload leaves the buffer clean: it matches disk exactly.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3688
    // Verifies that diagnostics published by a language server for a file
    // that is not yet open are applied to its buffer once it is opened.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        // Register a "Rust" language backed by the fake server, keyed on the
        // ".rs" suffix, so opening any .rs buffer starts the server.
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        // Publish a diagnostic for "a.rs" while that file is still closed.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // The previously-published diagnostic shows up in the newly-opened
        // buffer, converted to buffer coordinates.
        buffer.read_with(&cx, |buffer, _| {
            let diagnostics = buffer
                .snapshot()
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
3770
    // Verifies that LSP diagnostics connected via `related_information` are
    // grouped: each group gets one primary entry (`is_primary: true`) plus
    // its supporting hints, all sharing a `group_id`.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Five diagnostics forming two groups:
        //   - "error 1" (WARNING) with one HINT that points back at it
        //   - "error 2" (ERROR) with two HINTs that point back at it
        // Hints and primaries reference each other via related_information.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.update_lsp_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // Entries come back ordered by buffer position. Group 0 is "error 1"
        // + its hint; group 1 is "error 2" + both of its hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched on its own by group id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
4035
    // Randomized fuzz test for the background scanner: mutates a real
    // temp-dir tree while interleaving event delivery, then checks that
    // (a) invariants hold after every batch, (b) a fresh scan of the final
    // tree matches the incrementally-maintained snapshot, and (c) snapshots
    // taken mid-run can be caught up via `build_update`/`apply_update`.
    // Mutation/op counts are overridable via OPERATIONS / INITIAL_ENTRIES.
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Build an initial on-disk tree; probability 1.0 forces insertions.
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        let mut initial_snapshot = Snapshot {
            id: 0,
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        // Seed the snapshot with the root entry before scanning.
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave tree mutations with delivery of batched fs events,
        // occasionally capturing a snapshot to replay updates against later.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush any still-undelivered events so the scanner reaches the
        // final on-disk state.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A from-scratch scan of the final tree must agree with the
        // incrementally-updated snapshot.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every intermediate snapshot can be caught up to the final state
        // via an update, with or without ignored entries included.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries so the starting point matches what a
                // client that excludes ignored files would hold.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4152
    // Applies one random mutation to the directory tree rooted at
    // `root_path` — creating an entry, writing a .gitignore, renaming, or
    // deleting — and returns synthetic fs events describing what changed.
    // `insertion_probability` biases the choice toward creation (1.0 forces
    // it, which is how the initial tree is built).
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        // Synthesizes an fsevent for `path`, using the current unix time (in
        // seconds) as the event id.
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Create a new file or directory. Forced when the tree is empty
        // (only the root dir and no files), so deletion/rename always has
        // something to work on.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Occasionally write a .gitignore into a random directory,
            // listing a random subset of its files and subdirectories.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Otherwise rename or delete an existing entry. `dirs[1..]`
            // skips the root so it is never renamed or removed.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Move into a directory that is not inside the moved entry
                // itself; sometimes overwrite an existing directory.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                // Deleting a directory records an event for each entry that
                // lived under it, mimicking a per-path event stream.
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
4275
4276 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4277 let child_entries = std::fs::read_dir(&path).unwrap();
4278 let mut dirs = vec![path];
4279 let mut files = Vec::new();
4280 for child_entry in child_entries {
4281 let child_path = child_entry.unwrap().path();
4282 if child_path.is_dir() {
4283 let (child_dirs, child_files) = read_dir_recursive(child_path);
4284 dirs.extend(child_dirs);
4285 files.extend(child_files);
4286 } else {
4287 files.push(child_path);
4288 }
4289 }
4290 (dirs, files)
4291 }
4292
4293 fn gen_name(rng: &mut impl Rng) -> String {
4294 (0..6)
4295 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4296 .map(char::from)
4297 .collect()
4298 }
4299
    impl Snapshot {
        // Asserts the snapshot's internal consistency. Used by test_random
        // after every scan/event batch.
        fn check_invariants(&self) {
            // 1. The `files` iterators (with and without ignored entries)
            //    must yield exactly the file entries of `entries_by_path`,
            //    in the same order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // 2. Walking the tree via `child_entries` must visit the same
            //    paths, in the same order, as a linear cursor scan.
            //    NOTE(review): despite the name, inserting children at a
            //    fixed index of a popped-from-the-end stack yields a
            //    depth-first preorder traversal, which is what makes it
            //    comparable to the cursor's path order below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // 3. Every tracked ignore must correspond to an existing
            //    directory entry that actually contains a .gitignore entry.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flattens the snapshot into sorted (path, inode, is_ignored)
        // tuples for order-insensitive equality checks between snapshots,
        // optionally dropping ignored entries.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4351}