1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary, ProjectEntry,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap, HashSet};
11use futures::{Stream, StreamExt};
12use fuzzy::CharBag;
13use gpui::{
14 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
15 Task, UpgradeModelHandle, WeakModelHandle,
16};
17use language::{
18 range_from_lsp, Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Operation,
19 PointUtf16, Rope,
20};
21use lazy_static::lazy_static;
22use parking_lot::Mutex;
23use postage::{
24 prelude::{Sink as _, Stream as _},
25 watch,
26};
27use serde::Deserialize;
28use smol::channel::{self, Sender};
29use std::{
30 any::Any,
31 cmp::{self, Ordering},
32 convert::{TryFrom, TryInto},
33 ffi::{OsStr, OsString},
34 fmt,
35 future::Future,
36 ops::Deref,
37 path::{Path, PathBuf},
38 sync::{
39 atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
40 Arc,
41 },
42 time::{Duration, SystemTime},
43};
44use sum_tree::{Bias, TreeMap};
45use sum_tree::{Edit, SeekTarget, SumTree};
46use util::{post_inc, ResultExt, TryFutureExt};
47
// Shared constant for the `.gitignore` file name, compared against entry
// names during scanning. `OsStr::new` on a literal yields `&'static OsStr`.
lazy_static! {
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
51
/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently in progress.
    Idle,
    /// The background scanner is traversing the directory tree.
    Scanning,
    /// The scan failed; the error is `Arc`-wrapped so the state stays `Clone`.
    Err(Arc<anyhow::Error>),
}
58
/// Identifier for a worktree: the gpui model id for local worktrees, or the
/// server-assigned id (see `WorktreeId::from_proto`) for remote ones.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
61
/// A directory tree being edited — either rooted on the local filesystem, or
/// mirrored from a remote collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
66
67impl Entity for Worktree {
68 type Event = ();
69
70 fn release(&mut self, cx: &mut MutableAppContext) {
71 if let Some(worktree) = self.as_local_mut() {
72 if let Registration::Done { project_id } = worktree.registration {
73 let client = worktree.client.clone();
74 let unregister_message = proto::UnregisterWorktree {
75 project_id,
76 worktree_id: worktree.id().to_proto(),
77 };
78 cx.foreground()
79 .spawn(async move {
80 client.send(unregister_message).await?;
81 Ok::<_, anyhow::Error>(())
82 })
83 .detach_and_log_err(cx);
84 }
85 }
86 }
87}
88
89impl Worktree {
    /// Creates a local worktree rooted at `path` and starts the background
    /// scanner task that watches the filesystem and keeps the shared
    /// background snapshot up to date.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        weak: bool,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, weak, fs.clone(), cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // The scanner runs on the background executor and reports progress
            // through `scan_states_tx` (consumed in `LocalWorktree::new`).
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                // 100ms is the fs-event batching window — NOTE(review):
                // presumably a debounce; confirm against `Fs::watch`.
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
114
    /// Builds a remote worktree from a snapshot sent by the host, and wires up
    /// the channel through which subsequent `UpdateWorktree` messages are
    /// applied to a background copy of the snapshot.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        // Lowercased character bag of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let weak = worktree.weak;
        // Deserialize entries and diagnostic summaries on the background
        // executor, since worktrees can be large.
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // A malformed entry is skipped rather than failing the
                        // whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    // Remote worktrees have no meaningful absolute path.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Apply incoming worktree updates to the snapshot in the
                // background, publishing each new version on the watch channel.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                // Whenever a new snapshot is published, pull it into the
                // foreground model; stop once the model has been dropped.
                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    user_store,
                    diagnostic_summaries,
                    weak,
                })
            })
        });

        Ok(worktree)
    }
237
238 pub fn as_local(&self) -> Option<&LocalWorktree> {
239 if let Worktree::Local(worktree) = self {
240 Some(worktree)
241 } else {
242 None
243 }
244 }
245
246 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
247 if let Worktree::Remote(worktree) = self {
248 Some(worktree)
249 } else {
250 None
251 }
252 }
253
254 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
255 if let Worktree::Local(worktree) = self {
256 Some(worktree)
257 } else {
258 None
259 }
260 }
261
262 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
263 if let Worktree::Remote(worktree) = self {
264 Some(worktree)
265 } else {
266 None
267 }
268 }
269
270 pub fn snapshot(&self) -> Snapshot {
271 match self {
272 Worktree::Local(worktree) => worktree.snapshot(),
273 Worktree::Remote(worktree) => worktree.snapshot(),
274 }
275 }
276
277 pub fn is_weak(&self) -> bool {
278 match self {
279 Worktree::Local(worktree) => worktree.weak,
280 Worktree::Remote(worktree) => worktree.weak,
281 }
282 }
283
284 pub fn replica_id(&self) -> ReplicaId {
285 match self {
286 Worktree::Local(_) => 0,
287 Worktree::Remote(worktree) => worktree.replica_id,
288 }
289 }
290
291 pub fn remove_collaborator(
292 &mut self,
293 peer_id: PeerId,
294 replica_id: ReplicaId,
295 cx: &mut ModelContext<Self>,
296 ) {
297 match self {
298 Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
299 Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
300 }
301 }
302
303 pub fn user_store(&self) -> &ModelHandle<UserStore> {
304 match self {
305 Worktree::Local(worktree) => &worktree.user_store,
306 Worktree::Remote(worktree) => &worktree.user_store,
307 }
308 }
309
310 pub fn diagnostic_summaries<'a>(
311 &'a self,
312 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
313 match self {
314 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
315 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
316 }
317 .iter()
318 .map(|(path, summary)| (path.0.clone(), summary.clone()))
319 }
320
321 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
322 match self {
323 Worktree::Local(worktree) => &mut worktree.loading_buffers,
324 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
325 }
326 }
327
    /// Opens the buffer at `path`, returning the buffer handle and a flag
    /// indicating whether this call created it (`true` for exactly one of any
    /// set of concurrent callers; `false` if it was already open).
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<(ModelHandle<Buffer>, bool)>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok((existing_buffer, false)) });
        }

        // Flipped to false by the first waiter that observes the loaded
        // buffer, so only one caller reports it as new (see `fetch_and` below).
        let is_new = Arc::new(AtomicBool::new(true));
        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    *tx.borrow_mut() = Some(match result {
                        Ok(buffer) => Ok((buffer, is_new)),
                        Err(error) => Err(Arc::new(error)),
                    });
                })
                .detach();
                rx
            }
        };

        cx.spawn(|_, _| async move {
            // Wait until the loading task above publishes its result.
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return match result {
                        // `fetch_and(false)` returns the prior value, so the
                        // first observer — and only the first — sees `true`.
                        Ok((buf, is_new)) => Ok((buf.clone(), is_new.fetch_and(false, SeqCst))),
                        Err(error) => Err(anyhow!("{}", error)),
                    };
                }
                loading_watch.recv().await;
            }
        })
    }
388
389 #[cfg(feature = "test-support")]
390 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
391 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
392 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
393 Worktree::Remote(worktree) => {
394 Box::new(worktree.open_buffers.values().filter_map(|buf| {
395 if let RemoteBuffer::Loaded(buf) = buf {
396 Some(buf)
397 } else {
398 None
399 }
400 }))
401 }
402 };
403
404 let path = path.as_ref();
405 open_buffers
406 .find(|buffer| {
407 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
408 file.path().as_ref() == path
409 } else {
410 false
411 }
412 })
413 .is_some()
414 }
415
416 pub fn handle_update_buffer(
417 &mut self,
418 envelope: TypedEnvelope<proto::UpdateBuffer>,
419 cx: &mut ModelContext<Self>,
420 ) -> Result<()> {
421 let payload = envelope.payload.clone();
422 let buffer_id = payload.buffer_id as usize;
423 let ops = payload
424 .operations
425 .into_iter()
426 .map(|op| language::proto::deserialize_operation(op))
427 .collect::<Result<Vec<_>, _>>()?;
428
429 match self {
430 Worktree::Local(worktree) => {
431 let buffer = worktree
432 .open_buffers
433 .get(&buffer_id)
434 .and_then(|buf| buf.upgrade(cx))
435 .ok_or_else(|| {
436 anyhow!("invalid buffer {} in update buffer message", buffer_id)
437 })?;
438 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
439 }
440 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
441 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
442 Some(RemoteBuffer::Loaded(buffer)) => {
443 if let Some(buffer) = buffer.upgrade(cx) {
444 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
445 } else {
446 worktree
447 .open_buffers
448 .insert(buffer_id, RemoteBuffer::Operations(ops));
449 }
450 }
451 None => {
452 worktree
453 .open_buffers
454 .insert(buffer_id, RemoteBuffer::Operations(ops));
455 }
456 },
457 }
458
459 Ok(())
460 }
461
    /// Handles a collaborator's request to save a buffer that this local
    /// worktree shares with them, replying with the saved version and mtime.
    ///
    /// Panics if called on a remote worktree; errors if the worktree is not
    /// currently shared or the buffer id is unknown for that peer.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        // Respond asynchronously once the save finishes; failures are logged
        // rather than surfaced to this method's caller.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
514
515 pub fn handle_buffer_saved(
516 &mut self,
517 envelope: TypedEnvelope<proto::BufferSaved>,
518 cx: &mut ModelContext<Self>,
519 ) -> Result<()> {
520 let payload = envelope.payload.clone();
521 let worktree = self.as_remote_mut().unwrap();
522 if let Some(buffer) = worktree
523 .open_buffers
524 .get(&(payload.buffer_id as usize))
525 .and_then(|buf| buf.upgrade(cx))
526 {
527 buffer.update(cx, |buffer, cx| {
528 let version = payload.version.try_into()?;
529 let mtime = payload
530 .mtime
531 .ok_or_else(|| anyhow!("missing mtime"))?
532 .into();
533 buffer.did_save(version, mtime, None, cx);
534 Result::<_, anyhow::Error>::Ok(())
535 })?;
536 }
537 Ok(())
538 }
539
    /// Handles a collaborator's request to format a shared buffer, replying
    /// with `Ack` on success or an error message on failure.
    ///
    /// Panics if called on a remote worktree.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();

        Ok(())
    }
581
    /// Pulls the latest snapshot produced by the background scanner (local) or
    /// the update task (remote) into this model, reconciling open buffers with
    /// the new file state once scanning has settled.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // While scanning, schedule a delayed re-poll instead of
                    // reconciling buffers on every intermediate snapshot.
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                // With the fake fs (tests), yielding once lets
                                // the scanner make progress without wall-clock
                                // delays.
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
614
    /// Refreshes the `File` metadata of every open buffer against the current
    /// snapshot: lookup by entry id first, then by path; a buffer whose entry
    /// has vanished keeps its old path with `entry_id: None`. Buffers whose
    /// models were dropped are pruned from the open-buffer maps afterwards.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Only loaded remote buffers have a model to update.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        // NOTE(review): `abs_path` / `entry_for_id` / `entry_for_path` resolve
        // through the snapshot — presumably via a Deref impl outside this view.
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            // Entry no longer exists: keep the stale path but
                            // drop the entry id.
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
690
    /// Sends a buffer operation to the server for broadcast to collaborators.
    /// Does nothing when a local worktree is not currently shared. On failure
    /// the operation is pushed onto `queued_operations` — NOTE(review): no
    /// code in this view re-sends that queue; confirm retry behavior elsewhere.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
731}
732
733impl WorktreeId {
734 pub fn from_usize(handle_id: usize) -> Self {
735 Self(handle_id)
736 }
737
738 pub(crate) fn from_proto(id: u64) -> Self {
739 Self(id as usize)
740 }
741
742 pub fn to_proto(&self) -> u64 {
743 self.0 as u64
744 }
745
746 pub fn to_usize(&self) -> usize {
747 self.0
748 }
749}
750
751impl fmt::Display for WorktreeId {
752 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
753 self.0.fmt(f)
754 }
755}
756
/// A cloneable view of a worktree's file entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Scan generation counter — presumably bumped per scanner pass; the
    // scanner itself is outside this view.
    scan_id: usize,
    // Absolute root path on disk; empty for remote worktrees (see `remote`).
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed gitignores keyed by path, paired with a usize — NOTE(review):
    // presumably the scan_id at which each was last observed; confirm.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path, plus an id-keyed index (`PathEntry`) into them.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    // Shared so background and foreground snapshots allocate from one sequence.
    next_entry_id: Arc<AtomicUsize>,
}
770
/// A worktree rooted on the local filesystem, kept current by a background
/// scanner task.
pub struct LocalWorktree {
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    config: WorktreeConfig,
    // Snapshot mutated by the background scanner; locked and cloned on poll.
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Held only to keep the scanner task alive.
    _background_scanner_task: Option<Task<()>>,
    // Pending delayed re-poll while a scan is in progress (see `poll_snapshot`).
    poll_task: Option<Task<()>>,
    registration: Registration,
    // `Some` while this worktree is shared with collaborators.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Open buffers keyed by buffer id; weak so dropped buffers can be pruned.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers shared with each peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Diagnostics received per path, replayed when a buffer is opened.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    // Operations that failed to send; see `send_buffer_update`.
    queued_operations: Vec<(u64, Operation)>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    weak: bool,
}
791
/// Progress of registering a local worktree with the server; `Done` carries
/// the project id needed to unregister on release.
#[derive(Debug, Eq, PartialEq)]
enum Registration {
    None,
    Pending,
    Done { project_id: u64 },
}
798
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Channel on which settled snapshots are queued for transmission.
    snapshots_tx: Sender<Snapshot>,
    // Keeps the snapshot-forwarding task alive while shared.
    _maintain_remote_snapshot: Option<Task<()>>,
}
804
/// A worktree mirrored from a remote host, kept current by applying
/// `UpdateWorktree` messages (see `Worktree::remote`).
pub struct RemoteWorktree {
    project_id: u64,
    // Foreground copy, refreshed from `snapshot_rx` by `poll_snapshot`.
    snapshot: Snapshot,
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Incoming worktree updates are sent here for background application.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Either a loaded buffer or operations queued until it loads.
    open_buffers: HashMap<usize, RemoteBuffer>,
    user_store: ModelHandle<UserStore>,
    queued_operations: Vec<(u64, Operation)>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    weak: bool,
}
819
/// In-flight buffer loads keyed by path. Each value is a watch receiver that
/// yields `None` until the load finishes, then the buffer (plus the shared
/// "is new" flag) or the load error. Used by `Worktree::open_buffer` to
/// deduplicate concurrent opens of the same path.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<
        Option<Result<(ModelHandle<Buffer>, Arc<AtomicBool>), Arc<anyhow::Error>>>,
    >,
>;
826
/// Per-worktree settings parsed from an optional `.zed.toml` at the root
/// (see `LocalWorktree::new`).
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins allowed to collaborate on this worktree.
    collaborators: Vec<String>,
}
831
832impl LocalWorktree {
    /// Constructs the worktree model and its initial snapshot, returning the
    /// sender on which the background scanner reports `ScanState` changes.
    /// The scanner task itself is started later by `Worktree::open_local`.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        weak: bool,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is empty.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // Optional per-worktree configuration; parse failures silently fall
        // back to the default config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry if the path exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                registration: Registration::None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                client,
                user_store,
                fs,
                weak,
            };

            // Forward scan-state changes into `last_scan_state_rx`, re-poll the
            // snapshot, and — once scanning settles while shared — queue the
            // latest snapshot for transmission to collaborators.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree model was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
940
941 pub fn authorized_logins(&self) -> Vec<String> {
942 self.config.collaborators.clone()
943 }
944
    /// Returns the open buffer for `path` in this worktree, if any, pruning
    /// dropped buffers from `open_buffers` along the way.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut result = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    // Match on both worktree and path: the map may hold buffers
                    // whose files live in other worktrees' paths.
                    if file.worktree == handle && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                // Buffer model was dropped; discard its weak handle.
                false
            }
        });
        result
    }
966
    /// Loads the file at `path` from disk, creates a buffer for it — seeding
    /// any diagnostics received before the buffer was opened — and registers
    /// the buffer as open.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Diagnostics may have arrived for this path before it was opened.
            let diagnostics = this.update(&mut cx, |this, _| {
                this.as_local_mut().unwrap().diagnostics.get(&path).cloned()
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                // Broadcast the diagnostic operations to collaborators, then
                // track the buffer via a weak handle.
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1003
    /// Records `buffer` as shared with `peer_id` (keeping a strong handle so
    /// it stays alive for that peer) and returns the serialized buffer for the
    /// open-buffer RPC response.
    pub fn open_remote_buffer(
        &mut self,
        peer_id: PeerId,
        buffer: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> proto::OpenBufferResponse {
        self.shared_buffers
            .entry(peer_id)
            .or_default()
            .insert(buffer.id() as u64, buffer.clone());
        proto::OpenBufferResponse {
            buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
        }
    }
1018
    /// Drops the strong handle held for a buffer that a peer has closed.
    /// Unknown peers or buffer ids are ignored.
    pub fn close_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
            shared_buffers.remove(&envelope.payload.buffer_id);
            cx.notify();
        }

        Ok(())
    }
1031
1032 pub fn remove_collaborator(
1033 &mut self,
1034 peer_id: PeerId,
1035 replica_id: ReplicaId,
1036 cx: &mut ModelContext<Worktree>,
1037 ) {
1038 self.shared_buffers.remove(&peer_id);
1039 for (_, buffer) in &self.open_buffers {
1040 if let Some(buffer) = buffer.upgrade(cx) {
1041 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1042 }
1043 }
1044 cx.notify();
1045 }
1046
1047 pub fn update_diagnostics(
1048 &mut self,
1049 worktree_path: Arc<Path>,
1050 params: lsp::PublishDiagnosticsParams,
1051 disk_based_sources: &HashSet<String>,
1052 cx: &mut ModelContext<Worktree>,
1053 ) -> Result<()> {
1054 let mut next_group_id = 0;
1055 let mut diagnostics = Vec::default();
1056 let mut primary_diagnostic_group_ids = HashMap::default();
1057 let mut sources_by_group_id = HashMap::default();
1058 let mut supporting_diagnostic_severities = HashMap::default();
1059 for diagnostic in ¶ms.diagnostics {
1060 let source = diagnostic.source.as_ref();
1061 let code = diagnostic.code.as_ref().map(|code| match code {
1062 lsp::NumberOrString::Number(code) => code.to_string(),
1063 lsp::NumberOrString::String(code) => code.clone(),
1064 });
1065 let range = range_from_lsp(diagnostic.range);
1066 let is_supporting = diagnostic
1067 .related_information
1068 .as_ref()
1069 .map_or(false, |infos| {
1070 infos.iter().any(|info| {
1071 primary_diagnostic_group_ids.contains_key(&(
1072 source,
1073 code.clone(),
1074 range_from_lsp(info.location.range),
1075 ))
1076 })
1077 });
1078
1079 if is_supporting {
1080 if let Some(severity) = diagnostic.severity {
1081 supporting_diagnostic_severities
1082 .insert((source, code.clone(), range), severity);
1083 }
1084 } else {
1085 let group_id = post_inc(&mut next_group_id);
1086 let is_disk_based =
1087 source.map_or(false, |source| disk_based_sources.contains(source));
1088
1089 sources_by_group_id.insert(group_id, source);
1090 primary_diagnostic_group_ids
1091 .insert((source, code.clone(), range.clone()), group_id);
1092
1093 diagnostics.push(DiagnosticEntry {
1094 range,
1095 diagnostic: Diagnostic {
1096 code: code.clone(),
1097 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
1098 message: diagnostic.message.clone(),
1099 group_id,
1100 is_primary: true,
1101 is_valid: true,
1102 is_disk_based,
1103 },
1104 });
1105 if let Some(infos) = &diagnostic.related_information {
1106 for info in infos {
1107 if info.location.uri == params.uri {
1108 let range = range_from_lsp(info.location.range);
1109 diagnostics.push(DiagnosticEntry {
1110 range,
1111 diagnostic: Diagnostic {
1112 code: code.clone(),
1113 severity: DiagnosticSeverity::INFORMATION,
1114 message: info.message.clone(),
1115 group_id,
1116 is_primary: false,
1117 is_valid: true,
1118 is_disk_based,
1119 },
1120 });
1121 }
1122 }
1123 }
1124 }
1125 }
1126
1127 for entry in &mut diagnostics {
1128 let diagnostic = &mut entry.diagnostic;
1129 if !diagnostic.is_primary {
1130 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
1131 if let Some(&severity) = supporting_diagnostic_severities.get(&(
1132 source,
1133 diagnostic.code.clone(),
1134 entry.range.clone(),
1135 )) {
1136 diagnostic.severity = severity;
1137 }
1138 }
1139 }
1140
1141 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
1142 Ok(())
1143 }
1144
    /// Stores diagnostics for `worktree_path`, applies them to the matching
    /// open buffer (if any), records the per-path summary, and — when the
    /// worktree is shared — pushes the summary to the server.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        // At most one open buffer can match the path, hence the `break`.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        let summary = DiagnosticSummary::new(&diagnostics);
        self.diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        self.diagnostics.insert(worktree_path.clone(), diagnostics);

        // Forward the summary to collaborators when shared; send errors are
        // logged, not returned.
        if let Some(share) = self.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = self.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = self.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
1205
    /// Forwards a buffer operation to the server, if this worktree is shared.
    /// Returns `None` immediately when there is no active share.
    ///
    /// On RPC failure the operation is queued (`queued_operations`) rather
    /// than dropped, so it can be re-sent later.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<()> {
        let share = self.share.as_ref()?;
        let project_id = share.project_id;
        let worktree_id = self.id();
        let rpc = self.client.clone();
        cx.spawn(|worktree, mut cx| async move {
            if let Err(error) = rpc
                .request(proto::UpdateBuffer {
                    project_id,
                    worktree_id: worktree_id.0 as u64,
                    buffer_id,
                    operations: vec![language::proto::serialize_operation(&operation)],
                })
                .await
            {
                worktree.update(&mut cx, |worktree, _| {
                    log::error!("error sending buffer operation: {}", error);
                    // Keep the operation around so it isn't lost on failure.
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .queued_operations
                        .push((buffer_id, operation));
                });
            }
        })
        .detach();
        None
    }
1239
1240 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1241 let mut scan_state_rx = self.last_scan_state_rx.clone();
1242 async move {
1243 let mut scan_state = Some(scan_state_rx.borrow().clone());
1244 while let Some(ScanState::Scanning) = scan_state {
1245 scan_state = scan_state_rx.recv().await;
1246 }
1247 }
1248 }
1249
1250 fn is_scanning(&self) -> bool {
1251 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1252 true
1253 } else {
1254 false
1255 }
1256 }
1257
    /// Returns a clone of the current in-memory snapshot of this worktree.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1261
    /// The absolute filesystem path of this worktree's root directory.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1265
    /// Whether the given absolute `path` lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1269
1270 fn absolutize(&self, path: &Path) -> PathBuf {
1271 if path.file_name().is_some() {
1272 self.snapshot.abs_path.join(path)
1273 } else {
1274 self.snapshot.abs_path.to_path_buf()
1275 }
1276 }
1277
    /// Loads the file at the worktree-relative `path` from disk, returning a
    /// `File` handle for it along with its contents.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Surface the refreshed entry in the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1303
    /// Saves `buffer_handle`'s contents to `path` within this worktree,
    /// registers the buffer under its new file, and notifies the buffer that
    /// the save completed.
    pub fn save_buffer_as(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        let text = buffer.as_rope().clone();
        let version = buffer.version();
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            let file = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer so worktree events (diagnostics, updates)
                // can reach it via its new path.
                this.open_buffers
                    .insert(buffer_handle.id(), buffer_handle.downgrade());
                File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                }
            });

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version, file.mtime, Some(Box::new(file)), cx);
            });

            Ok(())
        })
    }
1337
    /// Writes `text` to the worktree-relative `path` on disk and returns the
    /// refreshed snapshot entry for the saved file.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Do the disk write and snapshot refresh off the main thread.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Pick up the refreshed entry in the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1359
    /// Registers this worktree with the server under `project_id`. A no-op
    /// (resolving immediately) when a registration is already pending or done.
    pub fn register(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.registration != Registration::None {
            return Task::ready(Ok(()));
        }

        // Mark as pending before the request so concurrent calls short-circuit.
        self.registration = Registration::Pending;
        let client = self.client.clone();
        let register_message = proto::RegisterWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            authorized_logins: self.authorized_logins(),
        };
        cx.spawn(|this, mut cx| async move {
            let response = client.request(register_message).await;
            this.update(&mut cx, |this, _| {
                let worktree = this.as_local_mut().unwrap();
                match response {
                    Ok(_) => {
                        worktree.registration = Registration::Done { project_id };
                        Ok(())
                    }
                    Err(error) => {
                        // Reset so a later call can retry the registration.
                        worktree.registration = Registration::None;
                        Err(error)
                    }
                }
            })
        })
    }
1394
    /// Shares this (already registered) worktree with collaborators: sends the
    /// full snapshot to the server and starts a background task that streams
    /// incremental snapshot diffs as new snapshots are produced.
    pub fn share(&mut self, cx: &mut ModelContext<Worktree>) -> Task<anyhow::Result<()>> {
        let project_id = if let Registration::Done { project_id } = self.registration {
            project_id
        } else {
            return Task::ready(Err(anyhow!("cannot share worktree before registering it")));
        };

        // Already shared: nothing to do.
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        // Diff each queued snapshot against the last one successfully sent and
        // push only the delta to the server.
        let maintain_remote_snapshot = cx.background().spawn({
            let rpc = rpc.clone();
            let snapshot = snapshot.clone();
            async move {
                let mut prev_snapshot = snapshot;
                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                    let message =
                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                    match rpc.send(message).await {
                        // Advance the baseline only on a successful send, so a
                        // failed diff is retried implicitly by the next one.
                        Ok(()) => prev_snapshot = snapshot,
                        Err(err) => log::error!("error sending snapshot diff {}", err),
                    }
                }
            }
        });
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
        });

        // Serializing the full snapshot can be expensive, so build the share
        // message in the background before issuing the request.
        let diagnostic_summaries = self.diagnostic_summaries.clone();
        let weak = self.weak;
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto(&diagnostic_summaries, weak)),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1445
    /// Stops sharing this worktree, dropping the share state along with its
    /// background snapshot-sync task.
    pub fn unshare(&mut self) {
        self.share.take();
    }
1449
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1453}
1454
1455fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1456 let contents = smol::block_on(fs.load(&abs_path))?;
1457 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1458 let mut builder = GitignoreBuilder::new(parent);
1459 for line in contents.lines() {
1460 builder.add_line(Some(abs_path.into()), line)?;
1461 }
1462 Ok(builder.build()?)
1463}
1464
1465impl Deref for Worktree {
1466 type Target = Snapshot;
1467
1468 fn deref(&self) -> &Self::Target {
1469 match self {
1470 Worktree::Local(worktree) => &worktree.snapshot,
1471 Worktree::Remote(worktree) => &worktree.snapshot,
1472 }
1473 }
1474}
1475
1476impl Deref for LocalWorktree {
1477 type Target = Snapshot;
1478
1479 fn deref(&self) -> &Self::Target {
1480 &self.snapshot
1481 }
1482}
1483
1484impl Deref for RemoteWorktree {
1485 type Target = Snapshot;
1486
1487 fn deref(&self) -> &Self::Target {
1488 &self.snapshot
1489 }
1490}
1491
1492impl fmt::Debug for LocalWorktree {
1493 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1494 self.snapshot.fmt(f)
1495 }
1496}
1497
impl RemoteWorktree {
    /// Returns the already-open buffer for `path` on this worktree, if any.
    /// As a side effect, prunes entries whose buffers have been dropped
    /// (their weak handles no longer upgrade).
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                // The buffer was dropped; remove the stale entry.
                false
            }
        });
        existing_buffer
    }

    /// Opens the buffer for `path` by requesting its contents from the host
    /// of this remote worktree, then applies any operations that arrived
    /// while the open request was in flight.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            // The worktree may be dropped while the request is in flight, so
            // the weak handle is re-upgraded at every step.
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx).unwrap()
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Replay any operations that were buffered while the open
                // request was pending.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Closes every open buffer on this worktree and drops their records.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// Returns a clone of the current snapshot of this remote worktree.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues a snapshot update received from the host; the payload is
    /// forwarded to the background consumer that owns the `updates_tx`
    /// channel's receiving end.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        // NOTE(review): `envelope` is owned here, so this clone of the payload
        // looks avoidable — confirm and move `envelope.payload` instead.
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Records the host-reported diagnostic summary for `path`.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        self.diagnostic_summaries.insert(
            PathKey(path.clone()),
            DiagnosticSummary {
                error_count: summary.error_count as usize,
                warning_count: summary.warning_count as usize,
                info_count: summary.info_count as usize,
                hint_count: summary.hint_count as usize,
            },
        );
    }

    /// Removes a departed collaborator's replica state from all open buffers.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1631
/// The state of a buffer on a remote worktree, which may still be opening.
enum RemoteBuffer {
    // Operations received for a buffer that hasn't finished opening; they are
    // applied once the buffer is loaded.
    Operations(Vec<Operation>),
    // A fully opened buffer, held weakly so this map doesn't keep it alive.
    Loaded(WeakModelHandle<Buffer>),
}
1636
1637impl RemoteBuffer {
1638 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1639 match self {
1640 Self::Operations(_) => None,
1641 Self::Loaded(buffer) => buffer.upgrade(cx),
1642 }
1643 }
1644}
1645
impl Snapshot {
    /// The stable identifier of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Serializes this snapshot (omitting ignored entries) for transmission
    /// to collaborators, bundling in the given per-file diagnostic summaries.
    pub fn to_proto(
        &self,
        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
        weak: bool,
    ) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id.0 as u64,
            root_name,
            entries: self
                .entries_by_path
                .iter()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
            diagnostic_summaries: diagnostic_summaries
                .iter()
                .map(|(path, summary)| summary.to_proto(path.0.clone()))
                .collect(),
            weak,
        }
    }

    /// Computes the delta between `self` and an older snapshot `other` as an
    /// `UpdateWorktree` message: entries that were added or rescanned in
    /// `self`, plus the ids of entries that no longer exist.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        // Merge-walk the two id-ordered sequences.
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in `self`: a newly added entry.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: send only if rescanned since.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: the entry was removed.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies an incremental update received from the host, editing both the
    /// path-keyed and id-keyed trees and bumping the scan id.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If an entry with this id already exists, drop its record at the
            // old path (it may have been renamed).
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal at the `start_offset`-th entry, counting only
    /// entries admitted by the `include_dirs`/`include_ignored` filters.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at the entry for `path` (or, if absent,
    /// where it would be).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at file index `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories) from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, skipping the root entry
    /// (which has the empty path).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of the directory at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root itself, if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry whose worktree-relative path is exactly `path`.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal lands on or after `path`; require exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id, going through the id-to-path index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts (or replaces) `entry` in the snapshot, reusing a previous
    /// entry id where possible. Inserting a `.gitignore` file additionally
    /// refreshes the cached ignore rules for its directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // An unparsable .gitignore shouldn't abort the insert.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Records the scanned children of the pending directory `parent_path`,
    /// marking the directory itself as fully scanned and registering its
    /// `.gitignore` rules, if any.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            // Only pending directories are ever populated.
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id of a removed
    /// entry with the same inode, otherwise the id of an existing entry at
    /// the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything beneath it from the snapshot,
    /// remembering removed ids by inode so they can be reused if the same
    /// files reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`] + [subtree at `path`] +
            // [after], keeping the outer two and capturing the middle.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            // Keep the largest id seen for this inode.
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates its directory's cached rules.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of `.gitignore` matchers that applies to `path` by
    /// walking its ancestors from the root downward.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                // An ignored ancestor ignores everything beneath it.
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
2015
2016impl fmt::Debug for Snapshot {
2017 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2018 for entry in self.entries_by_path.cursor::<()>() {
2019 for _ in entry.path.ancestors().skip(1) {
2020 write!(f, " ")?;
2021 }
2022 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2023 }
2024 Ok(())
2025 }
2026}
2027
/// A handle to a file within a worktree, as attached to open buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    // Snapshot entry id; `None` once the file has been deleted.
    entry_id: Option<usize>,
    pub worktree: ModelHandle<Worktree>,
    // Absolute path of the worktree root containing this file.
    worktree_path: Arc<Path>,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    // False for files that live on a remote (collaborator-hosted) worktree.
    is_local: bool,
}
2037
impl language::File for File {
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The path of this file relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The absolute path on disk; `None` for files on remote worktrees.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// The file's relative path prefixed with the worktree root's directory
    /// name.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    /// A file with no snapshot entry has been deleted from disk.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Saves `text` as this file's new contents. On a local worktree the
    /// write happens on disk (notifying the server when shared); on a remote
    /// worktree the save is requested from the host over RPC.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Only notify collaborators when the worktree is shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    // The host reports the saved version and mtime back.
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Reads this file's contents from the local filesystem; `None` for
    /// remote worktrees.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Asks the host of a remote worktree to format the given buffer;
    /// `None` for local worktrees.
    fn format_remote(
        &self,
        buffer_id: u64,
        cx: &mut MutableAppContext,
    ) -> Option<Task<Result<()>>> {
        let worktree = self.worktree.read(cx);
        let worktree_id = worktree.id().to_proto();
        let worktree = worktree.as_remote()?;
        let rpc = worktree.client.clone();
        let project_id = worktree.project_id;
        Some(cx.foreground().spawn(async move {
            rpc.request(proto::FormatBuffer {
                project_id,
                worktree_id,
                buffer_id,
            })
            .await?;
            Ok(())
        }))
    }

    /// Forwards a buffer edit to the worktree for propagation to peers.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// Notifies the host that this buffer was closed. Only applies to remote
    /// worktrees; a failed send is logged, not propagated.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.id().to_proto();
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    /// Enables downcasting back to this concrete type (see `File::from_dyn`).
    fn as_any(&self) -> &dyn Any {
        self
    }
}
2192
2193impl File {
2194 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2195 file.and_then(|f| f.as_any().downcast_ref())
2196 }
2197
2198 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2199 self.worktree.read(cx).id()
2200 }
2201
2202 pub fn project_entry(&self, cx: &AppContext) -> Option<ProjectEntry> {
2203 self.entry_id.map(|entry_id| ProjectEntry {
2204 worktree_id: self.worktree_id(cx),
2205 entry_id,
2206 })
2207 }
2208}
2209
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    pub id: usize,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    pub is_ignored: bool,
}
2220
/// Distinguishes directories (scanned or not) from files in a snapshot.
#[derive(Clone, Debug)]
pub enum EntryKind {
    // A directory discovered but whose children haven't been scanned yet.
    PendingDir,
    Dir,
    // A file, carrying the character bag used for fuzzy path matching.
    File(CharBag),
}
2227
2228impl Entry {
2229 fn new(
2230 path: Arc<Path>,
2231 metadata: &fs::Metadata,
2232 next_entry_id: &AtomicUsize,
2233 root_char_bag: CharBag,
2234 ) -> Self {
2235 Self {
2236 id: next_entry_id.fetch_add(1, SeqCst),
2237 kind: if metadata.is_dir {
2238 EntryKind::PendingDir
2239 } else {
2240 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2241 },
2242 path,
2243 inode: metadata.inode,
2244 mtime: metadata.mtime,
2245 is_symlink: metadata.is_symlink,
2246 is_ignored: false,
2247 }
2248 }
2249
2250 pub fn is_dir(&self) -> bool {
2251 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2252 }
2253
2254 pub fn is_file(&self) -> bool {
2255 matches!(self.kind, EntryKind::File(_))
2256 }
2257}
2258
2259impl sum_tree::Item for Entry {
2260 type Summary = EntrySummary;
2261
2262 fn summary(&self) -> Self::Summary {
2263 let visible_count = if self.is_ignored { 0 } else { 1 };
2264 let file_count;
2265 let visible_file_count;
2266 if self.is_file() {
2267 file_count = 1;
2268 visible_file_count = visible_count;
2269 } else {
2270 file_count = 0;
2271 visible_file_count = 0;
2272 }
2273
2274 EntrySummary {
2275 max_path: self.path.clone(),
2276 count: 1,
2277 visible_count,
2278 file_count,
2279 visible_file_count,
2280 }
2281 }
2282}
2283
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed — and therefore ordered — by worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2291
/// Aggregated statistics over a run of `Entry` items in the sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Largest (right-most) path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries not marked as ignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are non-ignored files.
    visible_file_count: usize,
}
2300
2301impl Default for EntrySummary {
2302 fn default() -> Self {
2303 Self {
2304 max_path: Arc::from(Path::new("")),
2305 count: 0,
2306 visible_count: 0,
2307 file_count: 0,
2308 visible_file_count: 0,
2309 }
2310 }
2311}
2312
2313impl sum_tree::Summary for EntrySummary {
2314 type Context = ();
2315
2316 fn add_summary(&mut self, rhs: &Self, _: &()) {
2317 self.max_path = rhs.max_path.clone();
2318 self.visible_count += rhs.visible_count;
2319 self.file_count += rhs.file_count;
2320 self.visible_file_count += rhs.visible_file_count;
2321 }
2322}
2323
/// An item in the id-keyed side tree, mapping a stable entry id back to its
/// current path. This is what lets the worktree detect renames: an entry that
/// reappears at a new path within one event batch keeps its id.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last created or updated.
    scan_id: usize,
}
2331
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// Summarizes a single item by its id; ids are the tree's sort key.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2339
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    /// Items are keyed (and ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2347
/// Summary for the id-keyed tree: just the largest entry id in the range,
/// which is all that's needed to seek by id.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2352
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// Replacing (rather than accumulating) is correct here: items are
    /// ordered by id, so the right-hand summary always holds the maximum.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2360
/// Lets a bare `usize` (an entry id) be used as a seek dimension over the
/// id-keyed tree.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2366
/// Newtype over a worktree-relative path, used as the sort key of the main
/// entries tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2369
impl Default for PathKey {
    /// The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2375
/// Lets `PathKey` be used as a seek dimension over the path-ordered entries
/// tree, tracking the maximum path seen so far.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2381
/// Scans a worktree's directory tree in the background, keeping the shared
/// `Snapshot` up to date and reporting progress over the `notify` channel.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    /// The snapshot shared with the foreground `LocalWorktree`.
    snapshot: Arc<Mutex<Snapshot>>,
    /// Channel for `ScanState` transitions (Scanning / Idle / Err).
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2388
impl BackgroundScanner {
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }

    /// The absolute path of the worktree root.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// A clone of the current shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Main loop: performs the initial scan, then processes batches of
    /// file-system events until the `notify` receiver is dropped. A
    /// `Scanning`/`Idle` pair brackets every unit of work; a send failure
    /// means nobody is listening anymore, so the scanner shuts down.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }

    /// Performs the initial recursive scan of the worktree root, fanning the
    /// work out over a queue serviced by one task per CPU. Single-file
    /// worktrees (root is not a directory) need no scanning.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            // Seed the queue with the root; each job carries a clone of the
            // sender so workers can enqueue subdirectories they discover.
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Drop our sender so the channel closes once all in-flight jobs
            // (and their queued children) have been processed.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }

    /// Scans a single directory: reads its children, computes their ignore
    /// status, records the entries in the snapshot, and enqueues child
    /// directories for further scanning.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in this directory.
                // Because `.gitignore` starts with a `.`, it sorts near the
                // front of the listing, so such entries should rarely be
                // numerous. Update the ignore stack associated with any new
                // jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        // Jobs were pushed in the same order as directory
                        // entries, so the iterators stay in lockstep.
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Everything under an ignored directory is ignored.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }

    /// Applies a batch of file-system events to a private copy of the
    /// snapshot, then publishes it. Returns `false` if the worktree root is
    /// gone (the scanner should stop).
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path and drop events under an ancestor that also has an
        // event; rescanning the ancestor covers them.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove every affected path from the snapshot...
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // ...then re-insert whatever still exists on disk, queueing
        // directories for a recursive rescan.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists; the removal above stands.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }

    /// After a batch of events, re-evaluates ignore status for every subtree
    /// whose `.gitignore` changed in this scan, and drops cached gitignores
    /// whose files no longer exist.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants: updating the ancestor recurses into them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes the ignore status of the direct children of `job.path`,
    /// queueing a job for each child directory and writing any changed
    /// entries back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
}
2792
2793async fn refresh_entry(
2794 fs: &dyn Fs,
2795 snapshot: &Mutex<Snapshot>,
2796 path: Arc<Path>,
2797 abs_path: &Path,
2798) -> Result<Entry> {
2799 let root_char_bag;
2800 let next_entry_id;
2801 {
2802 let snapshot = snapshot.lock();
2803 root_char_bag = snapshot.root_char_bag;
2804 next_entry_id = snapshot.next_entry_id.clone();
2805 }
2806 let entry = Entry::new(
2807 path,
2808 &fs.metadata(abs_path)
2809 .await?
2810 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2811 &next_entry_id,
2812 root_char_bag,
2813 );
2814 Ok(snapshot.lock().insert_entry(entry, fs))
2815}
2816
2817fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2818 let mut result = root_char_bag;
2819 result.extend(
2820 path.to_string_lossy()
2821 .chars()
2822 .map(|c| c.to_ascii_lowercase()),
2823 );
2824 result
2825}
2826
/// A unit of directory-scanning work. Jobs carry a clone of their own queue's
/// sender so a worker can enqueue the subdirectories it discovers.
struct ScanJob {
    abs_path: PathBuf,
    /// The directory's path relative to the worktree root.
    path: Arc<Path>,
    /// The gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    scan_queue: Sender<ScanJob>,
}
2833
/// A unit of ignore-status refresh work for one directory; like [`ScanJob`],
/// it carries its queue's sender so children can be enqueued recursively.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2839
/// Test-support extension methods for `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    /// Waits until all pending file-system events for the worktree have been
    /// processed. See the impl for details.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2847
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create then delete a sentinel file, waiting for each change to
            // be observed; any earlier events must have been drained first.
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2880
/// Running totals accumulated while a cursor traverses the entries tree;
/// mirrors [`EntrySummary`] but borrows `max_path` from the tree.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2889
2890impl<'a> TraversalProgress<'a> {
2891 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2892 match (include_ignored, include_dirs) {
2893 (true, true) => self.count,
2894 (true, false) => self.file_count,
2895 (false, true) => self.visible_count,
2896 (false, false) => self.visible_file_count,
2897 }
2898 }
2899}
2900
/// Accumulates entry summaries into traversal totals as the cursor advances.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2910
impl<'a> Default for TraversalProgress<'a> {
    /// Zero progress at the start of a traversal.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2922
/// An iterator-like cursor over the worktree's entries, optionally filtering
/// out directories and/or gitignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
2928
impl<'a> Traversal<'a> {
    /// Moves to the next entry matching the traversal's filters. Returns
    /// whether the seek moved the cursor.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward so that `offset` filtered entries precede the cursor.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, stopping at the next
    /// entry (matching the filters) that is not a descendant of it.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep seeking if the landed-on entry is filtered out.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filtered entries preceding the cursor.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2974
2975impl<'a> Iterator for Traversal<'a> {
2976 type Item = &'a Entry;
2977
2978 fn next(&mut self) -> Option<Self::Item> {
2979 if let Some(item) = self.entry() {
2980 self.advance();
2981 Some(item)
2982 } else {
2983 None
2984 }
2985 }
2986}
2987
/// A position to seek to within the entries tree.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the entry at exactly this path.
    Path(&'a Path),
    /// Seek just past the subtree rooted at this path.
    PathSuccessor(&'a Path),
    /// Seek until `count` entries of the selected categories have been passed.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2998
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Compares this target against the cursor's accumulated progress so the
    /// tree can binary-search toward it.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Keep seeking (Greater) while still inside the subtree;
                // stop (Equal) at the first entry outside it.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3021
/// Iterates over the direct children of `parent_path`, skipping each child's
/// own descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3026
impl<'a> Iterator for ChildEntriesIter<'a> {
    type Item = &'a Entry;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(item) = self.traversal.entry() {
            // Entries are path-ordered, so the first entry no longer under
            // `parent_path` marks the end of the children.
            if item.path.starts_with(&self.parent_path) {
                // Skip this child's subtree so only direct children are yielded.
                self.traversal.advance_to_sibling();
                return Some(item);
            }
        }
        None
    }
}
3040
3041impl<'a> From<&'a Entry> for proto::Entry {
3042 fn from(entry: &'a Entry) -> Self {
3043 Self {
3044 id: entry.id as u64,
3045 is_dir: entry.is_dir(),
3046 path: entry.path.to_string_lossy().to_string(),
3047 inode: entry.inode,
3048 mtime: Some(entry.mtime.into()),
3049 is_symlink: entry.is_symlink,
3050 is_ignored: entry.is_ignored,
3051 }
3052 }
3053}
3054
3055impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3056 type Error = anyhow::Error;
3057
3058 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3059 if let Some(mtime) = entry.mtime {
3060 let kind = if entry.is_dir {
3061 EntryKind::Dir
3062 } else {
3063 let mut char_bag = root_char_bag.clone();
3064 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3065 EntryKind::File(char_bag)
3066 };
3067 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3068 Ok(Entry {
3069 id: entry.id as usize,
3070 kind,
3071 path: path.clone(),
3072 inode: entry.inode,
3073 mtime: mtime.into(),
3074 is_symlink: entry.is_symlink,
3075 is_ignored: entry.is_ignored,
3076 })
3077 } else {
3078 Err(anyhow!(
3079 "missing mtime in remote worktree entry {:?}",
3080 entry.path
3081 ))
3082 }
3083 }
3084}
3085
3086#[cfg(test)]
3087mod tests {
3088 use super::*;
3089 use crate::fs::FakeFs;
3090 use anyhow::Result;
3091 use client::test::{FakeHttpClient, FakeServer};
3092 use fs::RealFs;
3093 use language::{Diagnostic, DiagnosticEntry};
3094 use lsp::Url;
3095 use rand::prelude::*;
3096 use serde_json::json;
3097 use std::{cell::RefCell, rc::Rc};
3098 use std::{
3099 env,
3100 fmt::Write,
3101 time::{SystemTime, UNIX_EPOCH},
3102 };
3103 use text::Point;
3104 use unindent::Unindent as _;
3105 use util::test::temp_tree;
3106
    // Verifies that the default (non-ignored) traversal lists entries in path
    // order and excludes `a/b`, which is matched by the root `.gitignore`.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            false,
            Arc::new(fs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3153
    // Edits a buffer opened from a real temp directory, saves it, and checks
    // the file on disk matches the buffer contents.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            // Large edit so the save isn't trivially small.
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx)
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3187
    // Same as test_save_file, but the worktree root *is* the file itself
    // (single-file worktree), so the buffer is opened with an empty path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx)
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3226
    // Exercises rename/delete detection across a batch of FS events: entry
    // ids and open buffers must follow renamed files, deleted files must be
    // marked deleted, and a remote replica must converge after applying the
    // resulting update message.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client.clone());
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap().0 }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(&Default::default(), Default::default()),
            Client::new(http_client.clone()),
            user_store,
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Renamed entries keep their original ids.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers follow their files to the new paths.
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3381
    // Verifies gitignore status both after the initial scan and for files
    // created afterwards (picked up via FS events), including the implicit
    // ignoring of `.git`.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Files created after the initial scan must also get correct status.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3433
    // Verifies that opening the same path multiple times — concurrently or
    // sequentially while a buffer is alive — yields the same buffer instance.
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        let user_id = 100;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client);
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            false,
            fs,
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap().0;
        let buffer_a_2 = buffer_a_2.await.unwrap().0;
        let buffer_b = buffer_b.await.unwrap().0;
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap()
            .0;

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3493
3494 #[gpui::test]
3495 async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
3496 use std::fs;
3497
3498 let dir = temp_tree(json!({
3499 "file1": "abc",
3500 "file2": "def",
3501 "file3": "ghi",
3502 }));
3503 let http_client = FakeHttpClient::with_404_response();
3504 let client = Client::new(http_client.clone());
3505 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3506
3507 let tree = Worktree::open_local(
3508 client,
3509 user_store,
3510 dir.path(),
3511 false,
3512 Arc::new(RealFs),
3513 &mut cx.to_async(),
3514 )
3515 .await
3516 .unwrap();
3517 tree.flush_fs_events(&cx).await;
3518 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3519 .await;
3520
3521 let (buffer1, _) = tree
3522 .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3523 .await
3524 .unwrap();
3525 let events = Rc::new(RefCell::new(Vec::new()));
3526
3527 // initially, the buffer isn't dirty.
3528 buffer1.update(&mut cx, |buffer, cx| {
3529 cx.subscribe(&buffer1, {
3530 let events = events.clone();
3531 move |_, _, event, _| events.borrow_mut().push(event.clone())
3532 })
3533 .detach();
3534
3535 assert!(!buffer.is_dirty());
3536 assert!(events.borrow().is_empty());
3537
3538 buffer.edit(vec![1..2], "", cx);
3539 });
3540
3541 // after the first edit, the buffer is dirty, and emits a dirtied event.
3542 buffer1.update(&mut cx, |buffer, cx| {
3543 assert!(buffer.text() == "ac");
3544 assert!(buffer.is_dirty());
3545 assert_eq!(
3546 *events.borrow(),
3547 &[language::Event::Edited, language::Event::Dirtied]
3548 );
3549 events.borrow_mut().clear();
3550 buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
3551 });
3552
3553 // after saving, the buffer is not dirty, and emits a saved event.
3554 buffer1.update(&mut cx, |buffer, cx| {
3555 assert!(!buffer.is_dirty());
3556 assert_eq!(*events.borrow(), &[language::Event::Saved]);
3557 events.borrow_mut().clear();
3558
3559 buffer.edit(vec![1..1], "B", cx);
3560 buffer.edit(vec![2..2], "D", cx);
3561 });
3562
3563 // after editing again, the buffer is dirty, and emits another dirty event.
3564 buffer1.update(&mut cx, |buffer, cx| {
3565 assert!(buffer.text() == "aBDc");
3566 assert!(buffer.is_dirty());
3567 assert_eq!(
3568 *events.borrow(),
3569 &[
3570 language::Event::Edited,
3571 language::Event::Dirtied,
3572 language::Event::Edited,
3573 ],
3574 );
3575 events.borrow_mut().clear();
3576
3577 // TODO - currently, after restoring the buffer to its
3578 // previously-saved state, the is still considered dirty.
3579 buffer.edit([1..3], "", cx);
3580 assert!(buffer.text() == "ac");
3581 assert!(buffer.is_dirty());
3582 });
3583
3584 assert_eq!(*events.borrow(), &[language::Event::Edited]);
3585
3586 // When a file is deleted, the buffer is considered dirty.
3587 let events = Rc::new(RefCell::new(Vec::new()));
3588 let (buffer2, _) = tree
3589 .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
3590 .await
3591 .unwrap();
3592 buffer2.update(&mut cx, |_, cx| {
3593 cx.subscribe(&buffer2, {
3594 let events = events.clone();
3595 move |_, _, event, _| events.borrow_mut().push(event.clone())
3596 })
3597 .detach();
3598 });
3599
3600 fs::remove_file(dir.path().join("file2")).unwrap();
3601 buffer2.condition(&cx, |b, _| b.is_dirty()).await;
3602 assert_eq!(
3603 *events.borrow(),
3604 &[language::Event::Dirtied, language::Event::FileHandleChanged]
3605 );
3606
3607 // When a file is already dirty when deleted, we don't emit a Dirtied event.
3608 let events = Rc::new(RefCell::new(Vec::new()));
3609 let (buffer3, _) = tree
3610 .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
3611 .await
3612 .unwrap();
3613 buffer3.update(&mut cx, |_, cx| {
3614 cx.subscribe(&buffer3, {
3615 let events = events.clone();
3616 move |_, _, event, _| events.borrow_mut().push(event.clone())
3617 })
3618 .detach();
3619 });
3620
3621 tree.flush_fs_events(&cx).await;
3622 buffer3.update(&mut cx, |buffer, cx| {
3623 buffer.edit(Some(0..0), "x", cx);
3624 });
3625 events.borrow_mut().clear();
3626 fs::remove_file(dir.path().join("file3")).unwrap();
3627 buffer3
3628 .condition(&cx, |_, _| !events.borrow().is_empty())
3629 .await;
3630 assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
3631 cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
3632 }
3633
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        // Verifies how an open buffer reacts to its backing file changing on
        // disk: a clean buffer reloads its contents, while a dirty buffer keeps
        // the user's edits and is flagged as conflicted instead.
        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // After the reload the buffer matches the disk contents exactly, so it
        // is neither dirty nor conflicted.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3736
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that an LSP `publishDiagnostics` message is translated into
        // diagnostic groups: each primary diagnostic and the hints derived from
        // its `related_information` share one `group_id`, with `is_primary`
        // distinguishing the primary entry from its hints.
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            false,
            fs,
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let (buffer, _) = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Two primary diagnostics ("error 1" warning, "error 2" error), each
        // accompanied by HINT-severity entries that point back at the original
        // diagnostic via their own `related_information`.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.as_local_mut().unwrap().update_diagnostics(
                    Arc::from("a.rs".as_ref()),
                    message,
                    &Default::default(),
                    cx,
                )
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All entries, ordered by position: "error 1" and its hint form group 0;
        // "error 2" and its two hints form group 1.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched individually by its id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
4006
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Randomized test: mutate a real temp directory tree while delivering
        // batches of fs events to a `BackgroundScanner`, checking snapshot
        // invariants throughout. Tunable via OPERATIONS / INITIAL_ENTRIES.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // Seed a snapshot containing only the root entry; `scan_dirs` below
        // fills in the rest.
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave tree mutations with randomly-sized event deliveries, and
        // occasionally capture an intermediate snapshot to replay updates
        // against later.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush the remaining undelivered events so the scanner settles.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan of the final tree must agree with the incrementally
        // maintained snapshot.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every captured intermediate snapshot must be able to catch up to the
        // final state by applying an update built from the diff.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Simulate a snapshot that never tracked ignored entries by
                // stripping them before the update is applied.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4123
4124 fn randomly_mutate_tree(
4125 root_path: &Path,
4126 insertion_probability: f64,
4127 rng: &mut impl Rng,
4128 ) -> Result<Vec<fsevent::Event>> {
4129 let root_path = root_path.canonicalize().unwrap();
4130 let (dirs, files) = read_dir_recursive(root_path.clone());
4131
4132 let mut events = Vec::new();
4133 let mut record_event = |path: PathBuf| {
4134 events.push(fsevent::Event {
4135 event_id: SystemTime::now()
4136 .duration_since(UNIX_EPOCH)
4137 .unwrap()
4138 .as_secs(),
4139 flags: fsevent::StreamFlags::empty(),
4140 path,
4141 });
4142 };
4143
4144 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4145 let path = dirs.choose(rng).unwrap();
4146 let new_path = path.join(gen_name(rng));
4147
4148 if rng.gen() {
4149 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4150 std::fs::create_dir(&new_path)?;
4151 } else {
4152 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4153 std::fs::write(&new_path, "")?;
4154 }
4155 record_event(new_path);
4156 } else if rng.gen_bool(0.05) {
4157 let ignore_dir_path = dirs.choose(rng).unwrap();
4158 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4159
4160 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4161 let files_to_ignore = {
4162 let len = rng.gen_range(0..=subfiles.len());
4163 subfiles.choose_multiple(rng, len)
4164 };
4165 let dirs_to_ignore = {
4166 let len = rng.gen_range(0..subdirs.len());
4167 subdirs.choose_multiple(rng, len)
4168 };
4169
4170 let mut ignore_contents = String::new();
4171 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4172 write!(
4173 ignore_contents,
4174 "{}\n",
4175 path_to_ignore
4176 .strip_prefix(&ignore_dir_path)?
4177 .to_str()
4178 .unwrap()
4179 )
4180 .unwrap();
4181 }
4182 log::info!(
4183 "Creating {:?} with contents:\n{}",
4184 ignore_path.strip_prefix(&root_path)?,
4185 ignore_contents
4186 );
4187 std::fs::write(&ignore_path, ignore_contents).unwrap();
4188 record_event(ignore_path);
4189 } else {
4190 let old_path = {
4191 let file_path = files.choose(rng);
4192 let dir_path = dirs[1..].choose(rng);
4193 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4194 };
4195
4196 let is_rename = rng.gen();
4197 if is_rename {
4198 let new_path_parent = dirs
4199 .iter()
4200 .filter(|d| !d.starts_with(old_path))
4201 .choose(rng)
4202 .unwrap();
4203
4204 let overwrite_existing_dir =
4205 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4206 let new_path = if overwrite_existing_dir {
4207 std::fs::remove_dir_all(&new_path_parent).ok();
4208 new_path_parent.to_path_buf()
4209 } else {
4210 new_path_parent.join(gen_name(rng))
4211 };
4212
4213 log::info!(
4214 "Renaming {:?} to {}{:?}",
4215 old_path.strip_prefix(&root_path)?,
4216 if overwrite_existing_dir {
4217 "overwrite "
4218 } else {
4219 ""
4220 },
4221 new_path.strip_prefix(&root_path)?
4222 );
4223 std::fs::rename(&old_path, &new_path)?;
4224 record_event(old_path.clone());
4225 record_event(new_path);
4226 } else if old_path.is_dir() {
4227 let (dirs, files) = read_dir_recursive(old_path.clone());
4228
4229 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4230 std::fs::remove_dir_all(&old_path).unwrap();
4231 for file in files {
4232 record_event(file);
4233 }
4234 for dir in dirs {
4235 record_event(dir);
4236 }
4237 } else {
4238 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4239 std::fs::remove_file(old_path).unwrap();
4240 record_event(old_path.clone());
4241 }
4242 }
4243
4244 Ok(events)
4245 }
4246
4247 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4248 let child_entries = std::fs::read_dir(&path).unwrap();
4249 let mut dirs = vec![path];
4250 let mut files = Vec::new();
4251 for child_entry in child_entries {
4252 let child_path = child_entry.unwrap().path();
4253 if child_path.is_dir() {
4254 let (child_dirs, child_files) = read_dir_recursive(child_path);
4255 dirs.extend(child_dirs);
4256 files.extend(child_files);
4257 } else {
4258 files.push(child_path);
4259 }
4260 }
4261 (dirs, files)
4262 }
4263
4264 fn gen_name(rng: &mut impl Rng) -> String {
4265 (0..6)
4266 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4267 .map(char::from)
4268 .collect()
4269 }
4270
    impl Snapshot {
        /// Test-only consistency checks: the `files` iterators, the by-path
        /// entry tree, and the ignore map must all agree with one another.
        fn check_invariants(&self) {
            // Every file entry must be yielded by `files(true, _)`, and every
            // non-ignored file by `files(false, _)`, in the same order as a
            // flat scan of `entries_by_path`.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Re-derive the entry ordering by walking `child_entries` from the
            // root: children are inserted at the pre-insertion top of the
            // stack, so popping visits them in order, depth-first (despite the
            // `bfs_paths` name). The result must match a flat cursor scan.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every directory with a parsed ignore must still exist in the
            // tree, along with its `.gitignore` file entry.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into sorted `(path, inode, is_ignored)`
        /// tuples for comparison in tests, optionally including ignored
        /// entries.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4322}