1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap};
11use collections::{BTreeMap, HashSet};
12use futures::{Stream, StreamExt};
13use fuzzy::CharBag;
14use gpui::{
15 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
16 Task, UpgradeModelHandle, WeakModelHandle,
17};
18use language::{
19 Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
20 Operation, PointUtf16, Rope,
21};
22use lazy_static::lazy_static;
23use lsp::LanguageServer;
24use parking_lot::Mutex;
25use postage::{
26 prelude::{Sink as _, Stream as _},
27 watch,
28};
29use serde::Deserialize;
30use smol::channel::{self, Sender};
31use std::{
32 any::Any,
33 cmp::{self, Ordering},
34 convert::{TryFrom, TryInto},
35 ffi::{OsStr, OsString},
36 fmt,
37 future::Future,
38 mem,
39 ops::{Deref, Range},
40 path::{Path, PathBuf},
41 sync::{
42 atomic::{AtomicUsize, Ordering::SeqCst},
43 Arc,
44 },
45 time::{Duration, SystemTime},
46};
47use sum_tree::Bias;
48use sum_tree::{Edit, SeekTarget, SumTree};
49use util::{post_inc, ResultExt, TryFutureExt};
50
lazy_static! {
    // Interned file name used to recognize `.gitignore` files during scans.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
54
/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan in progress; the snapshot is settled.
    Idle,
    /// A background scan is currently running.
    Scanning,
    /// The last scan failed; the error is `Arc`-wrapped so the state stays `Clone`.
    Err(Arc<anyhow::Error>),
}
61
/// Identifier for a worktree: the model handle id locally (see
/// `from_usize`), or the server-assigned id for remote worktrees
/// (see `from_proto`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct WorktreeId(usize);
64
/// A directory tree being edited: either rooted on the local filesystem,
/// or mirrored from a remote collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
69
/// Events emitted by a [`Worktree`] model.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Event {
    /// A language server finished a pass of its disk-based diagnostics.
    DiskBasedDiagnosticsUpdated,
    /// Diagnostics changed for the file at the given worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
75
76impl Entity for Worktree {
77 type Event = Event;
78
79 fn app_will_quit(
80 &mut self,
81 _: &mut MutableAppContext,
82 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
83 use futures::FutureExt;
84
85 if let Self::Local(worktree) = self {
86 let shutdown_futures = worktree
87 .language_servers
88 .drain()
89 .filter_map(|(_, server)| server.shutdown())
90 .collect::<Vec<_>>();
91 Some(
92 async move {
93 futures::future::join_all(shutdown_futures).await;
94 }
95 .boxed(),
96 )
97 } else {
98 None
99 }
100 }
101}
102
103impl Worktree {
    /// Creates a local worktree rooted at `path` and starts the background
    /// filesystem scanner that keeps its snapshot up to date.
    ///
    /// The scanner watches `path` with a 100ms debounce and reports progress
    /// through the channel returned by `LocalWorktree::new`.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // Keep the scanner task alive for the lifetime of the worktree.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
128
    /// Builds a remote worktree from a serialized snapshot received from the
    /// host, and wires up the task that applies subsequent updates.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        // Lowercased root-name characters, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Deserializing the entry list may be large, so do it off the main thread.
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Skip undeserializable entries instead of failing the
                        // whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    // Remote worktrees have no local filesystem path.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Apply protocol updates to the shared snapshot on a
                // background thread; errors are logged, not fatal.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                // Whenever the background snapshot changes, refresh this
                // model's copy; stop once the model has been dropped.
                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    diagnostic_summaries: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                })
            })
        });

        Ok(worktree)
    }
236
237 pub fn as_local(&self) -> Option<&LocalWorktree> {
238 if let Worktree::Local(worktree) = self {
239 Some(worktree)
240 } else {
241 None
242 }
243 }
244
245 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
246 if let Worktree::Remote(worktree) = self {
247 Some(worktree)
248 } else {
249 None
250 }
251 }
252
253 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
254 if let Worktree::Local(worktree) = self {
255 Some(worktree)
256 } else {
257 None
258 }
259 }
260
261 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
262 if let Worktree::Remote(worktree) = self {
263 Some(worktree)
264 } else {
265 None
266 }
267 }
268
269 pub fn snapshot(&self) -> Snapshot {
270 match self {
271 Worktree::Local(worktree) => worktree.snapshot(),
272 Worktree::Remote(worktree) => worktree.snapshot(),
273 }
274 }
275
276 pub fn replica_id(&self) -> ReplicaId {
277 match self {
278 Worktree::Local(_) => 0,
279 Worktree::Remote(worktree) => worktree.replica_id,
280 }
281 }
282
    /// Removes a departed collaborator's state from the worktree.
    ///
    /// The local side also needs the peer id so it can drop the buffers
    /// shared with that peer; the remote side only tracks the replica id.
    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }
294
295 pub fn languages(&self) -> &Arc<LanguageRegistry> {
296 match self {
297 Worktree::Local(worktree) => &worktree.language_registry,
298 Worktree::Remote(worktree) => &worktree.languages,
299 }
300 }
301
302 pub fn user_store(&self) -> &ModelHandle<UserStore> {
303 match self {
304 Worktree::Local(worktree) => &worktree.user_store,
305 Worktree::Remote(worktree) => &worktree.user_store,
306 }
307 }
308
    /// Handles a peer's request to open a buffer in this (local) worktree.
    ///
    /// The buffer is loaded asynchronously and the serialized result sent
    /// back as the RPC response; failures are logged rather than propagated.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        // Only the host receives this message, so `as_local_mut` must succeed.
        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
334
335 pub fn handle_close_buffer(
336 &mut self,
337 envelope: TypedEnvelope<proto::CloseBuffer>,
338 _: Arc<Client>,
339 cx: &mut ModelContext<Self>,
340 ) -> anyhow::Result<()> {
341 self.as_local_mut()
342 .unwrap()
343 .close_remote_buffer(envelope, cx)
344 }
345
346 pub fn diagnostic_summaries<'a>(
347 &'a self,
348 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
349 match self {
350 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
351 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
352 }
353 .iter()
354 .map(|(path, summary)| (path.clone(), summary.clone()))
355 }
356
357 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
358 match self {
359 Worktree::Local(worktree) => &mut worktree.loading_buffers,
360 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
361 }
362 }
363
    /// Opens the buffer at `path`, deduplicating concurrent and repeated
    /// requests so a given path is only ever loaded once at a time.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    // Publish the result (or a shared error) to all waiters.
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        cx.spawn(|_, _| async move {
            // Wait until the watch carries a result, then clone it out; the
            // shared `Arc` error is re-wrapped as a fresh `anyhow` error.
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }
417
418 #[cfg(feature = "test-support")]
419 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
420 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
421 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
422 Worktree::Remote(worktree) => {
423 Box::new(worktree.open_buffers.values().filter_map(|buf| {
424 if let RemoteBuffer::Loaded(buf) = buf {
425 Some(buf)
426 } else {
427 None
428 }
429 }))
430 }
431 };
432
433 let path = path.as_ref();
434 open_buffers
435 .find(|buffer| {
436 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
437 file.path().as_ref() == path
438 } else {
439 false
440 }
441 })
442 .is_some()
443 }
444
445 pub fn handle_update_buffer(
446 &mut self,
447 envelope: TypedEnvelope<proto::UpdateBuffer>,
448 cx: &mut ModelContext<Self>,
449 ) -> Result<()> {
450 let payload = envelope.payload.clone();
451 let buffer_id = payload.buffer_id as usize;
452 let ops = payload
453 .operations
454 .into_iter()
455 .map(|op| language::proto::deserialize_operation(op))
456 .collect::<Result<Vec<_>, _>>()?;
457
458 match self {
459 Worktree::Local(worktree) => {
460 let buffer = worktree
461 .open_buffers
462 .get(&buffer_id)
463 .and_then(|buf| buf.upgrade(cx))
464 .ok_or_else(|| {
465 anyhow!("invalid buffer {} in update buffer message", buffer_id)
466 })?;
467 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
468 }
469 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
470 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
471 Some(RemoteBuffer::Loaded(buffer)) => {
472 if let Some(buffer) = buffer.upgrade(cx) {
473 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
474 } else {
475 worktree
476 .open_buffers
477 .insert(buffer_id, RemoteBuffer::Operations(ops));
478 }
479 }
480 None => {
481 worktree
482 .open_buffers
483 .insert(buffer_id, RemoteBuffer::Operations(ops));
484 }
485 },
486 }
487
488 Ok(())
489 }
490
    /// Handles a guest's request to save one of the host's buffers.
    ///
    /// Looks up the buffer shared with the requesting peer, saves it, and
    /// responds with the saved version and mtime. Errors during the
    /// asynchronous save are logged rather than returned.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        // Saving on behalf of a guest only makes sense while shared.
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
543
    /// Handles the host's notification that a buffer was saved, recording the
    /// saved version and mtime on the corresponding open buffer (if any).
    pub fn handle_buffer_saved(
        &mut self,
        envelope: TypedEnvelope<proto::BufferSaved>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        // Only guests receive this message, so `as_remote_mut` must succeed.
        let worktree = self.as_remote_mut().unwrap();
        if let Some(buffer) = worktree
            .open_buffers
            .get(&(payload.buffer_id as usize))
            .and_then(|buf| buf.upgrade(cx))
        {
            buffer.update(cx, |buffer, cx| {
                let version = payload.version.try_into()?;
                let mtime = payload
                    .mtime
                    .ok_or_else(|| anyhow!("missing mtime"))?
                    .into();
                buffer.did_save(version, mtime, None, cx);
                Result::<_, anyhow::Error>::Ok(())
            })?;
        }
        Ok(())
    }
568
    /// Refreshes this model's snapshot from the background scanner (local) or
    /// update stream (remote), reconciling open buffers once settled.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // While scanning, schedule another poll; buffers are only
                    // reconciled once the scan settles.
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                // With the fake fs (tests), yielding is enough
                                // to let it make progress without real delays.
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
601
    /// Reconciles open buffers with the current snapshot, refreshing each
    /// buffer's `File` and pruning buffers whose handles have been dropped.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Only loaded remote buffers carry a file to refresh.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        // Resolve the file's entry first by id, then by path;
                        // if neither resolves, the file was removed from the
                        // snapshot and loses its entry id.
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                // The buffer handle is gone; remove the entry after iteration
                // to avoid mutating the map while borrowed.
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
677
678 pub fn update_diagnostics(
679 &mut self,
680 mut params: lsp::PublishDiagnosticsParams,
681 disk_based_sources: &HashSet<String>,
682 cx: &mut ModelContext<Worktree>,
683 ) -> Result<()> {
684 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
685 let abs_path = params
686 .uri
687 .to_file_path()
688 .map_err(|_| anyhow!("URI is not a file"))?;
689 let worktree_path = Arc::from(
690 abs_path
691 .strip_prefix(&this.abs_path)
692 .context("path is not within worktree")?,
693 );
694
695 let mut group_ids_by_diagnostic_range = HashMap::default();
696 let mut diagnostics_by_group_id = HashMap::default();
697 let mut next_group_id = 0;
698 for diagnostic in &mut params.diagnostics {
699 let source = diagnostic.source.as_ref();
700 let code = diagnostic.code.as_ref();
701 let group_id = diagnostic_ranges(&diagnostic, &abs_path)
702 .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
703 .copied()
704 .unwrap_or_else(|| {
705 let group_id = post_inc(&mut next_group_id);
706 for range in diagnostic_ranges(&diagnostic, &abs_path) {
707 group_ids_by_diagnostic_range.insert((source, code, range), group_id);
708 }
709 group_id
710 });
711
712 diagnostics_by_group_id
713 .entry(group_id)
714 .or_insert(Vec::new())
715 .push(DiagnosticEntry {
716 range: diagnostic.range.start.to_point_utf16()
717 ..diagnostic.range.end.to_point_utf16(),
718 diagnostic: Diagnostic {
719 code: diagnostic.code.clone().map(|code| match code {
720 lsp::NumberOrString::Number(code) => code.to_string(),
721 lsp::NumberOrString::String(code) => code,
722 }),
723 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
724 message: mem::take(&mut diagnostic.message),
725 group_id,
726 is_primary: false,
727 is_valid: true,
728 is_disk_based: diagnostic
729 .source
730 .as_ref()
731 .map_or(false, |source| disk_based_sources.contains(source)),
732 },
733 });
734 }
735
736 let diagnostics = diagnostics_by_group_id
737 .into_values()
738 .flat_map(|mut diagnostics| {
739 let primary = diagnostics
740 .iter_mut()
741 .min_by_key(|entry| entry.diagnostic.severity)
742 .unwrap();
743 primary.diagnostic.is_primary = true;
744 diagnostics
745 })
746 .collect::<Vec<_>>();
747
748 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
749 Ok(())
750 }
751
    /// Stores diagnostics for a path, applies them to a matching open buffer
    /// (if any), and broadcasts a per-file summary when shared.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        // If a buffer for this path is open, push the diagnostics into it and
        // replicate the resulting operation to collaborators.
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        // Re-borrow: `send_buffer_update` above required `&mut self`.
        let this = self.as_local_mut().unwrap();
        let summary = DiagnosticSummary::new(&diagnostics);
        this.diagnostic_summaries
            .insert(worktree_path.clone(), summary.clone());
        this.diagnostics.insert(worktree_path.clone(), diagnostics);

        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));

        // When shared, forward the per-file summary counts to the server.
        if let Some(share) = this.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = this.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = this.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                path,
                                error_count: summary.error_count as u32,
                                warning_count: summary.warning_count as u32,
                                info_count: summary.info_count as u32,
                                hint_count: summary.hint_count as u32,
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
814
    /// Replicates a buffer operation to the other side of the collaboration
    /// session, queueing it for later retry if the send fails.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        // Local worktrees only replicate while shared; remote worktrees
        // always send to the host.
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        // Keep the operation so it can be re-sent later.
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
855}
856
impl WorktreeId {
    /// Builds an id from a local model handle id.
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    /// Builds an id from its wire (protobuf) representation.
    pub(crate) fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// Converts the id to its wire (protobuf) representation.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw integer value.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
874
875impl fmt::Display for WorktreeId {
876 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
877 self.0.fmt(f)
878 }
879}
880
/// An immutable view of a worktree's entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Incremented for each background scan pass.
    scan_id: usize,
    // Absolute path of the worktree root (empty for remote worktrees).
    abs_path: Arc<Path>,
    // File name of the root, used for display and fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    // Parsed gitignore files with the scan id in which each was last seen;
    // presumably keyed by the containing directory — TODO confirm.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path, plus an id-keyed index into them.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    // Shared counter for assigning ids to newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
}
894
/// A worktree backed by the local filesystem, kept current by a background
/// scanner task.
pub struct LocalWorktree {
    // Foreground copy of the scanner's snapshot (refreshed by `poll_snapshot`).
    snapshot: Snapshot,
    // Settings parsed from `.zed.toml` at the worktree root.
    config: WorktreeConfig,
    // Snapshot mutated by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Most recent scan state reported by the scanner.
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the scanner task alive; never read, hence the underscore.
    _background_scanner_task: Option<Task<()>>,
    // Pending re-poll scheduled while a scan is in progress.
    poll_task: Option<Task<()>>,
    // Present only while the worktree is shared with collaborators.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Open buffers keyed by buffer id; weak so closing a buffer drops it.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Buffers opened on behalf of each remote peer (strong handles).
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Latest diagnostics and their per-file summaries, keyed by path.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    // Buffer operations that failed to send and await retry.
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Languages registered via `register_language`, and their running
    // language servers keyed by language name.
    languages: Vec<Arc<Language>>,
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
916
/// State that exists only while a local worktree is shared with guests.
struct ShareState {
    project_id: u64,
    // Channel used to forward settled snapshots for replication.
    snapshots_tx: Sender<Snapshot>,
}
921
/// A worktree mirrored from a remote host over RPC.
pub struct RemoteWorktree {
    project_id: u64,
    // Foreground copy of the snapshot (refreshed by `poll_snapshot`).
    snapshot: Snapshot,
    // Receives snapshots rebuilt from incoming protocol updates.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Feeds incoming `UpdateWorktree` messages to the background applier.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    // This peer's replica id within the collaboration session.
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Open buffers keyed by buffer id; unloaded buffers queue operations.
    open_buffers: HashMap<usize, RemoteBuffer>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Buffer operations that failed to send and await retry.
    queued_operations: Vec<(u64, Operation)>,
}
936
/// In-flight buffer loads keyed by path; each watch resolves to the loaded
/// buffer, or a shared error, once the load completes.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
941
/// Per-worktree settings deserialized from `.zed.toml`.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins permitted to collaborate on this worktree.
    collaborators: Vec<String>,
}
946
947impl LocalWorktree {
    /// Constructs a local worktree model rooted at `path`.
    ///
    /// Returns the model together with the sender the background scanner uses
    /// to report its scan state; the caller (see `Worktree::open_local`) is
    /// responsible for actually starting the scanner.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry always lives at the empty relative path.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // Optional per-worktree configuration; missing or unparsable files
        // fall back to the default config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                languages: Default::default(),
                language_servers: Default::default(),
            };

            // Relay scan-state changes: refresh the snapshot, and once a scan
            // settles, forward the snapshot for replication if shared.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree model was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1056
1057 pub fn authorized_logins(&self) -> Vec<String> {
1058 self.config.collaborators.clone()
1059 }
1060
    /// The registry used to look up languages for files in this worktree.
    pub fn language_registry(&self) -> &LanguageRegistry {
        &self.language_registry
    }
1064
1065 pub fn languages(&self) -> &[Arc<Language>] {
1066 &self.languages
1067 }
1068
    /// Registers `language` with this worktree, starting (or reusing) its
    /// language server and wiring up diagnostic handling.
    ///
    /// Returns the language server, or `None` if one could not be started.
    pub fn register_language(
        &mut self,
        language: &Arc<Language>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
            self.languages.push(language.clone());
        }

        // Reuse an already-running server for this language, if any.
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            let disk_based_sources = language
                .disk_based_diagnostic_sources()
                .cloned()
                .unwrap_or_default();
            let disk_based_diagnostics_progress_token =
                language.disk_based_diagnostics_progress_token().cloned();
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
                smol::channel::unbounded();
            // Forward `publishDiagnostics` notifications into a channel so
            // they can be processed on the app thread.
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| {
                let has_disk_based_diagnostic_progress_token =
                    disk_based_diagnostics_progress_token.is_some();
                let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
                async move {
                    while let Ok(diagnostics) = diagnostics_rx.recv().await {
                        if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                            handle.update(&mut cx, |this, cx| {
                                this.update_diagnostics(diagnostics, &disk_based_sources, cx)
                                    .log_err();
                                // Without a progress token, each diagnostics
                                // batch counts as a completed disk-based pass.
                                if !has_disk_based_diagnostic_progress_token {
                                    smol::block_on(disk_based_diagnostics_done_tx.send(())).ok();
                                }
                            })
                        } else {
                            // The worktree was dropped; stop listening.
                            break;
                        }
                    }
                }
            })
            .detach();

            // Track nested begin/end progress notifications carrying the
            // disk-based-diagnostics token to detect when a pass finishes.
            let mut pending_disk_based_diagnostics: i32 = 0;
            language_server
                .on_notification::<lsp::notification::Progress, _>(move |params| {
                    let token = match params.token {
                        lsp::NumberOrString::Number(_) => None,
                        lsp::NumberOrString::String(token) => Some(token),
                    };

                    if token == disk_based_diagnostics_progress_token {
                        match params.value {
                            lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                                lsp::WorkDoneProgress::Begin(_) => {
                                    pending_disk_based_diagnostics += 1;
                                }
                                lsp::WorkDoneProgress::End(_) => {
                                    pending_disk_based_diagnostics -= 1;
                                    if pending_disk_based_diagnostics == 0 {
                                        smol::block_on(disk_based_diagnostics_done_tx.send(()))
                                            .ok();
                                    }
                                }
                                _ => {}
                            },
                        }
                    }
                })
                .detach();
            let rpc = self.client.clone();
            // When a disk-based pass completes, emit an event and notify
            // collaborators if the project is shared.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(()) = disk_based_diagnostics_done_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let message = handle.update(&mut cx, |this, cx| {
                            cx.emit(Event::DiskBasedDiagnosticsUpdated);
                            let this = this.as_local().unwrap();
                            this.share
                                .as_ref()
                                .map(|share| proto::DiskBasedDiagnosticsUpdated {
                                    project_id: share.project_id,
                                    worktree_id: this.id().to_proto(),
                                })
                        });

                        if let Some(message) = message {
                            rpc.send(message).await.log_err();
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }
1182
1183 fn get_open_buffer(
1184 &mut self,
1185 path: &Path,
1186 cx: &mut ModelContext<Worktree>,
1187 ) -> Option<ModelHandle<Buffer>> {
1188 let handle = cx.handle();
1189 let mut result = None;
1190 self.open_buffers.retain(|_buffer_id, buffer| {
1191 if let Some(buffer) = buffer.upgrade(cx) {
1192 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1193 if file.worktree == handle && file.path().as_ref() == path {
1194 result = Some(buffer);
1195 }
1196 }
1197 true
1198 } else {
1199 false
1200 }
1201 });
1202 result
1203 }
1204
    /// Loads the file at `path` from disk and creates a local buffer for it,
    /// attaching a language (and starting/reusing its language server) when
    /// one matches the file's path.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Read the file's contents and refresh its worktree entry.
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Take any diagnostics that arrived before the buffer existed,
            // and select a language (plus its server) for this file.
            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.remove(&path);
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                // Apply the pending diagnostics now, remembering the resulting
                // operation so it can be broadcast to collaborators below.
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                // Track the buffer weakly so `get_open_buffer` can find it.
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1251
    /// Handles a peer's request to open a buffer in this (shared) worktree,
    /// returning the buffer's full contents in protobuf form.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                // Retain a strong handle on behalf of the requesting peer so
                // the buffer stays alive until the peer closes it or leaves.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1277
1278 pub fn close_remote_buffer(
1279 &mut self,
1280 envelope: TypedEnvelope<proto::CloseBuffer>,
1281 cx: &mut ModelContext<Worktree>,
1282 ) -> Result<()> {
1283 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1284 shared_buffers.remove(&envelope.payload.buffer_id);
1285 cx.notify();
1286 }
1287
1288 Ok(())
1289 }
1290
1291 pub fn remove_collaborator(
1292 &mut self,
1293 peer_id: PeerId,
1294 replica_id: ReplicaId,
1295 cx: &mut ModelContext<Worktree>,
1296 ) {
1297 self.shared_buffers.remove(&peer_id);
1298 for (_, buffer) in &self.open_buffers {
1299 if let Some(buffer) = buffer.upgrade(cx) {
1300 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1301 }
1302 }
1303 cx.notify();
1304 }
1305
    /// Returns a future that resolves once the current background scan (if
    /// any) has finished.
    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            // Start from the current state, then keep polling the watch
            // channel until the state is no longer `Scanning` (or the
            // sender side is dropped, yielding `None`).
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Scanning) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }
1315
1316 fn is_scanning(&self) -> bool {
1317 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1318 true
1319 } else {
1320 false
1321 }
1322 }
1323
    /// Returns a clone of the current snapshot of this worktree's entries.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// The absolute path of the worktree's root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }

    /// Whether the (absolute) `path` lies inside this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1335
1336 fn absolutize(&self, path: &Path) -> PathBuf {
1337 if path.file_name().is_some() {
1338 self.snapshot.abs_path.join(path)
1339 } else {
1340 self.snapshot.abs_path.to_path_buf()
1341 }
1342 }
1343
    /// Reads the file at the worktree-relative `path` from disk, returning a
    /// `File` handle for it plus the loaded text.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Surface the refreshed background snapshot on the foreground.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1369
    /// Saves `text` to `path` and associates `buffer` with the resulting
    /// file, returning the new `File` metadata (used for "save as").
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer weakly so it can be found by path lookups.
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1394
    /// Writes `text` to `path` on disk and refreshes the corresponding
    /// snapshot entry, returning the updated entry.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Perform the disk write and entry refresh off the main thread.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Surface the refreshed background snapshot on the foreground.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1416
    /// Begins sharing this worktree under `project_id`: sends the initial
    /// snapshot to the server and spawns a task that streams snapshot diffs
    /// as the background scanner produces new snapshots.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        // Sharing is idempotent; a second call is a no-op.
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
        });

        // Background task: diff each incoming snapshot against the last one
        // that was successfully sent, and push the delta to the server.
        cx.background()
            .spawn({
                let rpc = rpc.clone();
                let snapshot = snapshot.clone();
                async move {
                    let mut prev_snapshot = snapshot;
                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                        let message =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                        match rpc.send(message).await {
                            // Only advance the baseline on a successful send.
                            Ok(()) => prev_snapshot = snapshot,
                            Err(err) => log::error!("error sending snapshot diff {}", err),
                        }
                    }
                }
            })
            .detach();

        // Serialize the initial snapshot off the main thread; it can be large.
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto()),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1465}
1466
1467fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1468 let contents = smol::block_on(fs.load(&abs_path))?;
1469 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1470 let mut builder = GitignoreBuilder::new(parent);
1471 for line in contents.lines() {
1472 builder.add_line(Some(abs_path.into()), line)?;
1473 }
1474 Ok(builder.build()?)
1475}
1476
/// Delegates to the underlying snapshot, so a `Worktree` of either kind
/// exposes all of `Snapshot`'s read-only methods directly.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1487
/// Local worktrees also expose `Snapshot`'s methods directly.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

/// Remote worktrees also expose `Snapshot`'s methods directly.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1503
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Debug output is just the snapshot's entry tree.
        self.snapshot.fmt(f)
    }
}
1509
1510impl RemoteWorktree {
    /// Returns a handle to the already-open buffer for `path`, if any,
    /// pruning `open_buffers` entries that can no longer be upgraded.
    ///
    /// NOTE(review): `RemoteBuffer::upgrade` also returns `None` for the
    /// `Operations` variant, so entries that are still queuing operations
    /// for a not-yet-loaded buffer get removed here too — confirm that
    /// discarding those pending operations is intended.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        existing_buffer
    }
1532
    /// Opens a buffer for `path` by requesting its contents from the host,
    /// then constructing a local replica of the remote buffer.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            // The worktree (and its entry for this path) must still exist.
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            // Re-upgrade after the await: the worktree may have been dropped
            // while the request was in flight.
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Apply any operations that arrived while the buffer was
                // still being opened.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }
1593
1594 pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1595 for (_, buffer) in self.open_buffers.drain() {
1596 if let RemoteBuffer::Loaded(buffer) = buffer {
1597 if let Some(buffer) = buffer.upgrade(cx) {
1598 buffer.update(cx, |buffer, cx| buffer.close(cx))
1599 }
1600 }
1601 }
1602 }
1603
    /// Returns a clone of the current snapshot of this worktree's entries.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1607
1608 pub fn update_from_remote(
1609 &mut self,
1610 envelope: TypedEnvelope<proto::UpdateWorktree>,
1611 cx: &mut ModelContext<Worktree>,
1612 ) -> Result<()> {
1613 let mut tx = self.updates_tx.clone();
1614 let payload = envelope.payload.clone();
1615 cx.background()
1616 .spawn(async move {
1617 tx.send(payload).await.expect("receiver runs to completion");
1618 })
1619 .detach();
1620
1621 Ok(())
1622 }
1623
1624 pub fn update_diagnostic_summary(
1625 &mut self,
1626 envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
1627 cx: &mut ModelContext<Worktree>,
1628 ) {
1629 let path: Arc<Path> = Path::new(&envelope.payload.path).into();
1630 self.diagnostic_summaries.insert(
1631 path.clone(),
1632 DiagnosticSummary {
1633 error_count: envelope.payload.error_count as usize,
1634 warning_count: envelope.payload.warning_count as usize,
1635 info_count: envelope.payload.info_count as usize,
1636 hint_count: envelope.payload.hint_count as usize,
1637 },
1638 );
1639 cx.emit(Event::DiagnosticsUpdated(path));
1640 }
1641
    /// Forwards the host's signal that disk-based diagnostics have been
    /// refreshed for this worktree.
    pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdated);
    }
1645
1646 pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1647 for (_, buffer) in &self.open_buffers {
1648 if let Some(buffer) = buffer.upgrade(cx) {
1649 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1650 }
1651 }
1652 cx.notify();
1653 }
1654}
1655
/// A buffer slot in a remote worktree: either operations queued for a buffer
/// that is still being opened, or a weak handle to the opened buffer.
enum RemoteBuffer {
    /// Operations that arrived before the buffer finished opening.
    Operations(Vec<Operation>),
    /// The opened buffer; held weakly so closing it elsewhere releases it.
    Loaded(WeakModelHandle<Buffer>),
}
1660
1661impl RemoteBuffer {
1662 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1663 match self {
1664 Self::Operations(_) => None,
1665 Self::Loaded(buffer) => buffer.upgrade(cx),
1666 }
1667 }
1668}
1669
1670impl Snapshot {
    /// The stable identifier of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
1674
1675 pub fn to_proto(&self) -> proto::Worktree {
1676 let root_name = self.root_name.clone();
1677 proto::Worktree {
1678 id: self.id.0 as u64,
1679 root_name,
1680 entries: self
1681 .entries_by_path
1682 .cursor::<()>()
1683 .filter(|e| !e.is_ignored)
1684 .map(Into::into)
1685 .collect(),
1686 }
1687 }
1688
    /// Computes the delta between this snapshot and an older one (`other`)
    /// as an `UpdateWorktree` message.
    ///
    /// Performs a merge-join over the two id-ordered entry sequences:
    /// entries only in `self` (or rescanned since `other`) are reported as
    /// updated, and entries only in `other` are reported as removed.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in `self`: a new entry.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: only send it if it was
                            // rescanned since `other` was built.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: it has been removed.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }
1753
    /// Applies an `UpdateWorktree` delta (as produced by `build_update`) to
    /// this snapshot, bumping its scan id.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, remove its old path from the path-keyed
            // tree before inserting the new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        // Both index trees are updated in a single batch edit apiece.
        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }
1788
    /// Total number of files in the worktree, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of files that are not gitignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }
1796
    /// Returns a traversal positioned at the `start_offset`-th entry, where
    /// the offset counts only entries matching the include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1819
    /// Returns a traversal positioned at `path` (or at the nearest entry
    /// after it, if `path` itself is absent).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1834
    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories) from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }
1842
    /// Iterates over every entry's path, skipping the root's empty path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }
1850
    /// Returns an iterator over the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Seek just past the parent itself so iteration starts at its
        // first descendant.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
1864
    /// The entry for the worktree root (its relative path is empty).
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
1872
1873 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1874 let path = path.as_ref();
1875 self.traverse_from_path(true, true, path)
1876 .entry()
1877 .and_then(|entry| {
1878 if entry.path.as_ref() == path {
1879 Some(entry)
1880 } else {
1881 None
1882 }
1883 })
1884 }
1885
    /// Looks up an entry by id, going through the id → path index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if such an entry exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
1894
    /// Inserts (or replaces) a single entry in the snapshot. If the entry is
    /// a `.gitignore` file, its rules are (re)loaded into `self.ignores`.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A broken ignore file shouldn't fail the scan; log it
                    // and carry on.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Keep ids stable across rescans before updating both index trees.
        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }
1927
    /// Fills in the children of a directory that was previously inserted in
    /// the `PendingDir` state, marking it as a fully-scanned `Dir`.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            // Callers only populate directories that are still pending.
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }
1965
    /// Re-assigns `entry`'s id to keep ids stable across rescans: prefer the
    /// id of a just-removed entry with the same inode (a rename/recreate),
    /// otherwise the id of the existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }
1973
    /// Removes `path` and everything beneath it from the snapshot.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before path], [path and its
            // descendants], and [after], then stitch before + after together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode so a re-created file
            // can be assigned its old id (see `reuse_entry_id`).
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the directory's ignore data as updated during this scan.
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }
2002
2003 fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2004 let mut new_ignores = Vec::new();
2005 for ancestor in path.ancestors().skip(1) {
2006 if let Some((ignore, _)) = self.ignores.get(ancestor) {
2007 new_ignores.push((ancestor, Some(ignore.clone())));
2008 } else {
2009 new_ignores.push((ancestor, None));
2010 }
2011 }
2012
2013 let mut ignore_stack = IgnoreStack::none();
2014 for (parent_path, ignore) in new_ignores.into_iter().rev() {
2015 if ignore_stack.is_path_ignored(&parent_path, true) {
2016 ignore_stack = IgnoreStack::all();
2017 break;
2018 } else if let Some(ignore) = ignore {
2019 ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
2020 }
2021 }
2022
2023 if ignore_stack.is_path_ignored(path, is_dir) {
2024 ignore_stack = IgnoreStack::all();
2025 }
2026
2027 ignore_stack
2028 }
2029}
2030
2031impl fmt::Debug for Snapshot {
2032 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2033 for entry in self.entries_by_path.cursor::<()>() {
2034 for _ in entry.path.ancestors().skip(1) {
2035 write!(f, " ")?;
2036 }
2037 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2038 }
2039 Ok(())
2040 }
2041}
2042
/// A handle to a file within a worktree; the `language::File` implementation
/// backing buffers opened from worktree entries.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree entry's id, or `None` once the file has been deleted.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    /// Absolute path of the worktree root containing this file.
    worktree_path: Arc<Path>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    /// Whether the file lives on this machine (vs. on a collaborator's).
    is_local: bool,
}
2052
2053impl language::File for File {
    /// The file's last-known modification time.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }
2061
2062 fn abs_path(&self) -> Option<PathBuf> {
2063 if self.is_local {
2064 Some(self.worktree_path.join(&self.path))
2065 } else {
2066 None
2067 }
2068 }
2069
2070 fn full_path(&self) -> PathBuf {
2071 let mut full_path = PathBuf::new();
2072 if let Some(worktree_name) = self.worktree_path.file_name() {
2073 full_path.push(worktree_name);
2074 }
2075 full_path.push(&self.path);
2076 full_path
2077 }
2078
    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            // An empty relative path means the worktree root; fall back to
            // the root directory's own name.
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }
2087
    /// A file is considered deleted once it no longer has a worktree entry.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }
2091
    /// Saves the buffer's contents: local worktrees write to disk (and tell
    /// the server when shared); remote worktrees ask the host to save.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                // Only shared worktrees broadcast the save to the server.
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    // The host reports back the version and mtime it saved.
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }
2141
    /// Reads the file's current contents from disk (local worktrees only).
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }
2151
    /// Forwards a buffer operation to the worktree so it can be broadcast
    /// to collaborators.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }
2157
    /// Notifies the host that this replica no longer has the buffer open
    /// (remote worktrees only; a local close needs no notification).
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.id().to_proto();
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        // Best-effort: a failed close message is only logged.
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }
2181
    /// Enables downcasting back to this concrete type via `File::from_dyn`.
    fn as_any(&self) -> &dyn Any {
        self
    }
2185}
2186
impl File {
    /// Downcasts a generic `language::File` to this worktree-backed
    /// implementation, if that is what it actually is.
    pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
        file.and_then(|f| f.as_any().downcast_ref())
    }

    /// The id of the worktree that contains this file.
    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
        self.worktree.read(cx).id()
    }
}
2196
/// A file or directory in a worktree, as recorded by the background scanner.
#[derive(Clone, Debug)]
pub struct Entry {
    pub id: usize,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry matches a gitignore rule.
    pub is_ignored: bool,
}
2207
/// The kind of a worktree entry.
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, with the character bag of its path for fuzzy matching.
    File(CharBag),
}
2214
impl Entry {
    /// Creates an entry from filesystem metadata, assigning it a fresh id
    /// from the shared `next_entry_id` counter.
    fn new(
        path: Arc<Path>,
        metadata: &fs::Metadata,
        next_entry_id: &AtomicUsize,
        root_char_bag: CharBag,
    ) -> Self {
        Self {
            id: next_entry_id.fetch_add(1, SeqCst),
            kind: if metadata.is_dir {
                // Directories start out pending until their children are scanned.
                EntryKind::PendingDir
            } else {
                EntryKind::File(char_bag_for_path(root_char_bag, &path))
            },
            path,
            inode: metadata.inode,
            mtime: metadata.mtime,
            is_symlink: metadata.is_symlink,
            is_ignored: false,
        }
    }

    /// Whether this entry is a directory (scanned or still pending).
    pub fn is_dir(&self) -> bool {
        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
    }

    /// Whether this entry is a regular file.
    pub fn is_file(&self) -> bool {
        matches!(self.kind, EntryKind::File(_))
    }
}
2245
2246impl sum_tree::Item for Entry {
2247 type Summary = EntrySummary;
2248
2249 fn summary(&self) -> Self::Summary {
2250 let visible_count = if self.is_ignored { 0 } else { 1 };
2251 let file_count;
2252 let visible_file_count;
2253 if self.is_file() {
2254 file_count = 1;
2255 visible_file_count = visible_count;
2256 } else {
2257 file_count = 0;
2258 visible_file_count = 0;
2259 }
2260
2261 EntrySummary {
2262 max_path: self.path.clone(),
2263 count: 1,
2264 visible_count,
2265 file_count,
2266 visible_file_count,
2267 }
2268 }
2269}
2270
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2278
/// Aggregated statistics over a subtree of `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (path-ordered) path in the subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries not matched by any gitignore rule.
    visible_count: usize,
    /// Entries that are regular files.
    file_count: usize,
    /// Files that are not ignored.
    visible_file_count: usize,
}
2287
impl Default for EntrySummary {
    /// The identity element for `add_summary`: zero counts, empty path.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2299
2300impl sum_tree::Summary for EntrySummary {
2301 type Context = ();
2302
2303 fn add_summary(&mut self, rhs: &Self, _: &()) {
2304 self.max_path = rhs.max_path.clone();
2305 self.visible_count += rhs.visible_count;
2306 self.file_count += rhs.file_count;
2307 self.visible_file_count += rhs.visible_file_count;
2308 }
2309}
2310
/// Secondary-index record mapping a stable entry id back to its current path,
/// used to preserve entry identity across renames.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this record was last updated.
    scan_id: usize,
}
2318
2319impl sum_tree::Item for PathEntry {
2320 type Summary = PathEntrySummary;
2321
2322 fn summary(&self) -> Self::Summary {
2323 PathEntrySummary { max_id: self.id }
2324 }
2325}
2326
2327impl sum_tree::KeyedItem for PathEntry {
2328 type Key = usize;
2329
2330 fn key(&self) -> Self::Key {
2331 self.id
2332 }
2333}
2334
/// Summary for the id-keyed index: the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2339
2340impl sum_tree::Summary for PathEntrySummary {
2341 type Context = ();
2342
2343 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2344 self.max_id = summary.max_id;
2345 }
2346}
2347
2348impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
2349 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2350 *self = summary.max_id;
2351 }
2352}
2353
/// Sort key for entries in the path-ordered sum tree: the entry's
/// worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2356
2357impl Default for PathKey {
2358 fn default() -> Self {
2359 Self(Path::new("").into())
2360 }
2361}
2362
2363impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2364 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2365 self.0 = summary.max_path.clone();
2366 }
2367}
2368
/// Background task that performs the initial recursive scan of a local
/// worktree and then keeps its shared snapshot up to date in response to
/// file-system events.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the foreground `LocalWorktree`; mutated in place.
    snapshot: Arc<Mutex<Snapshot>>,
    // Channel used to report Scanning/Idle/Err transitions to the worktree.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2375
impl BackgroundScanner {
    /// Creates a scanner that populates and maintains `snapshot`, reporting
    /// scan-state transitions on `notify`.
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }

    /// Returns the worktree root's absolute path.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// Returns a clone of the current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Entry point for the scanner task: performs the initial recursive scan,
    /// then processes batches of file-system events until the receiving side
    /// of `notify` is dropped (each `send` failing means the worktree is gone).
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        // Main event loop: bracket each batch of FS events with
        // Scanning/Idle notifications.
        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }

    /// Performs the initial scan of the worktree. If the root is a directory,
    /// its contents are scanned breadth-first by a pool of workers; a
    /// single-file worktree needs no scanning beyond its root entry.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            // Copy what we need out of the snapshot so the lock isn't held
            // across any await points below.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            // Seed the work queue with the root directory. Each job carries a
            // clone of the queue's sender so it can enqueue subdirectories;
            // the queue closes once every outstanding sender is dropped.
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            drop(tx);

            // One worker per CPU drains the queue until all jobs complete.
            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }

    /// Scans a single directory: builds entries for its children, computes
    /// their ignore status, stores them in the snapshot, and enqueues one new
    /// `ScanJob` per subdirectory.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        // The `.gitignore` (if any) found in this directory, to be recorded
        // alongside the directory's entries.
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    // Skip unreadable directory entries rather than aborting
                    // the whole directory's scan.
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                // The child disappeared between listing and stat; skip it.
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current
                // directory. Because `.gitignore` starts with a `.`, it tends
                // to appear early in the directory listing, so there should
                // rarely be many such entries. Update the ignore stack
                // associated with any new jobs as well.
                //
                // NOTE(review): `new_jobs` is expected to stay parallel with
                // the directory entries in `new_entries`, which the zipped
                // iteration below relies on.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                // Children of an ignored directory are unconditionally
                // ignored, so they get the "all" stack.
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        // Publish this directory's children, then enqueue subdirectory jobs
        // for other workers to pick up.
        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }

    /// Applies a batch of file-system events to the snapshot. Returns false
    /// if the worktree root can no longer be resolved (e.g. it was deleted),
    /// which terminates the scanner's event loop.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Work on a private copy of the snapshot; it's swapped back in once
        // the batch's entries have been updated.
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Drop events for paths whose ancestor also has an event: rescanning
        // the ancestor covers the descendant. (`Path::starts_with` compares
        // whole components, so "/a/bc" does not start with "/a/b".)
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First remove every affected path, then re-insert whatever still
        // exists on disk.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    // Directories need a full rescan of their contents.
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists; its removal above stands.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }

    /// Recomputes ignore status for subtrees whose `.gitignore` files changed
    /// in the most recent scan, and forgets ignores whose files were deleted.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // An ignore touched by the current scan means its subtree's
            // statuses may be stale.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants of a path we're already going to process; the
            // recursive jobs below cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        // Workers drain the queue; each job enqueues its subdirectories.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes ignore status for the direct children of `job.path`,
    /// writing back only the entries whose status actually changed, and
    /// enqueues recursive jobs for subdirectories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply both index updates under a single lock acquisition.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
}
2779
2780async fn refresh_entry(
2781 fs: &dyn Fs,
2782 snapshot: &Mutex<Snapshot>,
2783 path: Arc<Path>,
2784 abs_path: &Path,
2785) -> Result<Entry> {
2786 let root_char_bag;
2787 let next_entry_id;
2788 {
2789 let snapshot = snapshot.lock();
2790 root_char_bag = snapshot.root_char_bag;
2791 next_entry_id = snapshot.next_entry_id.clone();
2792 }
2793 let entry = Entry::new(
2794 path,
2795 &fs.metadata(abs_path)
2796 .await?
2797 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2798 &next_entry_id,
2799 root_char_bag,
2800 );
2801 Ok(snapshot.lock().insert_entry(entry, fs))
2802}
2803
2804fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2805 let mut result = root_char_bag;
2806 result.extend(
2807 path.to_string_lossy()
2808 .chars()
2809 .map(|c| c.to_ascii_lowercase()),
2810 );
2811 result
2812}
2813
/// A unit of work for the directory-scanning worker pool: one directory to
/// enumerate.
struct ScanJob {
    abs_path: PathBuf,
    // Path relative to the worktree root.
    path: Arc<Path>,
    // Ignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Sender back into the queue, so subdirectories can be enqueued; the
    // queue closes once all outstanding jobs (and their senders) are done.
    scan_queue: Sender<ScanJob>,
}
2820
/// A unit of work for the ignore-status worker pool: recompute ignore status
/// for the children of one directory.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Sender back into the queue, allowing recursion into subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2826
/// Test-only extension methods for worktree model handles.
pub trait WorktreeHandle {
    // Waits until all pending file-system events for the worktree have been
    // processed; see the impl for details.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2834
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        // Create and then remove a sentinel file, waiting for the worktree to
        // observe both changes; by then, any earlier events must have been
        // drained as well.
        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2867
/// Cursor dimension tracking how many entries (under various filters) have
/// been passed while seeking through the entry tree.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest path passed so far.
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2876
2877impl<'a> TraversalProgress<'a> {
2878 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2879 match (include_ignored, include_dirs) {
2880 (true, true) => self.count,
2881 (true, false) => self.file_count,
2882 (false, true) => self.visible_count,
2883 (false, false) => self.visible_file_count,
2884 }
2885 }
2886}
2887
2888impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2889 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2890 self.max_path = summary.max_path.as_ref();
2891 self.count += summary.count;
2892 self.visible_count += summary.visible_count;
2893 self.file_count += summary.file_count;
2894 self.visible_file_count += summary.visible_file_count;
2895 }
2896}
2897
2898impl<'a> Default for TraversalProgress<'a> {
2899 fn default() -> Self {
2900 Self {
2901 max_path: Path::new(""),
2902 count: 0,
2903 visible_count: 0,
2904 file_count: 0,
2905 visible_file_count: 0,
2906 }
2907 }
2908}
2909
/// Filtered, ordered iterator/cursor over a snapshot's entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether gitignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded (otherwise, files only).
    include_dirs: bool,
}
2915
impl<'a> Traversal<'a> {
    /// Advances to the next entry that passes this traversal's filters.
    /// Returns the result of the underlying seek.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward so that `offset` filtered entries precede the cursor.
    /// `Bias::Right` positions the cursor on the entry at that offset rather
    /// than just before it.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, stopping at the next
    /// entry that is not a descendant of it and that passes the filters.
    /// Returns false if no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // `PathSuccessor` compares Greater while still inside `entry.path`'s
            // subtree, so this lands on the first non-descendant.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// How many filtered entries precede the cursor's current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2961
2962impl<'a> Iterator for Traversal<'a> {
2963 type Item = &'a Entry;
2964
2965 fn next(&mut self) -> Option<Self::Item> {
2966 if let Some(item) = self.entry() {
2967 self.advance();
2968 Some(item)
2969 } else {
2970 None
2971 }
2972 }
2973}
2974
/// Seek targets understood by `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first entry that is not a descendant of the given path.
    PathSuccessor(&'a Path),
    /// Seek so that `count` entries (under the given filters) precede the
    /// cursor.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2985
2986impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2987 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2988 match self {
2989 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2990 TraversalTarget::PathSuccessor(path) => {
2991 if !cursor_location.max_path.starts_with(path) {
2992 Ordering::Equal
2993 } else {
2994 Ordering::Greater
2995 }
2996 }
2997 TraversalTarget::Count {
2998 count,
2999 include_dirs,
3000 include_ignored,
3001 } => Ord::cmp(
3002 count,
3003 &cursor_location.count(*include_dirs, *include_ignored),
3004 ),
3005 }
3006 }
3007}
3008
/// Iterator over the direct children of a directory entry, built on top of a
/// `Traversal` positioned at the first child.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3013
3014impl<'a> Iterator for ChildEntriesIter<'a> {
3015 type Item = &'a Entry;
3016
3017 fn next(&mut self) -> Option<Self::Item> {
3018 if let Some(item) = self.traversal.entry() {
3019 if item.path.starts_with(&self.parent_path) {
3020 self.traversal.advance_to_sibling();
3021 return Some(item);
3022 }
3023 }
3024 None
3025 }
3026}
3027
3028impl<'a> From<&'a Entry> for proto::Entry {
3029 fn from(entry: &'a Entry) -> Self {
3030 Self {
3031 id: entry.id as u64,
3032 is_dir: entry.is_dir(),
3033 path: entry.path.to_string_lossy().to_string(),
3034 inode: entry.inode,
3035 mtime: Some(entry.mtime.into()),
3036 is_symlink: entry.is_symlink,
3037 is_ignored: entry.is_ignored,
3038 }
3039 }
3040}
3041
3042impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3043 type Error = anyhow::Error;
3044
3045 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3046 if let Some(mtime) = entry.mtime {
3047 let kind = if entry.is_dir {
3048 EntryKind::Dir
3049 } else {
3050 let mut char_bag = root_char_bag.clone();
3051 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3052 EntryKind::File(char_bag)
3053 };
3054 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3055 Ok(Entry {
3056 id: entry.id as usize,
3057 kind,
3058 path: path.clone(),
3059 inode: entry.inode,
3060 mtime: mtime.into(),
3061 is_symlink: entry.is_symlink,
3062 is_ignored: entry.is_ignored,
3063 })
3064 } else {
3065 Err(anyhow!(
3066 "missing mtime in remote worktree entry {:?}",
3067 entry.path
3068 ))
3069 }
3070 }
3071}
3072
/// Conversion into the editor's UTF-16 point representation.
trait ToPointUtf16 {
    fn to_point_utf16(self) -> PointUtf16;
}
3076
3077impl ToPointUtf16 for lsp::Position {
3078 fn to_point_utf16(self) -> PointUtf16 {
3079 PointUtf16::new(self.line, self.character)
3080 }
3081}
3082
3083fn diagnostic_ranges<'a>(
3084 diagnostic: &'a lsp::Diagnostic,
3085 abs_path: &'a Path,
3086) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
3087 diagnostic
3088 .related_information
3089 .iter()
3090 .flatten()
3091 .filter_map(move |info| {
3092 if info.location.uri.to_file_path().ok()? == abs_path {
3093 let info_start = PointUtf16::new(
3094 info.location.range.start.line,
3095 info.location.range.start.character,
3096 );
3097 let info_end = PointUtf16::new(
3098 info.location.range.end.line,
3099 info.location.range.end.character,
3100 );
3101 Some(info_start..info_end)
3102 } else {
3103 None
3104 }
3105 })
3106 .chain(Some(
3107 diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
3108 ))
3109}
3110
3111#[cfg(test)]
3112mod tests {
3113 use super::*;
3114 use crate::fs::FakeFs;
3115 use anyhow::Result;
3116 use client::test::{FakeHttpClient, FakeServer};
3117 use fs::RealFs;
3118 use gpui::test::subscribe;
3119 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3120 use language::{Diagnostic, LanguageConfig};
3121 use lsp::Url;
3122 use rand::prelude::*;
3123 use serde_json::json;
3124 use std::{cell::RefCell, rc::Rc};
3125 use std::{
3126 env,
3127 fmt::Write,
3128 time::{SystemTime, UNIX_EPOCH},
3129 };
3130 use text::Point;
3131 use unindent::Unindent as _;
3132 use util::test::temp_tree;
3133
    // Verifies that the default traversal (ignored entries excluded) lists
    // entries in path order and omits paths matched by a `.gitignore`.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        // `a/b` is gitignored; `a/c` is not.
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // `a/b` should be absent because it is ignored.
        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3180
    // Verifies that saving a buffer writes its contents back to the file on
    // disk within a directory worktree.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Large edit (~160 KB) so the save exercises non-trivial content.
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3214
    // Verifies saving when the worktree root is a single file rather than a
    // directory; the file is opened via the empty relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // A single-file worktree contains exactly one file: its root.
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3253
    // Verifies that renames/deletions on disk preserve entry ids and update
    // open buffers' paths, and that a remote worktree converges to the local
    // one after applying a snapshot-diff update.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client.clone());
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Helpers: open a buffer for a path, and fetch an entry's stable id.
        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(),
            Client::new(http_client.clone()),
            user_store,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Renamed entries keep their original ids (rename detection).
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers track their files' new paths.
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            // Only the removed file's buffer is marked deleted.
            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3409
    // Verifies that ignore status is computed for entries discovered both
    // during the initial scan and during later rescans triggered by FS events.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        // Initial scan: statuses reflect the .gitignore rules.
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Files created after the initial scan must pick up ignore status too.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            // `.git` is treated as ignored even without a matching rule.
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3461
3462 #[gpui::test]
3463 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3464 let user_id = 100;
3465 let http_client = FakeHttpClient::with_404_response();
3466 let mut client = Client::new(http_client);
3467 let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3468 let user_store = server.build_user_store(client.clone(), &mut cx).await;
3469
3470 let fs = Arc::new(FakeFs::new());
3471 fs.insert_tree(
3472 "/the-dir",
3473 json!({
3474 "a.txt": "a-contents",
3475 "b.txt": "b-contents",
3476 }),
3477 )
3478 .await;
3479
3480 let worktree = Worktree::open_local(
3481 client.clone(),
3482 user_store,
3483 "/the-dir".as_ref(),
3484 fs,
3485 Default::default(),
3486 &mut cx.to_async(),
3487 )
3488 .await
3489 .unwrap();
3490
3491 // Spawn multiple tasks to open paths, repeating some paths.
3492 let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3493 (
3494 worktree.open_buffer("a.txt", cx),
3495 worktree.open_buffer("b.txt", cx),
3496 worktree.open_buffer("a.txt", cx),
3497 )
3498 });
3499
3500 let buffer_a_1 = buffer_a_1.await.unwrap();
3501 let buffer_a_2 = buffer_a_2.await.unwrap();
3502 let buffer_b = buffer_b.await.unwrap();
3503 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3504 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3505
3506 // There is only one buffer per path.
3507 let buffer_a_id = buffer_a_1.id();
3508 assert_eq!(buffer_a_2.id(), buffer_a_id);
3509
3510 // Open the same path again while it is still open.
3511 drop(buffer_a_1);
3512 let buffer_a_3 = worktree
3513 .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3514 .await
3515 .unwrap();
3516
3517 // There's still only one buffer per path.
3518 assert_eq!(buffer_a_3.id(), buffer_a_id);
3519 }
3520
3521 #[gpui::test]
3522 async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
3523 use std::fs;
3524
3525 let dir = temp_tree(json!({
3526 "file1": "abc",
3527 "file2": "def",
3528 "file3": "ghi",
3529 }));
3530 let http_client = FakeHttpClient::with_404_response();
3531 let client = Client::new(http_client.clone());
3532 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3533
3534 let tree = Worktree::open_local(
3535 client,
3536 user_store,
3537 dir.path(),
3538 Arc::new(RealFs),
3539 Default::default(),
3540 &mut cx.to_async(),
3541 )
3542 .await
3543 .unwrap();
3544 tree.flush_fs_events(&cx).await;
3545 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3546 .await;
3547
3548 let buffer1 = tree
3549 .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3550 .await
3551 .unwrap();
3552 let events = Rc::new(RefCell::new(Vec::new()));
3553
3554 // initially, the buffer isn't dirty.
3555 buffer1.update(&mut cx, |buffer, cx| {
3556 cx.subscribe(&buffer1, {
3557 let events = events.clone();
3558 move |_, _, event, _| events.borrow_mut().push(event.clone())
3559 })
3560 .detach();
3561
3562 assert!(!buffer.is_dirty());
3563 assert!(events.borrow().is_empty());
3564
3565 buffer.edit(vec![1..2], "", cx);
3566 });
3567
3568 // after the first edit, the buffer is dirty, and emits a dirtied event.
3569 buffer1.update(&mut cx, |buffer, cx| {
3570 assert!(buffer.text() == "ac");
3571 assert!(buffer.is_dirty());
3572 assert_eq!(
3573 *events.borrow(),
3574 &[language::Event::Edited, language::Event::Dirtied]
3575 );
3576 events.borrow_mut().clear();
3577 buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
3578 });
3579
3580 // after saving, the buffer is not dirty, and emits a saved event.
3581 buffer1.update(&mut cx, |buffer, cx| {
3582 assert!(!buffer.is_dirty());
3583 assert_eq!(*events.borrow(), &[language::Event::Saved]);
3584 events.borrow_mut().clear();
3585
3586 buffer.edit(vec![1..1], "B", cx);
3587 buffer.edit(vec![2..2], "D", cx);
3588 });
3589
3590 // after editing again, the buffer is dirty, and emits another dirty event.
3591 buffer1.update(&mut cx, |buffer, cx| {
3592 assert!(buffer.text() == "aBDc");
3593 assert!(buffer.is_dirty());
3594 assert_eq!(
3595 *events.borrow(),
3596 &[
3597 language::Event::Edited,
3598 language::Event::Dirtied,
3599 language::Event::Edited,
3600 ],
3601 );
3602 events.borrow_mut().clear();
3603
3604 // TODO - currently, after restoring the buffer to its
3605 // previously-saved state, the is still considered dirty.
3606 buffer.edit([1..3], "", cx);
3607 assert!(buffer.text() == "ac");
3608 assert!(buffer.is_dirty());
3609 });
3610
3611 assert_eq!(*events.borrow(), &[language::Event::Edited]);
3612
3613 // When a file is deleted, the buffer is considered dirty.
3614 let events = Rc::new(RefCell::new(Vec::new()));
3615 let buffer2 = tree
3616 .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
3617 .await
3618 .unwrap();
3619 buffer2.update(&mut cx, |_, cx| {
3620 cx.subscribe(&buffer2, {
3621 let events = events.clone();
3622 move |_, _, event, _| events.borrow_mut().push(event.clone())
3623 })
3624 .detach();
3625 });
3626
3627 fs::remove_file(dir.path().join("file2")).unwrap();
3628 buffer2.condition(&cx, |b, _| b.is_dirty()).await;
3629 assert_eq!(
3630 *events.borrow(),
3631 &[language::Event::Dirtied, language::Event::FileHandleChanged]
3632 );
3633
3634 // When a file is already dirty when deleted, we don't emit a Dirtied event.
3635 let events = Rc::new(RefCell::new(Vec::new()));
3636 let buffer3 = tree
3637 .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
3638 .await
3639 .unwrap();
3640 buffer3.update(&mut cx, |_, cx| {
3641 cx.subscribe(&buffer3, {
3642 let events = events.clone();
3643 move |_, _, event, _| events.borrow_mut().push(event.clone())
3644 })
3645 .detach();
3646 });
3647
3648 tree.flush_fs_events(&cx).await;
3649 buffer3.update(&mut cx, |buffer, cx| {
3650 buffer.edit(Some(0..0), "x", cx);
3651 });
3652 events.borrow_mut().clear();
3653 fs::remove_file(dir.path().join("file3")).unwrap();
3654 buffer3
3655 .condition(&cx, |_, _| !events.borrow().is_empty())
3656 .await;
3657 assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
3658 cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
3659 }
3660
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        // Verifies how an open buffer reacts to its backing file changing on
        // disk: a clean buffer reloads its contents, while a dirty buffer
        // keeps its local edits and is flagged as conflicted.
        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial directory scan before opening the buffer.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // After the reload, the buffer is still clean: a disk-driven reload
        // must not mark it dirty or conflicted.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3763
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // Drives a fake language server by hand: publishes diagnostics while
        // disk-based-diagnostics progress reports are in flight, and checks
        // that the worktree emits the expected events in the expected order.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        let mut events = subscribe(&tree, &mut cx);

        // Nest progress reports: after these four calls, two "start"s are
        // still outstanding, so disk-based diagnostics are not yet complete.
        fake_server.start_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        // Publish a single error for a.rs while progress is still underway.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;

        // The per-path diagnostics update is observable immediately.
        let event = events.next().await.unwrap();
        assert_eq!(
            event,
            Event::DiagnosticsUpdated(Arc::from(Path::new("a.rs")))
        );

        // Only once every outstanding progress report has ended does the
        // worktree emit the disk-based-diagnostics-finished event.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;

        let event = events.next().await.unwrap();
        assert_eq!(event, Event::DiskBasedDiagnosticsUpdated);

        // The published diagnostic is attached to the buffer for a.rs.
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
3868
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that LSP diagnostics linked via `related_information` are
        // grouped: each group has one primary entry plus its hint entries,
        // all sharing a `group_id`.
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Two diagnostic groups, mimicking rustc-style output:
        // - "error 1" (warning) with one hint pointing back at it;
        // - "error 2" (error) with two hints; primaries and hints reference
        //   each other through `related_information`.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // Diagnostics come back ordered by buffer position; group 0 holds
        // "error 1" (primary) + its hint, group 1 holds "error 2" (primary)
        // + its two hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched individually by its id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
4133
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Randomized test: mutate a real directory tree while a
        // BackgroundScanner processes (possibly delayed, batched) fs events,
        // then check the scanner's snapshot against its invariants, against a
        // fresh rescan, and against incremental updates applied to earlier
        // snapshots.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the tree with `initial_entries` random insertions
        // (probability 1.0 forces the insertion branch every time).
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // An empty snapshot containing only the root entry; the scanner
        // fills it in, and a clone of it seeds the rescan below.
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave tree mutations with delivery of random-sized batches of
        // the accumulated events, occasionally capturing a snapshot to replay
        // updates against later.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush all remaining events so the scanner reaches a settled state.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan of the final tree must agree with the incrementally
        // maintained snapshot.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every intermediate snapshot can be brought up to date by applying
        // a serialized update, with or without ignored entries included.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries first, to simulate a snapshot that
                // never contained them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4250
4251 fn randomly_mutate_tree(
4252 root_path: &Path,
4253 insertion_probability: f64,
4254 rng: &mut impl Rng,
4255 ) -> Result<Vec<fsevent::Event>> {
4256 let root_path = root_path.canonicalize().unwrap();
4257 let (dirs, files) = read_dir_recursive(root_path.clone());
4258
4259 let mut events = Vec::new();
4260 let mut record_event = |path: PathBuf| {
4261 events.push(fsevent::Event {
4262 event_id: SystemTime::now()
4263 .duration_since(UNIX_EPOCH)
4264 .unwrap()
4265 .as_secs(),
4266 flags: fsevent::StreamFlags::empty(),
4267 path,
4268 });
4269 };
4270
4271 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4272 let path = dirs.choose(rng).unwrap();
4273 let new_path = path.join(gen_name(rng));
4274
4275 if rng.gen() {
4276 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4277 std::fs::create_dir(&new_path)?;
4278 } else {
4279 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4280 std::fs::write(&new_path, "")?;
4281 }
4282 record_event(new_path);
4283 } else if rng.gen_bool(0.05) {
4284 let ignore_dir_path = dirs.choose(rng).unwrap();
4285 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4286
4287 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4288 let files_to_ignore = {
4289 let len = rng.gen_range(0..=subfiles.len());
4290 subfiles.choose_multiple(rng, len)
4291 };
4292 let dirs_to_ignore = {
4293 let len = rng.gen_range(0..subdirs.len());
4294 subdirs.choose_multiple(rng, len)
4295 };
4296
4297 let mut ignore_contents = String::new();
4298 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4299 write!(
4300 ignore_contents,
4301 "{}\n",
4302 path_to_ignore
4303 .strip_prefix(&ignore_dir_path)?
4304 .to_str()
4305 .unwrap()
4306 )
4307 .unwrap();
4308 }
4309 log::info!(
4310 "Creating {:?} with contents:\n{}",
4311 ignore_path.strip_prefix(&root_path)?,
4312 ignore_contents
4313 );
4314 std::fs::write(&ignore_path, ignore_contents).unwrap();
4315 record_event(ignore_path);
4316 } else {
4317 let old_path = {
4318 let file_path = files.choose(rng);
4319 let dir_path = dirs[1..].choose(rng);
4320 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4321 };
4322
4323 let is_rename = rng.gen();
4324 if is_rename {
4325 let new_path_parent = dirs
4326 .iter()
4327 .filter(|d| !d.starts_with(old_path))
4328 .choose(rng)
4329 .unwrap();
4330
4331 let overwrite_existing_dir =
4332 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4333 let new_path = if overwrite_existing_dir {
4334 std::fs::remove_dir_all(&new_path_parent).ok();
4335 new_path_parent.to_path_buf()
4336 } else {
4337 new_path_parent.join(gen_name(rng))
4338 };
4339
4340 log::info!(
4341 "Renaming {:?} to {}{:?}",
4342 old_path.strip_prefix(&root_path)?,
4343 if overwrite_existing_dir {
4344 "overwrite "
4345 } else {
4346 ""
4347 },
4348 new_path.strip_prefix(&root_path)?
4349 );
4350 std::fs::rename(&old_path, &new_path)?;
4351 record_event(old_path.clone());
4352 record_event(new_path);
4353 } else if old_path.is_dir() {
4354 let (dirs, files) = read_dir_recursive(old_path.clone());
4355
4356 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4357 std::fs::remove_dir_all(&old_path).unwrap();
4358 for file in files {
4359 record_event(file);
4360 }
4361 for dir in dirs {
4362 record_event(dir);
4363 }
4364 } else {
4365 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4366 std::fs::remove_file(old_path).unwrap();
4367 record_event(old_path.clone());
4368 }
4369 }
4370
4371 Ok(events)
4372 }
4373
4374 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4375 let child_entries = std::fs::read_dir(&path).unwrap();
4376 let mut dirs = vec![path];
4377 let mut files = Vec::new();
4378 for child_entry in child_entries {
4379 let child_path = child_entry.unwrap().path();
4380 if child_path.is_dir() {
4381 let (child_dirs, child_files) = read_dir_recursive(child_path);
4382 dirs.extend(child_dirs);
4383 files.extend(child_files);
4384 } else {
4385 files.push(child_path);
4386 }
4387 }
4388 (dirs, files)
4389 }
4390
4391 fn gen_name(rng: &mut impl Rng) -> String {
4392 (0..6)
4393 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4394 .map(char::from)
4395 .collect()
4396 }
4397
    impl Snapshot {
        /// Test-only consistency checks over a snapshot's internal indices.
        ///
        /// Verifies that `files(...)` iteration agrees with the by-path entry
        /// store, that a top-down walk via `child_entries` visits the same
        /// paths in the same order as a linear scan of `entries_by_path`, and
        /// that every tracked ignore's parent directory and `.gitignore` file
        /// still exist in the snapshot.
        fn check_invariants(&self) {
            // NOTE(review): the first bool appears to be `include_ignored` —
            // `files(true, _)` yields all files, `files(false, _)` only the
            // non-ignored ones. Confirm against `Snapshot::files`.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            // Both iterators must be exhausted — no extra files beyond the
            // by-path entries.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk top-down from the root via `child_entries`, inserting each
            // entry's children at a fixed stack position so siblings are
            // visited in order; the resulting sequence must match the order
            // of `entries_by_path`.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every registered ignore must still correspond to an existing
            // directory entry and its `.gitignore` file.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples
        /// sorted by path, optionally filtering out ignored entries — used to
        /// compare snapshots for equality in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4449}