1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap, HashSet};
11use futures::{Stream, StreamExt};
12use fuzzy::CharBag;
13use gpui::{
14 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
15 Task, UpgradeModelHandle, WeakModelHandle,
16};
17use language::{
18 Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
19 Operation, PointUtf16, Rope,
20};
21use lazy_static::lazy_static;
22use lsp::LanguageServer;
23use parking_lot::Mutex;
24use postage::{
25 prelude::{Sink as _, Stream as _},
26 watch,
27};
28use serde::Deserialize;
29use smol::channel::{self, Sender};
30use std::{
31 any::Any,
32 cmp::{self, Ordering},
33 convert::{TryFrom, TryInto},
34 ffi::{OsStr, OsString},
35 fmt,
36 future::Future,
37 ops::{Deref, Range},
38 path::{Path, PathBuf},
39 sync::{
40 atomic::{AtomicUsize, Ordering::SeqCst},
41 Arc,
42 },
43 time::{Duration, SystemTime},
44};
45use sum_tree::{Bias, TreeMap};
46use sum_tree::{Edit, SeekTarget, SumTree};
47use util::{post_inc, ResultExt, TryFutureExt};
48
lazy_static! {
    // Cached `OsStr` for the `.gitignore` filename, compared against directory
    // entry names during scanning.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
52
/// State of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently in progress.
    Idle,
    /// A scan is running; the background snapshot may still be changing.
    Scanning,
    /// The scan terminated with an error.
    Err(Arc<anyhow::Error>),
}
59
/// Identifier for a worktree: the model handle id for local worktrees, or the
/// id assigned by the server for remote ones.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct WorktreeId(usize);
62
/// A tree of files rooted at a directory, either backed by the local
/// filesystem or mirrored from a remote collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// Events emitted by a [`Worktree`] model.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Event {
    /// Disk-based diagnostic sources finished a round of updates.
    DiskBasedDiagnosticsUpdated,
    /// Diagnostics changed for the file at the given worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
73
74impl Entity for Worktree {
75 type Event = Event;
76
77 fn app_will_quit(
78 &mut self,
79 _: &mut MutableAppContext,
80 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
81 use futures::FutureExt;
82
83 if let Self::Local(worktree) = self {
84 let shutdown_futures = worktree
85 .language_servers
86 .drain()
87 .filter_map(|(_, server)| server.shutdown())
88 .collect::<Vec<_>>();
89 Some(
90 async move {
91 futures::future::join_all(shutdown_futures).await;
92 }
93 .boxed(),
94 )
95 } else {
96 None
97 }
98 }
99}
100
101impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem and starts
    /// the background scanner task that keeps its snapshot up to date.
    ///
    /// The returned handle is available immediately; scanning continues
    /// asynchronously on the background executor.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                // Watch the root with a 100ms debounce; the scanner applies
                // filesystem events to the shared background snapshot and
                // reports progress through `scan_states_tx`.
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
126
    /// Constructs a worktree that mirrors a collaborator's worktree received
    /// over RPC.
    ///
    /// The protobuf entries are deserialized on the background executor, then
    /// two tasks are spawned: one applies incoming `proto::UpdateWorktree`
    /// messages to a shared snapshot channel, the other re-polls the model
    /// whenever that snapshot changes.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Building the entry trees can be expensive for large worktrees, so it
        // happens off the main thread.
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Malformed entries are skipped rather than failing
                        // the whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    // Remote worktrees have no local absolute path.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                    diagnostic_summaries,
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Apply incoming worktree updates to the snapshot watch
                // channel in the background.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    // Re-poll the model whenever the background snapshot
                    // changes; stop when the model is dropped.
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                })
            })
        });

        Ok(worktree)
    }
249
250 pub fn as_local(&self) -> Option<&LocalWorktree> {
251 if let Worktree::Local(worktree) = self {
252 Some(worktree)
253 } else {
254 None
255 }
256 }
257
258 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
259 if let Worktree::Remote(worktree) = self {
260 Some(worktree)
261 } else {
262 None
263 }
264 }
265
266 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
267 if let Worktree::Local(worktree) = self {
268 Some(worktree)
269 } else {
270 None
271 }
272 }
273
274 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
275 if let Worktree::Remote(worktree) = self {
276 Some(worktree)
277 } else {
278 None
279 }
280 }
281
    /// Returns a clone of the worktree's current snapshot of entries.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
288
289 pub fn replica_id(&self) -> ReplicaId {
290 match self {
291 Worktree::Local(_) => 0,
292 Worktree::Remote(worktree) => worktree.replica_id,
293 }
294 }
295
    /// Removes a departed collaborator's state from the worktree.
    ///
    /// The local variant also needs the `peer_id` to drop buffers it was
    /// sharing with that peer; the remote variant only needs the replica id.
    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }
307
    /// The language registry backing this worktree.
    pub fn languages(&self) -> &Arc<LanguageRegistry> {
        match self {
            Worktree::Local(worktree) => &worktree.language_registry,
            Worktree::Remote(worktree) => &worktree.languages,
        }
    }
314
    /// The user store shared by both worktree variants.
    pub fn user_store(&self) -> &ModelHandle<UserStore> {
        match self {
            Worktree::Local(worktree) => &worktree.user_store,
            Worktree::Remote(worktree) => &worktree.user_store,
        }
    }
321
    /// Handles a guest's request to open a buffer in this (host) worktree.
    ///
    /// The response is produced asynchronously and sent back via `rpc` on the
    /// background executor; errors are logged rather than propagated.
    ///
    /// NOTE(review): the `unwrap` assumes this message is only ever routed to
    /// local worktrees — a remote worktree receiving it would panic.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
347
    /// Handles a guest closing a buffer that this (host) worktree was sharing
    /// with them.
    ///
    /// NOTE(review): as with `handle_open_buffer`, the `unwrap` assumes this
    /// message is only routed to local worktrees.
    pub fn handle_close_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        self.as_local_mut()
            .unwrap()
            .close_remote_buffer(envelope, cx)
    }
358
359 pub fn diagnostic_summaries<'a>(
360 &'a self,
361 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
362 match self {
363 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
364 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
365 }
366 .iter()
367 .map(|(path, summary)| (path.0.clone(), summary.clone()))
368 }
369
370 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
371 match self {
372 Worktree::Local(worktree) => &mut worktree.loading_buffers,
373 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
374 }
375 }
376
    /// Opens the buffer at `path`, deduplicating concurrent requests.
    ///
    /// Resolution order: (1) return an already-open buffer; (2) if a load for
    /// the same path is in flight, await its watch channel; (3) otherwise
    /// start a new load and register it so later callers share the result.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    // Publish the result (error wrapped in `Arc` so it can be
                    // cloned to every waiter).
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        cx.spawn(|_, _| async move {
            // Wait until the in-flight load publishes a result; the watch may
            // fire before the value is set, hence the loop.
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }
430
431 #[cfg(feature = "test-support")]
432 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
433 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
434 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
435 Worktree::Remote(worktree) => {
436 Box::new(worktree.open_buffers.values().filter_map(|buf| {
437 if let RemoteBuffer::Loaded(buf) = buf {
438 Some(buf)
439 } else {
440 None
441 }
442 }))
443 }
444 };
445
446 let path = path.as_ref();
447 open_buffers
448 .find(|buffer| {
449 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
450 file.path().as_ref() == path
451 } else {
452 false
453 }
454 })
455 .is_some()
456 }
457
458 pub fn handle_update_buffer(
459 &mut self,
460 envelope: TypedEnvelope<proto::UpdateBuffer>,
461 cx: &mut ModelContext<Self>,
462 ) -> Result<()> {
463 let payload = envelope.payload.clone();
464 let buffer_id = payload.buffer_id as usize;
465 let ops = payload
466 .operations
467 .into_iter()
468 .map(|op| language::proto::deserialize_operation(op))
469 .collect::<Result<Vec<_>, _>>()?;
470
471 match self {
472 Worktree::Local(worktree) => {
473 let buffer = worktree
474 .open_buffers
475 .get(&buffer_id)
476 .and_then(|buf| buf.upgrade(cx))
477 .ok_or_else(|| {
478 anyhow!("invalid buffer {} in update buffer message", buffer_id)
479 })?;
480 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
481 }
482 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
483 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
484 Some(RemoteBuffer::Loaded(buffer)) => {
485 if let Some(buffer) = buffer.upgrade(cx) {
486 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
487 } else {
488 worktree
489 .open_buffers
490 .insert(buffer_id, RemoteBuffer::Operations(ops));
491 }
492 }
493 None => {
494 worktree
495 .open_buffers
496 .insert(buffer_id, RemoteBuffer::Operations(ops));
497 }
498 },
499 }
500
501 Ok(())
502 }
503
    /// Handles a guest's request to save one of the host's shared buffers.
    ///
    /// Performs the save on the foreground executor, then acknowledges the
    /// request with a `BufferSaved` message carrying the saved version and
    /// mtime. Errors in the async portion are logged, not propagated.
    ///
    /// NOTE(review): `as_local().unwrap()` assumes this message is only routed
    /// to host worktrees.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        // The buffer must have been previously shared with this peer.
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
556
557 pub fn handle_buffer_saved(
558 &mut self,
559 envelope: TypedEnvelope<proto::BufferSaved>,
560 cx: &mut ModelContext<Self>,
561 ) -> Result<()> {
562 let payload = envelope.payload.clone();
563 let worktree = self.as_remote_mut().unwrap();
564 if let Some(buffer) = worktree
565 .open_buffers
566 .get(&(payload.buffer_id as usize))
567 .and_then(|buf| buf.upgrade(cx))
568 {
569 buffer.update(cx, |buffer, cx| {
570 let version = payload.version.try_into()?;
571 let mtime = payload
572 .mtime
573 .ok_or_else(|| anyhow!("missing mtime"))?
574 .into();
575 buffer.did_save(version, mtime, None, cx);
576 Result::<_, anyhow::Error>::Ok(())
577 })?;
578 }
579 Ok(())
580 }
581
    /// Refreshes the foreground snapshot from the authoritative source and
    /// notifies observers.
    ///
    /// For a local worktree that is still scanning, schedules another poll
    /// (immediately under the fake fs used in tests, after 100ms otherwise)
    /// so the UI sees incremental scan progress; once the scan settles it
    /// reconciles open buffers with the final snapshot.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                // Yield so fake-fs tests make progress without
                                // real-time delays.
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
614
    /// Reconciles every open buffer's `File` metadata with the latest
    /// snapshot, and prunes buffers whose models have been dropped.
    ///
    /// For each live buffer the new `File` is resolved in priority order:
    /// by entry id (tracks renames), then by path, then — if the entry is
    /// gone — a detached `File` with no entry id (file deleted on disk).
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Only fully loaded remote buffers carry file metadata.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            // Entry no longer exists in the snapshot; keep the
                            // old path/mtime but mark the file as detached.
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                // Model dropped: remember the id so we can clean up below
                // (can't mutate the map while iterating it).
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
690
691 pub fn update_diagnostics(
692 &mut self,
693 params: lsp::PublishDiagnosticsParams,
694 disk_based_sources: &HashSet<String>,
695 cx: &mut ModelContext<Worktree>,
696 ) -> Result<()> {
697 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
698 let abs_path = params
699 .uri
700 .to_file_path()
701 .map_err(|_| anyhow!("URI is not a file"))?;
702 let worktree_path = Arc::from(
703 abs_path
704 .strip_prefix(&this.abs_path)
705 .context("path is not within worktree")?,
706 );
707
708 let mut next_group_id = 0;
709 let mut diagnostics = Vec::default();
710 let mut primary_diagnostic_group_ids = HashMap::default();
711 let mut sources_by_group_id = HashMap::default();
712 let mut supporting_diagnostic_severities = HashMap::default();
713 for diagnostic in ¶ms.diagnostics {
714 let source = diagnostic.source.as_ref();
715 let code = diagnostic.code.as_ref().map(|code| match code {
716 lsp::NumberOrString::Number(code) => code.to_string(),
717 lsp::NumberOrString::String(code) => code.clone(),
718 });
719 let range = range_from_lsp(diagnostic.range);
720 let is_supporting = diagnostic
721 .related_information
722 .as_ref()
723 .map_or(false, |infos| {
724 infos.iter().any(|info| {
725 primary_diagnostic_group_ids.contains_key(&(
726 source,
727 code.clone(),
728 range_from_lsp(info.location.range),
729 ))
730 })
731 });
732
733 if is_supporting {
734 if let Some(severity) = diagnostic.severity {
735 supporting_diagnostic_severities
736 .insert((source, code.clone(), range), severity);
737 }
738 } else {
739 let group_id = post_inc(&mut next_group_id);
740 let is_disk_based =
741 source.map_or(false, |source| disk_based_sources.contains(source));
742
743 sources_by_group_id.insert(group_id, source);
744 primary_diagnostic_group_ids
745 .insert((source, code.clone(), range.clone()), group_id);
746
747 diagnostics.push(DiagnosticEntry {
748 range,
749 diagnostic: Diagnostic {
750 code: code.clone(),
751 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
752 message: diagnostic.message.clone(),
753 group_id,
754 is_primary: true,
755 is_valid: true,
756 is_disk_based,
757 },
758 });
759 if let Some(infos) = &diagnostic.related_information {
760 for info in infos {
761 if info.location.uri == params.uri {
762 let range = range_from_lsp(info.location.range);
763 diagnostics.push(DiagnosticEntry {
764 range,
765 diagnostic: Diagnostic {
766 code: code.clone(),
767 severity: DiagnosticSeverity::INFORMATION,
768 message: info.message.clone(),
769 group_id,
770 is_primary: false,
771 is_valid: true,
772 is_disk_based,
773 },
774 });
775 }
776 }
777 }
778 }
779 }
780
781 for entry in &mut diagnostics {
782 let diagnostic = &mut entry.diagnostic;
783 if !diagnostic.is_primary {
784 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
785 if let Some(&severity) = supporting_diagnostic_severities.get(&(
786 source,
787 diagnostic.code.clone(),
788 entry.range.clone(),
789 )) {
790 diagnostic.severity = severity;
791 }
792 }
793 }
794
795 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
796 Ok(())
797 }
798
    /// Records new diagnostics for the file at `worktree_path`.
    ///
    /// If a matching buffer is open, pushes the diagnostics into it and
    /// forwards the resulting operation to collaborators. Always updates the
    /// stored per-file diagnostics and summary, emits
    /// [`Event::DiagnosticsUpdated`], and — when the project is shared —
    /// broadcasts the new summary to guests.
    ///
    /// NOTE(review): only local worktrees call this; the `unwrap`s would
    /// panic for a remote worktree.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    // At most one open buffer can match the path.
                    break;
                }
            }
        }

        // Re-borrow: `send_buffer_update` above needed `&mut self`.
        let this = self.as_local_mut().unwrap();
        let summary = DiagnosticSummary::new(&diagnostics);
        this.snapshot
            .diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        this.diagnostics.insert(worktree_path.clone(), diagnostics);

        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));

        if let Some(share) = this.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = this.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = this.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
864
    /// Sends a buffer operation to collaborators over RPC.
    ///
    /// No-op when the worktree is local but not currently shared. On send
    /// failure the operation is queued in `queued_operations` for later
    /// retransmission rather than lost.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
905}
906
907impl WorktreeId {
908 pub fn from_usize(handle_id: usize) -> Self {
909 Self(handle_id)
910 }
911
912 pub(crate) fn from_proto(id: u64) -> Self {
913 Self(id as usize)
914 }
915
916 pub fn to_proto(&self) -> u64 {
917 self.0 as u64
918 }
919
920 pub fn to_usize(&self) -> usize {
921 self.0
922 }
923}
924
925impl fmt::Display for WorktreeId {
926 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
927 self.0.fmt(f)
928 }
929}
930
/// An immutable view of a worktree's entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Incremented for each background scan pass.
    scan_id: usize,
    // Absolute root path; empty for remote worktrees.
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of the root name, used for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed gitignores keyed by the directory containing them, with the
    // scan id at which each was last seen.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
}
945
/// A worktree backed by the local filesystem.
pub struct LocalWorktree {
    // Foreground copy of the snapshot; refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    config: WorktreeConfig,
    // Snapshot mutated by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the background scanner alive for the worktree's lifetime.
    _background_scanner_task: Option<Task<()>>,
    // Pending re-poll scheduled while a scan is in progress.
    poll_task: Option<Task<()>>,
    // Present while the project is shared with collaborators.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Buffers pinned on behalf of each connected guest.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    // Operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Languages observed in this worktree.
    languages: Vec<Arc<Language>>,
    // Running language servers keyed by language name.
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
966
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Channel for snapshots that should be sent to guests.
    snapshots_tx: Sender<Snapshot>,
}
971
/// A worktree mirrored from a remote collaborator.
pub struct RemoteWorktree {
    project_id: u64,
    // Foreground copy; refreshed from `snapshot_rx` by `poll_snapshot`.
    snapshot: Snapshot,
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Incoming worktree updates to apply to the snapshot.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, RemoteBuffer>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
}
985
/// Map from worktree-relative path to a watch receiver that resolves once the
/// buffer at that path finishes loading (or fails, with a shareable error).
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
990
/// Per-worktree configuration parsed from `.zed.toml` at the root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins authorized to join this worktree.
    collaborators: Vec<String>,
}
995
996impl LocalWorktree {
    /// Builds a local worktree model rooted at `path`.
    ///
    /// Reads root metadata and optional `.zed.toml` config, seeds the snapshot
    /// with the root entry, and spawns a task that forwards scan-state changes
    /// into `last_scan_state_rx`, re-polls the snapshot, and — when shared —
    /// queues finished snapshots for transmission to guests.
    ///
    /// Returns the model handle plus the sender the background scanner uses
    /// to report scan states (the scanner itself is started by the caller).
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is empty.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // Best-effort config load: missing or malformed `.zed.toml` falls back
        // to the default config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
                diagnostic_summaries: Default::default(),
            };
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                languages: Default::default(),
                language_servers: Default::default(),
            };

            // Forward scan states from the background scanner; once a scan
            // settles and the worktree is shared, ship the finished snapshot
            // to guests. Exits when the model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1105
1106 pub fn authorized_logins(&self) -> Vec<String> {
1107 self.config.collaborators.clone()
1108 }
1109
1110 pub fn language_registry(&self) -> &LanguageRegistry {
1111 &self.language_registry
1112 }
1113
1114 pub fn languages(&self) -> &[Arc<Language>] {
1115 &self.languages
1116 }
1117
    /// Ensures `language` is tracked by this worktree and returns a language
    /// server for it, starting and wiring one up on first registration.
    ///
    /// Returns `None` when the server fails to start (the error is logged) or
    /// the language provides no server.
    pub fn register_language(
        &mut self,
        language: &Arc<Language>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
            self.languages.push(language.clone());
        }

        // Reuse an already-running server for this language if we have one.
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            let disk_based_sources = language
                .disk_based_diagnostic_sources()
                .cloned()
                .unwrap_or_default();
            let disk_based_diagnostics_progress_token =
                language.disk_based_diagnostics_progress_token().cloned();
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
                smol::channel::unbounded();
            // Forward publish-diagnostics notifications from the server onto a
            // channel that is drained by the task spawned just below.
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            // Apply incoming diagnostics to the worktree until it is dropped.
            cx.spawn_weak(|this, mut cx| {
                let has_disk_based_diagnostic_progress_token =
                    disk_based_diagnostics_progress_token.is_some();
                let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
                async move {
                    while let Ok(diagnostics) = diagnostics_rx.recv().await {
                        if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                            handle.update(&mut cx, |this, cx| {
                                this.update_diagnostics(diagnostics, &disk_based_sources, cx)
                                    .log_err();
                                // Without a progress token there is no explicit
                                // "done" signal, so treat each diagnostics
                                // batch as completing a disk-based pass.
                                if !has_disk_based_diagnostic_progress_token {
                                    smol::block_on(disk_based_diagnostics_done_tx.send(())).ok();
                                }
                            })
                        } else {
                            break;
                        }
                    }
                }
            })
            .detach();

            // Track nesting of $/progress Begin/End pairs for the disk-based
            // diagnostics token; when it returns to zero, a pass has finished.
            let mut pending_disk_based_diagnostics: i32 = 0;
            language_server
                .on_notification::<lsp::notification::Progress, _>(move |params| {
                    let token = match params.token {
                        lsp::NumberOrString::Number(_) => None,
                        lsp::NumberOrString::String(token) => Some(token),
                    };

                    if token == disk_based_diagnostics_progress_token {
                        match params.value {
                            lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                                lsp::WorkDoneProgress::Begin(_) => {
                                    pending_disk_based_diagnostics += 1;
                                }
                                lsp::WorkDoneProgress::End(_) => {
                                    pending_disk_based_diagnostics -= 1;
                                    if pending_disk_based_diagnostics == 0 {
                                        smol::block_on(disk_based_diagnostics_done_tx.send(()))
                                            .ok();
                                    }
                                }
                                _ => {}
                            },
                        }
                    }
                })
                .detach();
            // Emit an event (and notify collaborators, if shared) every time a
            // disk-based diagnostics pass completes.
            let rpc = self.client.clone();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(()) = disk_based_diagnostics_done_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let message = handle.update(&mut cx, |this, cx| {
                            cx.emit(Event::DiskBasedDiagnosticsUpdated);
                            let this = this.as_local().unwrap();
                            this.share
                                .as_ref()
                                .map(|share| proto::DiskBasedDiagnosticsUpdated {
                                    project_id: share.project_id,
                                    worktree_id: this.id().to_proto(),
                                })
                        });

                        if let Some(message) = message {
                            rpc.send(message).await.log_err();
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }
1231
1232 fn get_open_buffer(
1233 &mut self,
1234 path: &Path,
1235 cx: &mut ModelContext<Worktree>,
1236 ) -> Option<ModelHandle<Buffer>> {
1237 let handle = cx.handle();
1238 let mut result = None;
1239 self.open_buffers.retain(|_buffer_id, buffer| {
1240 if let Some(buffer) = buffer.upgrade(cx) {
1241 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1242 if file.worktree == handle && file.path().as_ref() == path {
1243 result = Some(buffer);
1244 }
1245 }
1246 true
1247 } else {
1248 false
1249 }
1250 });
1251 result
1252 }
1253
    /// Loads the file at `path` from disk and creates a buffer for it with
    /// language, language server, and any previously-queued diagnostics
    /// attached.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Diagnostics received before the buffer was opened are held on the
            // worktree; take them so they can be applied to the new buffer.
            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.remove(&path);
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                // Broadcast any operations produced while seeding diagnostics.
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1300
    /// Handles a peer's `OpenBuffer` request: opens the buffer locally,
    /// records it as shared with that peer, and returns its serialized state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                // Keep a strong handle per peer so the buffer stays alive while
                // it is shared.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1326
1327 pub fn close_remote_buffer(
1328 &mut self,
1329 envelope: TypedEnvelope<proto::CloseBuffer>,
1330 cx: &mut ModelContext<Worktree>,
1331 ) -> Result<()> {
1332 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1333 shared_buffers.remove(&envelope.payload.buffer_id);
1334 cx.notify();
1335 }
1336
1337 Ok(())
1338 }
1339
1340 pub fn remove_collaborator(
1341 &mut self,
1342 peer_id: PeerId,
1343 replica_id: ReplicaId,
1344 cx: &mut ModelContext<Worktree>,
1345 ) {
1346 self.shared_buffers.remove(&peer_id);
1347 for (_, buffer) in &self.open_buffers {
1348 if let Some(buffer) = buffer.upgrade(cx) {
1349 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1350 }
1351 }
1352 cx.notify();
1353 }
1354
1355 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1356 let mut scan_state_rx = self.last_scan_state_rx.clone();
1357 async move {
1358 let mut scan_state = Some(scan_state_rx.borrow().clone());
1359 while let Some(ScanState::Scanning) = scan_state {
1360 scan_state = scan_state_rx.recv().await;
1361 }
1362 }
1363 }
1364
1365 fn is_scanning(&self) -> bool {
1366 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1367 true
1368 } else {
1369 false
1370 }
1371 }
1372
    /// Returns a clone of the current snapshot of this worktree's entries.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1376
    /// The absolute path of this worktree's root on the local filesystem.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1380
    /// Whether the given absolute path lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1384
1385 fn absolutize(&self, path: &Path) -> PathBuf {
1386 if path.file_name().is_some() {
1387 self.snapshot.abs_path.join(path)
1388 } else {
1389 self.snapshot.abs_path.to_path_buf()
1390 }
1391 }
1392
    /// Reads the file at the worktree-relative `path` from disk, refreshing
    /// its entry in the background snapshot, and yields its `File` handle
    /// along with the file's contents.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1418
    /// Writes `text` to `path` and re-associates `buffer` with the resulting
    /// file, returning the new `File` handle.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer so future opens of this path can find it.
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1443
    /// Writes `text` to disk at the worktree-relative `path` on a background
    /// thread, then refreshes and returns the file's snapshot entry.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Surface the refreshed entry to observers of the snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1465
    /// Starts sharing this worktree under `project_id`: sends the initial
    /// snapshot to the server and spawns a task that streams snapshot diffs
    /// to collaborators. Returns immediately if already shared.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
        });

        // Diff each queued snapshot against the last one that was sent
        // successfully and push only the changes over RPC.
        cx.background()
            .spawn({
                let rpc = rpc.clone();
                let snapshot = snapshot.clone();
                async move {
                    let mut prev_snapshot = snapshot;
                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                        let message =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                        match rpc.send(message).await {
                            Ok(()) => prev_snapshot = snapshot,
                            Err(err) => log::error!("error sending snapshot diff {}", err),
                        }
                    }
                }
            })
            .detach();

        // Serialize the full initial snapshot off the main thread.
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto()),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1514}
1515
1516fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1517 let contents = smol::block_on(fs.load(&abs_path))?;
1518 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1519 let mut builder = GitignoreBuilder::new(parent);
1520 for line in contents.lines() {
1521 builder.add_line(Some(abs_path.into()), line)?;
1522 }
1523 Ok(builder.build()?)
1524}
1525
1526impl Deref for Worktree {
1527 type Target = Snapshot;
1528
1529 fn deref(&self) -> &Self::Target {
1530 match self {
1531 Worktree::Local(worktree) => &worktree.snapshot,
1532 Worktree::Remote(worktree) => &worktree.snapshot,
1533 }
1534 }
1535}
1536
impl Deref for LocalWorktree {
    type Target = Snapshot;

    // Lets snapshot methods (entries, paths, etc.) be called directly on the
    // worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1544
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // Lets snapshot methods (entries, paths, etc.) be called directly on the
    // worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1552
impl fmt::Debug for LocalWorktree {
    // Debug output is delegated to the snapshot's entry listing.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1558
impl RemoteWorktree {
    /// Returns the already-open buffer for `path`, if any, pruning map
    /// entries for buffers that have since been dropped.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        existing_buffer
    }

    /// Opens `path` by requesting the buffer's contents from the host over
    /// RPC and reconstructing it locally.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            // The entry must exist in our replica of the snapshot before we
            // ask the host for it.
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Operations may have arrived for this buffer while the open
                // request was in flight; apply them now that it exists.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Closes every buffer that is still open and loaded, draining the map.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// Returns a clone of the current snapshot of this worktree's entries.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues an `UpdateWorktree` message from the host to be applied
    /// asynchronously by the update-processing task reading `updates_tx`.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Records a diagnostic summary pushed by the host for one path and emits
    /// a `DiagnosticsUpdated` event for it.
    pub fn update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        cx: &mut ModelContext<Worktree>,
    ) {
        if let Some(summary) = envelope.payload.summary {
            let path: Arc<Path> = Path::new(&summary.path).into();
            self.snapshot.diagnostic_summaries.insert(
                PathKey(path.clone()),
                DiagnosticSummary {
                    error_count: summary.error_count as usize,
                    warning_count: summary.warning_count as usize,
                    info_count: summary.info_count as usize,
                    hint_count: summary.hint_count as usize,
                },
            );
            cx.emit(Event::DiagnosticsUpdated(path));
        }
    }

    /// Relays the host's notification that a disk-based diagnostics pass
    /// finished.
    pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdated);
    }

    /// Removes a departed collaborator's replica from all open buffers.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1706
/// State of a buffer known to a remote worktree: either operations that
/// arrived before the buffer finished opening locally, or a weak handle to
/// the opened buffer.
enum RemoteBuffer {
    Operations(Vec<Operation>),
    Loaded(WeakModelHandle<Buffer>),
}
1711
1712impl RemoteBuffer {
1713 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1714 match self {
1715 Self::Operations(_) => None,
1716 Self::Loaded(buffer) => buffer.upgrade(cx),
1717 }
1718 }
1719}
1720
impl Snapshot {
    /// The stable id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Serializes the snapshot for the wire, omitting ignored entries.
    pub fn to_proto(&self) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id.0 as u64,
            root_name,
            entries: self
                .entries_by_path
                .iter()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
            diagnostic_summaries: self
                .diagnostic_summaries
                .iter()
                .map(|(path, summary)| summary.to_proto(path.0.clone()))
                .collect(),
        }
    }

    /// Computes the delta from `other` to `self` as an `UpdateWorktree`
    /// message, walking both id-ordered entry lists in lockstep.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present here but not in `other`: a new entry.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both; only resend if it was touched
                            // by a later scan.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: it was removed.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies an `UpdateWorktree` delta received over RPC, removing and
    /// upserting entries in both the path-ordered and id-ordered trees.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If an entry with this id already exists (possibly at a
            // different path), drop its old path record first.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of files, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of files that are not ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal positioned at the `start_offset`-th entry matching
    /// the include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over files only, starting at file index `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries except the root itself.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry whose path is exactly `path`, if it exists.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id via the id-ordered index, then resolves it in
    /// the path-ordered tree.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts or replaces `entry`, reusing ids for paths/inodes seen before
    /// and (re)parsing `.gitignore` files as they appear.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A broken ignore file shouldn't abort the scan.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Records the scanned children of `parent_path`, marking the parent as a
    /// fully-scanned directory and registering its `.gitignore`, if any.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Only a directory awaiting its first scan may be populated.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans by reusing the id of a removed
    /// entry with the same inode, or of the existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything beneath it from the snapshot,
    /// remembering the removed ids by inode so they can be reused if the
    /// entries reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before `path`], [the `path`
            // subtree], and [after], keeping the two outer parts.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates the cached ignore for its parent.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of `.gitignore`s applying to `path`, from the root
    /// downward, collapsing to "ignore everything" once an ancestor is
    /// itself ignored.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
2086
2087impl fmt::Debug for Snapshot {
2088 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2089 for entry in self.entries_by_path.cursor::<()>() {
2090 for _ in entry.path.ancestors().skip(1) {
2091 write!(f, " ")?;
2092 }
2093 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2094 }
2095 Ok(())
2096 }
2097}
2098
/// A handle to a file within a worktree, as exposed to buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    /// `None` once the underlying worktree entry has been deleted.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    /// Absolute path of the worktree root containing this file.
    worktree_path: Arc<Path>,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    /// Distinguishes files on a local worktree from remote ones.
    is_local: bool,
}
2108
impl language::File for File {
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The absolute path on disk; `None` for files on a remote worktree.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// The path prefixed with the worktree's name, suitable for display.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    /// A file is considered deleted once its worktree entry is gone.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Saves `text` for this file: on a local worktree by writing to disk
    /// (notifying collaborators if shared), on a remote worktree by asking
    /// the host to save.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Only broadcast the save if the worktree is shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Reads this file's contents from disk, if it belongs to a local
    /// worktree.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Forwards a buffer operation to the worktree for broadcast.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// On a remote worktree, tells the host we no longer hold the buffer.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.id().to_proto();
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
2242
2243impl File {
2244 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2245 file.and_then(|f| f.as_any().downcast_ref())
2246 }
2247
2248 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2249 self.worktree.read(cx).id()
2250 }
2251}
2252
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    pub id: usize,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether a `.gitignore` rule excludes this entry.
    pub is_ignored: bool,
}
2263
/// What kind of filesystem object an `Entry` represents.
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A regular file, with the character bag used for fuzzy matching.
    File(CharBag),
}
2270
2271impl Entry {
2272 fn new(
2273 path: Arc<Path>,
2274 metadata: &fs::Metadata,
2275 next_entry_id: &AtomicUsize,
2276 root_char_bag: CharBag,
2277 ) -> Self {
2278 Self {
2279 id: next_entry_id.fetch_add(1, SeqCst),
2280 kind: if metadata.is_dir {
2281 EntryKind::PendingDir
2282 } else {
2283 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2284 },
2285 path,
2286 inode: metadata.inode,
2287 mtime: metadata.mtime,
2288 is_symlink: metadata.is_symlink,
2289 is_ignored: false,
2290 }
2291 }
2292
2293 pub fn is_dir(&self) -> bool {
2294 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2295 }
2296
2297 pub fn is_file(&self) -> bool {
2298 matches!(self.kind, EntryKind::File(_))
2299 }
2300}
2301
2302impl sum_tree::Item for Entry {
2303 type Summary = EntrySummary;
2304
2305 fn summary(&self) -> Self::Summary {
2306 let visible_count = if self.is_ignored { 0 } else { 1 };
2307 let file_count;
2308 let visible_file_count;
2309 if self.is_file() {
2310 file_count = 1;
2311 visible_file_count = visible_count;
2312 } else {
2313 file_count = 0;
2314 visible_file_count = 0;
2315 }
2316
2317 EntrySummary {
2318 max_path: self.path.clone(),
2319 count: 1,
2320 visible_count,
2321 file_count,
2322 visible_file_count,
2323 }
2324 }
2325}
2326
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed — and therefore ordered in the tree — by their
    // worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2334
/// Aggregate data summed over a contiguous run of entries in the entry tree;
/// supports seeking by path and by the various entry counts.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest (last, in path order) path in the summarized run.
    max_path: Arc<Path>,
    // Total entries in the run.
    count: usize,
    // Entries not excluded by a gitignore.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are non-ignored files.
    visible_file_count: usize,
}
2343
2344impl Default for EntrySummary {
2345 fn default() -> Self {
2346 Self {
2347 max_path: Arc::from(Path::new("")),
2348 count: 0,
2349 visible_count: 0,
2350 file_count: 0,
2351 visible_file_count: 0,
2352 }
2353 }
2354}
2355
2356impl sum_tree::Summary for EntrySummary {
2357 type Context = ();
2358
2359 fn add_summary(&mut self, rhs: &Self, _: &()) {
2360 self.max_path = rhs.max_path.clone();
2361 self.visible_count += rhs.visible_count;
2362 self.file_count += rhs.file_count;
2363 self.visible_file_count += rhs.visible_file_count;
2364 }
2365}
2366
/// Record in the secondary, id-keyed entry index, mapping an entry id back to
/// its path and ignore status. The id key stays stable while the path
/// changes, which lets the scanner track entries across events within a scan.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this record was last updated.
    scan_id: usize,
}
2374
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The only aggregate needed for the id-keyed index is the maximum id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2382
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    // Records are keyed (and ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2390
/// Summary for the id-keyed index: tracks the maximum id in a subtree so the
/// tree can be sought directly by entry id.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2395
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // Items are ordered by id, so the rightmost summary's max id is the
    // overall max.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2403
// Lets the id-keyed tree be sought by a plain `usize` entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2409
/// Ordering key for the path-keyed entry tree: an entry's worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2412
2413impl Default for PathKey {
2414 fn default() -> Self {
2415 Self(Path::new("").into())
2416 }
2417}
2418
// Tracks the maximum path seen so the entry tree can be sought by `PathKey`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2424
/// Scans a worktree's directory structure on background tasks, keeping the
/// shared snapshot up to date and reporting progress over `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the foreground worktree; mutated in place.
    snapshot: Arc<Mutex<Snapshot>>,
    // Channel over which `ScanState` transitions are reported.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2431
impl BackgroundScanner {
    /// Creates a scanner that populates and maintains `snapshot`, reporting
    /// its progress on the `notify` channel.
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }

    /// The absolute path of the worktree root on disk.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// A point-in-time copy of the shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Main loop: performs the initial scan, then applies batches of
    /// file-system events as they arrive, bracketing each unit of work with
    /// `Scanning`/`Idle` notifications. Exits when the receiving side of the
    /// notify channel is dropped or the event stream ends.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            // Report the failure to the foreground. Note that we still fall
            // through to report `Idle` and keep processing events afterwards.
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }

    /// Performs the initial recursive scan of the worktree root, fanning the
    /// work out over one task per CPU. Worktrees rooted at a single file have
    /// no directory to descend into and complete immediately.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            // Seed the queue with the root directory itself.
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Dropping our sender lets the workers terminate once every
            // in-flight job (each holding a sender clone) has been processed.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }

    /// Scans the children of a single directory, writing the resulting
    /// entries into the snapshot and enqueuing one new `ScanJob` per child
    /// directory.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // Entries that vanish between readdir and stat are simply skipped.
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current
                // directory. Because `.gitignore` starts with a `.`, it sorts
                // near the start of the listing, so there should rarely be
                // many such entries. Update the ignore stack associated with
                // any already-enqueued jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        // `new_jobs` contains one job per directory entry, in
                        // the same order, so the two iterators stay in
                        // lockstep.
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Everything under an ignored directory is ignored, so the
                    // child job can skip per-path checks entirely.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }

    /// Applies a batch of file-system events to a copy of the snapshot, then
    /// publishes it. Returns `false` when the worktree root can no longer be
    /// resolved on disk (e.g. it was deleted), which stops the scanner.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path so that an event for a directory subsumes the events
        // for paths inside it, then drop those subsumed events.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First remove every affected path; whatever still exists on disk is
        // re-inserted below.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists; the removal pass above already
                // handled it.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }

    /// Recomputes ignore status for the subtrees whose `.gitignore` files
    /// changed in the current scan, and drops cached gitignores whose files
    /// no longer exist on disk.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // A matching scan id means the gitignore was (re)loaded during
            // this scan, so its subtree must be re-evaluated.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            // Remove from both the local copy and the shared snapshot.
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip paths nested inside a directory we're already going to
            // process; the recursive jobs below will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes `is_ignored` for the direct children of `job.path`,
    /// enqueuing a follow-up job for each child directory and writing any
    /// changed entries back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only touch the trees for entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
}
2835
2836async fn refresh_entry(
2837 fs: &dyn Fs,
2838 snapshot: &Mutex<Snapshot>,
2839 path: Arc<Path>,
2840 abs_path: &Path,
2841) -> Result<Entry> {
2842 let root_char_bag;
2843 let next_entry_id;
2844 {
2845 let snapshot = snapshot.lock();
2846 root_char_bag = snapshot.root_char_bag;
2847 next_entry_id = snapshot.next_entry_id.clone();
2848 }
2849 let entry = Entry::new(
2850 path,
2851 &fs.metadata(abs_path)
2852 .await?
2853 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2854 &next_entry_id,
2855 root_char_bag,
2856 );
2857 Ok(snapshot.lock().insert_entry(entry, fs))
2858}
2859
2860fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2861 let mut result = root_char_bag;
2862 result.extend(
2863 path.to_string_lossy()
2864 .chars()
2865 .map(|c| c.to_ascii_lowercase()),
2866 );
2867 result
2868}
2869
/// Unit of work for the directory scanner: one directory to scan, along with
/// the ignore stack in effect there.
struct ScanJob {
    abs_path: PathBuf,
    // Worktree-relative path of the directory.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Each job holds a sender clone; workers stop once all jobs have drained
    // and every sender has been dropped.
    scan_queue: Sender<ScanJob>,
}
2876
/// Unit of work for recomputing ignore statuses: one directory whose children
/// must be re-evaluated against `ignore_stack`.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Sender clone used to enqueue recursive jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2882
/// Test-only extension methods for worktree model handles.
pub trait WorktreeHandle {
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2890
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        // Create and then delete a sentinel file, waiting for each change to
        // become visible in the worktree before moving on.
        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2923
/// Seek dimension accumulated while traversing the entry tree; mirrors
/// `EntrySummary`, but borrows `max_path` from the tree instead of owning it.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2932
2933impl<'a> TraversalProgress<'a> {
2934 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2935 match (include_ignored, include_dirs) {
2936 (true, true) => self.count,
2937 (true, false) => self.file_count,
2938 (false, true) => self.visible_count,
2939 (false, false) => self.visible_file_count,
2940 }
2941 }
2942}
2943
// Accumulates every counter and tracks the rightmost path seen so far.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2953
impl<'a> Default for TraversalProgress<'a> {
    // Zeroed progress positioned before the first entry.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2965
/// Cursor-based iterator over worktree entries, optionally filtering out
/// ignored entries and/or directories.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
2971
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until the filtered offset reaches `offset`.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past all descendants of the current entry, stopping at the next
    /// entry outside its subtree that matches the filters. The outer loop
    /// re-seeks whenever the landing entry is filtered out.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position, counted in entries matching the filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3017
3018impl<'a> Iterator for Traversal<'a> {
3019 type Item = &'a Entry;
3020
3021 fn next(&mut self) -> Option<Self::Item> {
3022 if let Some(item) = self.entry() {
3023 self.advance();
3024 Some(item)
3025 } else {
3026 None
3027 }
3028 }
3029}
3030
/// Seek targets for positioning a `Traversal` cursor within the entry tree.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first path that is neither the given path nor one of its
    // descendants.
    PathSuccessor(&'a Path),
    // Seek to the entry at the given offset, counted under the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3041
3042impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3043 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3044 match self {
3045 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3046 TraversalTarget::PathSuccessor(path) => {
3047 if !cursor_location.max_path.starts_with(path) {
3048 Ordering::Equal
3049 } else {
3050 Ordering::Greater
3051 }
3052 }
3053 TraversalTarget::Count {
3054 count,
3055 include_dirs,
3056 include_ignored,
3057 } => Ord::cmp(
3058 count,
3059 &cursor_location.count(*include_dirs, *include_ignored),
3060 ),
3061 }
3062 }
3063}
3064
/// Iterator over the direct children of a directory, implemented by skipping
/// each child's entire subtree as the child is yielded.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3069
3070impl<'a> Iterator for ChildEntriesIter<'a> {
3071 type Item = &'a Entry;
3072
3073 fn next(&mut self) -> Option<Self::Item> {
3074 if let Some(item) = self.traversal.entry() {
3075 if item.path.starts_with(&self.parent_path) {
3076 self.traversal.advance_to_sibling();
3077 return Some(item);
3078 }
3079 }
3080 None
3081 }
3082}
3083
3084impl<'a> From<&'a Entry> for proto::Entry {
3085 fn from(entry: &'a Entry) -> Self {
3086 Self {
3087 id: entry.id as u64,
3088 is_dir: entry.is_dir(),
3089 path: entry.path.to_string_lossy().to_string(),
3090 inode: entry.inode,
3091 mtime: Some(entry.mtime.into()),
3092 is_symlink: entry.is_symlink,
3093 is_ignored: entry.is_ignored,
3094 }
3095 }
3096}
3097
3098impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3099 type Error = anyhow::Error;
3100
3101 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3102 if let Some(mtime) = entry.mtime {
3103 let kind = if entry.is_dir {
3104 EntryKind::Dir
3105 } else {
3106 let mut char_bag = root_char_bag.clone();
3107 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3108 EntryKind::File(char_bag)
3109 };
3110 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3111 Ok(Entry {
3112 id: entry.id as usize,
3113 kind,
3114 path: path.clone(),
3115 inode: entry.inode,
3116 mtime: mtime.into(),
3117 is_symlink: entry.is_symlink,
3118 is_ignored: entry.is_ignored,
3119 })
3120 } else {
3121 Err(anyhow!(
3122 "missing mtime in remote worktree entry {:?}",
3123 entry.path
3124 ))
3125 }
3126 }
3127}
3128
/// Conversion from LSP positions into the editor's `PointUtf16` coordinates.
trait ToPointUtf16 {
    fn to_point_utf16(self) -> PointUtf16;
}
3132
impl ToPointUtf16 for lsp::Position {
    // Both representations use a zero-based line plus a UTF-16 character
    // offset, so this is a direct field-for-field conversion.
    fn to_point_utf16(self) -> PointUtf16 {
        PointUtf16::new(self.line, self.character)
    }
}
3138
3139fn range_from_lsp(range: lsp::Range) -> Range<PointUtf16> {
3140 let start = PointUtf16::new(range.start.line, range.start.character);
3141 let end = PointUtf16::new(range.end.line, range.end.character);
3142 start..end
3143}
3144
3145#[cfg(test)]
3146mod tests {
3147 use super::*;
3148 use crate::fs::FakeFs;
3149 use anyhow::Result;
3150 use client::test::{FakeHttpClient, FakeServer};
3151 use fs::RealFs;
3152 use gpui::test::subscribe;
3153 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3154 use language::{Diagnostic, LanguageConfig};
3155 use lsp::Url;
3156 use rand::prelude::*;
3157 use serde_json::json;
3158 use std::{cell::RefCell, rc::Rc};
3159 use std::{
3160 env,
3161 fmt::Write,
3162 time::{SystemTime, UNIX_EPOCH},
3163 };
3164 use text::Point;
3165 use unindent::Unindent as _;
3166 use util::test::temp_tree;
3167
    // Verifies that traversing a scanned worktree honors `.gitignore`:
    // the ignored file `a/b` is omitted while its sibling `a/c` is yielded.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3214
    // Opens a buffer from a real temp directory, edits it, saves, and checks
    // that the file on disk matches the buffer contents.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            // Insert a sizeable amount of text so the save is non-trivial.
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3248
    // Same as `test_save_file`, but with a worktree rooted at a single file
    // rather than a directory; the buffer is opened with the empty path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // A single-file worktree contains exactly one file entry.
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3287
    // Renames and deletes files on disk, then checks that (1) entry ids and
    // open buffers follow the renamed paths, and (2) a remote replica of the
    // worktree converges after applying the snapshot's update message.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client.clone());
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Helpers for opening buffers and resolving entry ids by path.
        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(),
            Client::new(http_client.clone()),
            user_store,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Renamed entries keep their ids across the rescan.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers follow their files to the new paths.
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3443
    // Verifies that ignore statuses are applied during the initial scan and
    // kept up to date when new files appear in tracked and ignored
    // directories (and that `.git` itself is treated as ignored).
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // Drain any stale FS events before mutating the directory below.
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3495
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        // Verifies that opening the same path multiple times — concurrently or
        // sequentially — yields a single shared buffer per path.
        let user_id = 100;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client);
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap();
        let buffer_a_2 = buffer_a_2.await.unwrap();
        let buffer_b = buffer_b.await.unwrap();
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap();

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3554
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        // Verifies the buffer dirty-state machine: edits mark a buffer dirty
        // (emitting `Dirtied`), saving clears it (emitting `Saved`), and
        // deleting the backing file dirties a clean buffer.
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Collects every event the buffer emits so the exact sequence can be
        // asserted below.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            // Simulate a save by reporting the current version and mtime.
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        tree.flush_fs_events(&cx).await;
        // Dirty the buffer first, then clear the recorded events so only the
        // deletion's events are observed.
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3694
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        // Verifies how a buffer reacts to its backing file changing on disk:
        // a clean buffer reloads the new contents; a dirty buffer keeps its
        // edits and is flagged as conflicted instead.
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            // A disk-triggered reload must leave the buffer clean.
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3797
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that diagnostics published by a (fake) language server are
        // surfaced as worktree events and buffer diagnostic entries, and that
        // `DiskBasedDiagnosticsUpdated` is only emitted once every outstanding
        // progress token has ended.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        let mut events = subscribe(&tree, &mut cx);

        // Three starts and one end leave two progress operations outstanding.
        fake_server.start_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;

        // The published diagnostics produce a per-path event immediately.
        let event = events.next().await.unwrap();
        assert_eq!(
            event,
            Event::DiagnosticsUpdated(Arc::from(Path::new("a.rs")))
        );

        // Ending the remaining progress operations triggers the disk-based
        // diagnostics event.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;

        let event = events.next().await.unwrap();
        assert_eq!(event, Event::DiskBasedDiagnosticsUpdated);

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // A buffer opened after the diagnostics arrived still carries them.
        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
3902
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that LSP diagnostics whose `related_information` entries
        // point at each other are grouped: each group gets one primary entry
        // plus its hints, all sharing a `group_id`, retrievable either mixed
        // by position (`diagnostics_in_range`) or per group
        // (`diagnostic_group`).
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Five raw diagnostics forming two groups: "error 1" (warning + one
        // hint) and "error 2" (error + two hints). Hints link back to their
        // primary via the "original diagnostic" related information.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // Entries come back ordered by position; group membership is encoded
        // by `group_id` and the primary by `is_primary`.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched on its own by group id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
4167
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Randomized test of the background scanner: builds a random tree,
        // interleaves filesystem mutations with event delivery, and checks
        // that (a) invariants hold after every batch of events, (b) an
        // incrementally-maintained snapshot matches a fresh scan, and (c)
        // `build_update`/`apply_update` can bring any earlier snapshot up to
        // date. Sizes are tunable via OPERATIONS / INITIAL_ENTRIES env vars.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // Empty snapshot seeded only with the root entry; kept for the
        // second scanner and for `build_update` comparisons below.
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
            diagnostic_summaries: Default::default(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Randomly either deliver a batch of buffered fs events to the
        // scanner or perform another mutation, occasionally saving snapshots
        // to replay updates against later.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush all remaining events so the scanner reaches a settled state.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A scanner started from scratch must converge to the same snapshot
        // as the incrementally-updated one.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every saved snapshot must be able to catch up to the final state
        // via a serialized update, with or without ignored entries included.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot to mirror an
                // update built with `include_ignored == false`.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4285
    /// Applies one random mutation to the directory tree under `root_path` —
    /// creating a file/dir, writing a `.gitignore`, renaming, or deleting —
    /// and returns synthetic fs events describing the touched paths.
    /// `insertion_probability` biases towards creation (1.0 = always create).
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                // Only the path matters to the consumer; the id is just a
                // current-time stamp and the flags are left empty.
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Force an insertion when the tree contains nothing but the root.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` into a random directory, ignoring a random
            // subset of its descendants (paths relative to that directory).
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Pick an existing file or non-root directory to rename or delete.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Move into some directory that isn't inside the moved path.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory outright instead of
                // creating a fresh name inside the parent.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                // Capture the contents before deleting so an event can be
                // recorded for every removed path.
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
4408
4409 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4410 let child_entries = std::fs::read_dir(&path).unwrap();
4411 let mut dirs = vec![path];
4412 let mut files = Vec::new();
4413 for child_entry in child_entries {
4414 let child_path = child_entry.unwrap().path();
4415 if child_path.is_dir() {
4416 let (child_dirs, child_files) = read_dir_recursive(child_path);
4417 dirs.extend(child_dirs);
4418 files.extend(child_files);
4419 } else {
4420 files.push(child_path);
4421 }
4422 }
4423 (dirs, files)
4424 }
4425
4426 fn gen_name(rng: &mut impl Rng) -> String {
4427 (0..6)
4428 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4429 .map(char::from)
4430 .collect()
4431 }
4432
    impl Snapshot {
        /// Test-only consistency checks over the snapshot's entry trees.
        fn check_invariants(&self) {
            // The `files` iterator (and its visible-only variant) must yield
            // exactly the file entries stored in `entries_by_path`, in order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walking the tree from the root via `child_entries` must visit
            // the same paths, in the same order, as iterating
            // `entries_by_path` directly.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    // Inserting at a fixed index reverses the children on the
                    // stack, so popping yields them in their original order.
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every tracked ignore must correspond to an existing directory
            // entry that still contains a `.gitignore` file.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples,
        /// sorted by path, optionally filtering out ignored entries.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4484}