1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary, ProjectEntry,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap, HashSet};
11use futures::{Stream, StreamExt};
12use fuzzy::CharBag;
13use gpui::{
14 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
15 Task, UpgradeModelHandle, WeakModelHandle,
16};
17use language::{
18 range_from_lsp, Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language,
19 LanguageRegistry, Operation, PointUtf16, Rope,
20};
21use lazy_static::lazy_static;
22use lsp::LanguageServer;
23use parking_lot::Mutex;
24use postage::{
25 prelude::{Sink as _, Stream as _},
26 watch,
27};
28use serde::Deserialize;
29use smol::channel::{self, Sender};
30use std::{
31 any::Any,
32 cmp::{self, Ordering},
33 convert::{TryFrom, TryInto},
34 ffi::{OsStr, OsString},
35 fmt,
36 future::Future,
37 ops::Deref,
38 path::{Path, PathBuf},
39 sync::{
40 atomic::{AtomicUsize, Ordering::SeqCst},
41 Arc,
42 },
43 time::{Duration, SystemTime},
44};
45use sum_tree::{Bias, TreeMap};
46use sum_tree::{Edit, SeekTarget, SumTree};
47use util::{post_inc, ResultExt, TryFutureExt};
48
lazy_static! {
    // Shared `OsStr` for the ".gitignore" filename, so directory scans can
    // compare file names against it without re-allocating.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
52
/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently running.
    Idle,
    /// A background scan is in progress.
    Scanning,
    /// The scan failed; `Arc` keeps the error cheaply cloneable.
    Err(Arc<anyhow::Error>),
}
59
/// Identifier for a worktree: derived from the model id for local trees
/// (see `WorktreeId::from_usize`) and from the server-assigned id for
/// remote trees (see `WorktreeId::from_proto`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A tree of files being edited, either rooted on the local filesystem
/// (`Local`) or mirrored from a collaborator over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// Events emitted by a `Worktree` model.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Event {
    /// Disk-based diagnostics (e.g. from a compiler pass) started updating.
    DiskBasedDiagnosticsUpdating,
    /// Disk-based diagnostics finished updating.
    DiskBasedDiagnosticsUpdated,
    /// Diagnostics changed for the file at this worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
74
75impl Entity for Worktree {
76 type Event = Event;
77
78 fn app_will_quit(
79 &mut self,
80 _: &mut MutableAppContext,
81 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
82 use futures::FutureExt;
83
84 if let Self::Local(worktree) = self {
85 let shutdown_futures = worktree
86 .language_servers
87 .drain()
88 .filter_map(|(_, server)| server.shutdown())
89 .collect::<Vec<_>>();
90 Some(
91 async move {
92 futures::future::join_all(shutdown_futures).await;
93 }
94 .boxed(),
95 )
96 } else {
97 None
98 }
99 }
100}
101
102impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem and kicks
    /// off the background scanner that keeps its snapshot in sync with disk.
    ///
    /// The returned model is a `Worktree::Local`. The scanner task watches
    /// the root for filesystem events (debounced to 100ms) and reports scan
    /// progress through the channel created by `LocalWorktree::new`.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // The scanner runs on the background executor and mutates
            // `background_snapshot`; `poll_snapshot` copies the result back
            // onto the foreground model.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
127
    /// Builds a worktree mirroring a collaborator's tree from its serialized
    /// protobuf form.
    ///
    /// Entry deserialization runs on the background executor. Two tasks are
    /// spawned for the lifetime of the model: one applies incoming
    /// `UpdateWorktree` messages (fed through `updates_tx`) to a shared
    /// snapshot, and one re-polls the model whenever that snapshot changes.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        // Lowercased root-name characters, used for fuzzy-match scoring.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // A malformed entry is skipped rather than failing
                        // the whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    // Remote worktrees have no local filesystem root.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Apply protocol updates to the shared snapshot off the main
                // thread; a failed update is logged and dropped.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    // Re-poll the model whenever the snapshot changes; stops
                    // once the model has been dropped.
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                    diagnostic_summaries,
                })
            })
        });

        Ok(worktree)
    }
250
251 pub fn as_local(&self) -> Option<&LocalWorktree> {
252 if let Worktree::Local(worktree) = self {
253 Some(worktree)
254 } else {
255 None
256 }
257 }
258
259 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
260 if let Worktree::Remote(worktree) = self {
261 Some(worktree)
262 } else {
263 None
264 }
265 }
266
267 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
268 if let Worktree::Local(worktree) = self {
269 Some(worktree)
270 } else {
271 None
272 }
273 }
274
275 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
276 if let Worktree::Remote(worktree) = self {
277 Some(worktree)
278 } else {
279 None
280 }
281 }
282
    /// Returns a clone of this worktree's current entry snapshot.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
289
290 pub fn replica_id(&self) -> ReplicaId {
291 match self {
292 Worktree::Local(_) => 0,
293 Worktree::Remote(worktree) => worktree.replica_id,
294 }
295 }
296
    /// Clears a departed collaborator's state. Local worktrees use the peer
    /// id as well (e.g. to drop buffers shared with that peer); remote
    /// worktrees only need the replica id.
    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }
308
    /// The language registry backing this worktree (stored under different
    /// field names on the two variants).
    pub fn languages(&self) -> &Arc<LanguageRegistry> {
        match self {
            Worktree::Local(worktree) => &worktree.language_registry,
            Worktree::Remote(worktree) => &worktree.languages,
        }
    }
315
    /// The user store shared by both worktree variants.
    pub fn user_store(&self) -> &ModelHandle<UserStore> {
        match self {
            Worktree::Local(worktree) => &worktree.user_store,
            Worktree::Remote(worktree) => &worktree.user_store,
        }
    }
322
    /// Handles a collaborator's request to open a buffer in this worktree,
    /// responding over RPC once the buffer is available.
    ///
    /// Panics if called on a remote worktree. Response failures are logged
    /// (via `log_err`) rather than propagated.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
348
    /// Handles a collaborator closing one of this worktree's shared buffers.
    /// Panics if called on a remote worktree.
    pub fn handle_close_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        self.as_local_mut()
            .unwrap()
            .close_remote_buffer(envelope, cx)
    }
359
360 pub fn diagnostic_summaries<'a>(
361 &'a self,
362 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
363 match self {
364 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
365 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
366 }
367 .iter()
368 .map(|(path, summary)| (path.0.clone(), summary.clone()))
369 }
370
371 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
372 match self {
373 Worktree::Local(worktree) => &mut worktree.loading_buffers,
374 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
375 }
376 }
377
    /// Opens the buffer at `path`, reusing an already-open buffer or an
    /// in-flight load when possible.
    ///
    /// If another caller is currently loading the same path, this waits on
    /// the shared watch channel instead of starting a second load, so every
    /// caller receives the same `Buffer` handle.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        // Wait until the load (ours or a pre-existing one) publishes its
        // result through the watch channel.
        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }
431
432 #[cfg(feature = "test-support")]
433 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
434 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
435 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
436 Worktree::Remote(worktree) => {
437 Box::new(worktree.open_buffers.values().filter_map(|buf| {
438 if let RemoteBuffer::Loaded(buf) = buf {
439 Some(buf)
440 } else {
441 None
442 }
443 }))
444 }
445 };
446
447 let path = path.as_ref();
448 open_buffers
449 .find(|buffer| {
450 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
451 file.path().as_ref() == path
452 } else {
453 false
454 }
455 })
456 .is_some()
457 }
458
459 pub fn handle_update_buffer(
460 &mut self,
461 envelope: TypedEnvelope<proto::UpdateBuffer>,
462 cx: &mut ModelContext<Self>,
463 ) -> Result<()> {
464 let payload = envelope.payload.clone();
465 let buffer_id = payload.buffer_id as usize;
466 let ops = payload
467 .operations
468 .into_iter()
469 .map(|op| language::proto::deserialize_operation(op))
470 .collect::<Result<Vec<_>, _>>()?;
471
472 match self {
473 Worktree::Local(worktree) => {
474 let buffer = worktree
475 .open_buffers
476 .get(&buffer_id)
477 .and_then(|buf| buf.upgrade(cx))
478 .ok_or_else(|| {
479 anyhow!("invalid buffer {} in update buffer message", buffer_id)
480 })?;
481 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
482 }
483 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
484 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
485 Some(RemoteBuffer::Loaded(buffer)) => {
486 if let Some(buffer) = buffer.upgrade(cx) {
487 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
488 } else {
489 worktree
490 .open_buffers
491 .insert(buffer_id, RemoteBuffer::Operations(ops));
492 }
493 }
494 None => {
495 worktree
496 .open_buffers
497 .insert(buffer_id, RemoteBuffer::Operations(ops));
498 }
499 },
500 }
501
502 Ok(())
503 }
504
    /// Handles a collaborator's request to save one of this worktree's
    /// shared buffers, replying with the saved version and mtime.
    ///
    /// Fails if the worktree is not currently shared or the buffer id is
    /// unknown for that peer; panics if called on a remote worktree.
    /// Response failures are logged rather than propagated.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        // Respond off the main thread once the save completes.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
557
    /// Handles notification that the host saved a buffer, recording the
    /// saved version and mtime on the matching open buffer. Buffers that
    /// are not open (or whose handle has been dropped) are ignored.
    /// Panics if called on a local worktree.
    pub fn handle_buffer_saved(
        &mut self,
        envelope: TypedEnvelope<proto::BufferSaved>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let worktree = self.as_remote_mut().unwrap();
        if let Some(buffer) = worktree
            .open_buffers
            .get(&(payload.buffer_id as usize))
            .and_then(|buf| buf.upgrade(cx))
        {
            buffer.update(cx, |buffer, cx| {
                let version = payload.version.try_into()?;
                let mtime = payload
                    .mtime
                    .ok_or_else(|| anyhow!("missing mtime"))?
                    .into();
                buffer.did_save(version, mtime, None, cx);
                Result::<_, anyhow::Error>::Ok(())
            })?;
        }
        Ok(())
    }
582
    /// Handles a collaborator's request to format one of this worktree's
    /// shared buffers, acknowledging on success or reporting the error over
    /// RPC. Panics if called on a remote worktree.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();

        Ok(())
    }
624
    /// Refreshes the foreground snapshot from its authoritative source and
    /// reconciles open buffers with it.
    ///
    /// Local: copies the background scanner's snapshot; while a scan is
    /// still running, schedules another poll (yielding immediately on a
    /// fake fs for tests, otherwise after 100ms). Remote: copies the latest
    /// snapshot from the watch channel. Always notifies observers.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // Only one re-poll is kept in flight at a time.
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
657
    /// Reconciles every open buffer with the current snapshot.
    ///
    /// Each live buffer receives a refreshed `File`: matched by entry id
    /// first, then by path; when neither matches, the file keeps its old
    /// path and mtime but loses its entry id (the entry no longer exists in
    /// the snapshot). Buffers whose weak handles no longer upgrade are
    /// removed from the map afterwards.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Remote buffers that are still loading have no model to
                // update, so only loaded buffers are considered.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        // Prune entries whose buffers have been dropped.
        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
733
734 pub fn update_diagnostics(
735 &mut self,
736 params: lsp::PublishDiagnosticsParams,
737 disk_based_sources: &HashSet<String>,
738 cx: &mut ModelContext<Worktree>,
739 ) -> Result<()> {
740 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
741 let abs_path = params
742 .uri
743 .to_file_path()
744 .map_err(|_| anyhow!("URI is not a file"))?;
745 let worktree_path = Arc::from(
746 abs_path
747 .strip_prefix(&this.abs_path)
748 .context("path is not within worktree")?,
749 );
750
751 let mut next_group_id = 0;
752 let mut diagnostics = Vec::default();
753 let mut primary_diagnostic_group_ids = HashMap::default();
754 let mut sources_by_group_id = HashMap::default();
755 let mut supporting_diagnostic_severities = HashMap::default();
756 for diagnostic in ¶ms.diagnostics {
757 let source = diagnostic.source.as_ref();
758 let code = diagnostic.code.as_ref().map(|code| match code {
759 lsp::NumberOrString::Number(code) => code.to_string(),
760 lsp::NumberOrString::String(code) => code.clone(),
761 });
762 let range = range_from_lsp(diagnostic.range);
763 let is_supporting = diagnostic
764 .related_information
765 .as_ref()
766 .map_or(false, |infos| {
767 infos.iter().any(|info| {
768 primary_diagnostic_group_ids.contains_key(&(
769 source,
770 code.clone(),
771 range_from_lsp(info.location.range),
772 ))
773 })
774 });
775
776 if is_supporting {
777 if let Some(severity) = diagnostic.severity {
778 supporting_diagnostic_severities
779 .insert((source, code.clone(), range), severity);
780 }
781 } else {
782 let group_id = post_inc(&mut next_group_id);
783 let is_disk_based =
784 source.map_or(false, |source| disk_based_sources.contains(source));
785
786 sources_by_group_id.insert(group_id, source);
787 primary_diagnostic_group_ids
788 .insert((source, code.clone(), range.clone()), group_id);
789
790 diagnostics.push(DiagnosticEntry {
791 range,
792 diagnostic: Diagnostic {
793 code: code.clone(),
794 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
795 message: diagnostic.message.clone(),
796 group_id,
797 is_primary: true,
798 is_valid: true,
799 is_disk_based,
800 },
801 });
802 if let Some(infos) = &diagnostic.related_information {
803 for info in infos {
804 if info.location.uri == params.uri {
805 let range = range_from_lsp(info.location.range);
806 diagnostics.push(DiagnosticEntry {
807 range,
808 diagnostic: Diagnostic {
809 code: code.clone(),
810 severity: DiagnosticSeverity::INFORMATION,
811 message: info.message.clone(),
812 group_id,
813 is_primary: false,
814 is_valid: true,
815 is_disk_based,
816 },
817 });
818 }
819 }
820 }
821 }
822 }
823
824 for entry in &mut diagnostics {
825 let diagnostic = &mut entry.diagnostic;
826 if !diagnostic.is_primary {
827 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
828 if let Some(&severity) = supporting_diagnostic_severities.get(&(
829 source,
830 diagnostic.code.clone(),
831 entry.range.clone(),
832 )) {
833 diagnostic.severity = severity;
834 }
835 }
836 }
837
838 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
839 Ok(())
840 }
841
    /// Records `diagnostics` for `worktree_path`: pushes them into the
    /// matching open buffer (forwarding the resulting operation to
    /// collaborators), stores them with a fresh per-path summary, emits
    /// `Event::DiagnosticsUpdated`, and — when the worktree is shared —
    /// broadcasts the summary over RPC. Panics if called on a remote
    /// worktree.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    // At most one open buffer can match the path.
                    break;
                }
            }
        }

        // Re-borrow as local: `send_buffer_update` above required `&mut self`.
        let this = self.as_local_mut().unwrap();
        let summary = DiagnosticSummary::new(&diagnostics);
        this.diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        this.diagnostics.insert(worktree_path.clone(), diagnostics);

        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));

        if let Some(share) = this.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = this.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = this.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
906
    /// Sends a buffer operation to the server when connected: local
    /// worktrees send only while shared, remote worktrees always send. On
    /// request failure the error is logged and the operation is queued on
    /// the worktree for later retransmission.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
947}
948
949impl WorktreeId {
950 pub fn from_usize(handle_id: usize) -> Self {
951 Self(handle_id)
952 }
953
954 pub(crate) fn from_proto(id: u64) -> Self {
955 Self(id as usize)
956 }
957
958 pub fn to_proto(&self) -> u64 {
959 self.0 as u64
960 }
961
962 pub fn to_usize(&self) -> usize {
963 self.0
964 }
965}
966
967impl fmt::Display for WorktreeId {
968 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
969 self.0.fmt(f)
970 }
971}
972
/// A view of a worktree's file entries at a point in time.
///
/// Cheap to clone: the entry trees are `SumTree`s and the paths are `Arc`s.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Scan counter; presumably bumped by the background scanner per pass —
    // TODO(review) confirm against BackgroundScanner.
    scan_id: usize,
    // Absolute root path on disk (empty for remote worktrees).
    abs_path: Arc<Path>,
    // File name of the root, used for display and fuzzy matching.
    root_name: String,
    // Lowercased characters of `root_name`, for fuzzy-match scoring.
    root_char_bag: CharBag,
    // Parsed gitignores keyed by path; the usize looks like the scan id at
    // which each was loaded — TODO(review) confirm.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path, plus an id-keyed index into them.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    // Source of fresh entry ids, shared with the background snapshot.
    next_entry_id: Arc<AtomicUsize>,
}
986
/// Worktree state for a directory on the local filesystem.
pub struct LocalWorktree {
    // Foreground copy of the scanner's snapshot; refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Settings parsed from `.zed.toml` at the root, if present.
    config: WorktreeConfig,
    // Snapshot mutated by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Most recent scan state reported by the background scanner.
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the background scanner task alive for the worktree's lifetime.
    _background_scanner_task: Option<Task<()>>,
    // Pending re-poll scheduled while a scan is in progress.
    poll_task: Option<Task<()>>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Open buffers keyed by buffer id; weak so closing a buffer releases it.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers opened on behalf of each remote peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Diagnostics and their per-path summaries, keyed by worktree-relative path.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    // Operations that failed to send, awaiting retransmission.
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Languages registered with this worktree, and their running servers
    // keyed by language name.
    languages: Vec<Arc<Language>>,
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
1008
/// State that exists only while a local worktree is shared with guests.
struct ShareState {
    project_id: u64,
    // Completed snapshots queued for transmission to guests.
    snapshots_tx: Sender<Snapshot>,
    // Keeps the snapshot-forwarding task alive while shared.
    _maintain_remote_snapshot: Option<Task<()>>,
}
1014
/// Worktree state for a tree mirrored from a remote host.
pub struct RemoteWorktree {
    project_id: u64,
    // Latest snapshot assembled from the host's updates.
    snapshot: Snapshot,
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Incoming `UpdateWorktree` messages are funneled through this channel
    // to the background task that applies them (see `Worktree::remote`).
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Open buffers, or queued operations for buffers still loading.
    open_buffers: HashMap<usize, RemoteBuffer>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Operations that failed to send, awaiting retransmission.
    queued_operations: Vec<(u64, Operation)>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
}
1029
/// Map from a worktree-relative path to a watch channel that resolves with
/// the buffer (or a shared error) once an in-flight `open_buffer` call for
/// that path completes.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
1034
/// Configuration parsed from an optional `.zed.toml` file at the worktree
/// root (see `LocalWorktree::new`).
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins allowed to collaborate on this worktree.
    collaborators: Vec<String>,
}
1039
1040impl LocalWorktree {
    /// Creates the model for a local worktree, seeding its snapshot with the
    /// root entry and wiring up scan-state plumbing.
    ///
    /// Returns the model plus the sender the background scanner uses to
    /// report `ScanState` changes; the caller is responsible for spawning
    /// the scanner itself (see `Worktree::open_local`).
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is empty.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // A missing or malformed `.zed.toml` silently falls back to the
        // default (empty) config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // `metadata` is `None` when the root path doesn't exist; the
            // snapshot then starts empty.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                languages: Default::default(),
                language_servers: Default::default(),
            };

            // Forward scan-state changes into `last_scan_state_rx`, re-poll
            // the snapshot, and — once scanning finishes on a shared
            // worktree — queue the fresh snapshot for sending to guests.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree was dropped; stop forwarding.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1149
1150 pub fn authorized_logins(&self) -> Vec<String> {
1151 self.config.collaborators.clone()
1152 }
1153
    /// The registry used to look up a language for files in this worktree.
    pub fn language_registry(&self) -> &LanguageRegistry {
        &self.language_registry
    }
1157
    /// The languages that have been registered for buffers in this worktree.
    pub fn languages(&self) -> &[Arc<Language>] {
        &self.languages
    }
1161
1162 pub fn register_language(
1163 &mut self,
1164 language: &Arc<Language>,
1165 cx: &mut ModelContext<Worktree>,
1166 ) -> Option<Arc<LanguageServer>> {
1167 if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
1168 self.languages.push(language.clone());
1169 }
1170
1171 if let Some(server) = self.language_servers.get(language.name()) {
1172 return Some(server.clone());
1173 }
1174
1175 if let Some(language_server) = language
1176 .start_server(self.abs_path(), cx)
1177 .log_err()
1178 .flatten()
1179 {
1180 enum DiagnosticProgress {
1181 Updating,
1182 Updated,
1183 }
1184
1185 let disk_based_sources = language
1186 .disk_based_diagnostic_sources()
1187 .cloned()
1188 .unwrap_or_default();
1189 let disk_based_diagnostics_progress_token =
1190 language.disk_based_diagnostics_progress_token().cloned();
1191 let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
1192 let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
1193 smol::channel::unbounded();
1194 language_server
1195 .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
1196 smol::block_on(diagnostics_tx.send(params)).ok();
1197 })
1198 .detach();
1199 cx.spawn_weak(|this, mut cx| {
1200 let has_disk_based_diagnostic_progress_token =
1201 disk_based_diagnostics_progress_token.is_some();
1202 let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
1203 async move {
1204 while let Ok(diagnostics) = diagnostics_rx.recv().await {
1205 if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1206 handle.update(&mut cx, |this, cx| {
1207 if !has_disk_based_diagnostic_progress_token {
1208 smol::block_on(
1209 disk_based_diagnostics_done_tx
1210 .send(DiagnosticProgress::Updating),
1211 )
1212 .ok();
1213 }
1214 this.update_diagnostics(diagnostics, &disk_based_sources, cx)
1215 .log_err();
1216 if !has_disk_based_diagnostic_progress_token {
1217 smol::block_on(
1218 disk_based_diagnostics_done_tx
1219 .send(DiagnosticProgress::Updated),
1220 )
1221 .ok();
1222 }
1223 })
1224 } else {
1225 break;
1226 }
1227 }
1228 }
1229 })
1230 .detach();
1231
1232 let mut pending_disk_based_diagnostics: i32 = 0;
1233 language_server
1234 .on_notification::<lsp::notification::Progress, _>(move |params| {
1235 let token = match params.token {
1236 lsp::NumberOrString::Number(_) => None,
1237 lsp::NumberOrString::String(token) => Some(token),
1238 };
1239
1240 if token == disk_based_diagnostics_progress_token {
1241 match params.value {
1242 lsp::ProgressParamsValue::WorkDone(progress) => match progress {
1243 lsp::WorkDoneProgress::Begin(_) => {
1244 if pending_disk_based_diagnostics == 0 {
1245 smol::block_on(
1246 disk_based_diagnostics_done_tx
1247 .send(DiagnosticProgress::Updating),
1248 )
1249 .ok();
1250 }
1251 pending_disk_based_diagnostics += 1;
1252 }
1253 lsp::WorkDoneProgress::End(_) => {
1254 pending_disk_based_diagnostics -= 1;
1255 if pending_disk_based_diagnostics == 0 {
1256 smol::block_on(
1257 disk_based_diagnostics_done_tx
1258 .send(DiagnosticProgress::Updated),
1259 )
1260 .ok();
1261 }
1262 }
1263 _ => {}
1264 },
1265 }
1266 }
1267 })
1268 .detach();
1269 let rpc = self.client.clone();
1270 cx.spawn_weak(|this, mut cx| async move {
1271 while let Ok(progress) = disk_based_diagnostics_done_rx.recv().await {
1272 if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1273 match progress {
1274 DiagnosticProgress::Updating => {
1275 let message = handle.update(&mut cx, |this, cx| {
1276 cx.emit(Event::DiskBasedDiagnosticsUpdating);
1277 let this = this.as_local().unwrap();
1278 this.share.as_ref().map(|share| {
1279 proto::DiskBasedDiagnosticsUpdating {
1280 project_id: share.project_id,
1281 worktree_id: this.id().to_proto(),
1282 }
1283 })
1284 });
1285
1286 if let Some(message) = message {
1287 rpc.send(message).await.log_err();
1288 }
1289 }
1290 DiagnosticProgress::Updated => {
1291 let message = handle.update(&mut cx, |this, cx| {
1292 cx.emit(Event::DiskBasedDiagnosticsUpdated);
1293 let this = this.as_local().unwrap();
1294 this.share.as_ref().map(|share| {
1295 proto::DiskBasedDiagnosticsUpdated {
1296 project_id: share.project_id,
1297 worktree_id: this.id().to_proto(),
1298 }
1299 })
1300 });
1301
1302 if let Some(message) = message {
1303 rpc.send(message).await.log_err();
1304 }
1305 }
1306 }
1307 } else {
1308 break;
1309 }
1310 }
1311 })
1312 .detach();
1313
1314 self.language_servers
1315 .insert(language.name().to_string(), language_server.clone());
1316 Some(language_server.clone())
1317 } else {
1318 None
1319 }
1320 }
1321
1322 fn get_open_buffer(
1323 &mut self,
1324 path: &Path,
1325 cx: &mut ModelContext<Worktree>,
1326 ) -> Option<ModelHandle<Buffer>> {
1327 let handle = cx.handle();
1328 let mut result = None;
1329 self.open_buffers.retain(|_buffer_id, buffer| {
1330 if let Some(buffer) = buffer.upgrade(cx) {
1331 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1332 if file.worktree == handle && file.path().as_ref() == path {
1333 result = Some(buffer);
1334 }
1335 }
1336 true
1337 } else {
1338 false
1339 }
1340 });
1341 result
1342 }
1343
    /// Loads `path` from disk and constructs a `Buffer` model for it,
    /// attaching any cached diagnostics and the language (plus language
    /// server) selected for the file.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Read the file's contents and refresh its worktree entry.
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.get(&path).cloned();
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                // Registering the language may lazily start its server.
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                // Seed the buffer with previously-received diagnostics.
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                // Broadcast the diagnostic operations, then track the buffer
                // so future opens can reuse it.
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1390
    /// Handles a guest's `OpenBuffer` request: opens the buffer locally,
    /// records it as shared with that peer, and replies with the buffer's
    /// serialized state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                // Keep a strong handle per peer so the buffer stays alive
                // while the guest has it open.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1416
1417 pub fn close_remote_buffer(
1418 &mut self,
1419 envelope: TypedEnvelope<proto::CloseBuffer>,
1420 cx: &mut ModelContext<Worktree>,
1421 ) -> Result<()> {
1422 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1423 shared_buffers.remove(&envelope.payload.buffer_id);
1424 cx.notify();
1425 }
1426
1427 Ok(())
1428 }
1429
1430 pub fn remove_collaborator(
1431 &mut self,
1432 peer_id: PeerId,
1433 replica_id: ReplicaId,
1434 cx: &mut ModelContext<Worktree>,
1435 ) {
1436 self.shared_buffers.remove(&peer_id);
1437 for (_, buffer) in &self.open_buffers {
1438 if let Some(buffer) = buffer.upgrade(cx) {
1439 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1440 }
1441 }
1442 cx.notify();
1443 }
1444
1445 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1446 let mut scan_state_rx = self.last_scan_state_rx.clone();
1447 async move {
1448 let mut scan_state = Some(scan_state_rx.borrow().clone());
1449 while let Some(ScanState::Scanning) = scan_state {
1450 scan_state = scan_state_rx.recv().await;
1451 }
1452 }
1453 }
1454
1455 fn is_scanning(&self) -> bool {
1456 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1457 true
1458 } else {
1459 false
1460 }
1461 }
1462
    /// Returns a clone of the current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1466
    /// The absolute path of this worktree's root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1470
    /// Whether the given absolute path lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1474
1475 fn absolutize(&self, path: &Path) -> PathBuf {
1476 if path.file_name().is_some() {
1477 self.snapshot.abs_path.join(path)
1478 } else {
1479 self.snapshot.abs_path.to_path_buf()
1480 }
1481 }
1482
    /// Reads the file at `path` from disk, refreshing its entry in the
    /// background snapshot, and returns a `File` handle plus the file's text.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Surface the refreshed entry to the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1508
    /// Saves `text` to a (possibly new) `path` and re-associates `buffer`
    /// with the resulting file, returning the new `File` handle.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer so it can be found under its new path.
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1533
    /// Writes `text` to `path` on disk (on the background executor),
    /// refreshes the file's entry in the background snapshot, and returns
    /// that refreshed entry.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Surface the refreshed entry to the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1555
    /// Starts sharing this worktree under `project_id`: sends the full
    /// snapshot to the server and spawns a task that streams snapshot diffs
    /// as the background scanner produces them. Idempotent — returns
    /// immediately if the worktree is already shared.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        let maintain_remote_snapshot = cx.background().spawn({
            let rpc = rpc.clone();
            let snapshot = snapshot.clone();
            async move {
                // Diff each incoming snapshot against the last one that was
                // successfully sent, so a failed send is folded into the
                // next diff rather than lost.
                let mut prev_snapshot = snapshot;
                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                    let message =
                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                    match rpc.send(message).await {
                        Ok(()) => prev_snapshot = snapshot,
                        Err(err) => log::error!("error sending snapshot diff {}", err),
                    }
                }
            }
        });
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
        });

        // Serialize the initial snapshot off the main thread.
        let diagnostic_summaries = self.diagnostic_summaries.clone();
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto(&diagnostic_summaries)),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1603
1604 pub fn unshare(&mut self) {
1605 self.share.take();
1606 }
1607
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1611}
1612
1613fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1614 let contents = smol::block_on(fs.load(&abs_path))?;
1615 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1616 let mut builder = GitignoreBuilder::new(parent);
1617 for line in contents.lines() {
1618 builder.add_line(Some(abs_path.into()), line)?;
1619 }
1620 Ok(builder.build()?)
1621}
1622
// Both worktree variants expose their snapshot's read-only API directly.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1633
// Expose the snapshot's read-only API on the local worktree itself.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1641
// Expose the snapshot's read-only API on the remote worktree itself.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1649
impl fmt::Debug for LocalWorktree {
    // Delegate to the snapshot's entry-tree dump.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1655
1656impl RemoteWorktree {
1657 fn get_open_buffer(
1658 &mut self,
1659 path: &Path,
1660 cx: &mut ModelContext<Worktree>,
1661 ) -> Option<ModelHandle<Buffer>> {
1662 let handle = cx.handle();
1663 let mut existing_buffer = None;
1664 self.open_buffers.retain(|_buffer_id, buffer| {
1665 if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
1666 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1667 if file.worktree == handle && file.path().as_ref() == path {
1668 existing_buffer = Some(buffer);
1669 }
1670 }
1671 true
1672 } else {
1673 false
1674 }
1675 });
1676 existing_buffer
1677 }
1678
1679 fn open_buffer(
1680 &mut self,
1681 path: &Path,
1682 cx: &mut ModelContext<Worktree>,
1683 ) -> Task<Result<ModelHandle<Buffer>>> {
1684 let rpc = self.client.clone();
1685 let replica_id = self.replica_id;
1686 let project_id = self.project_id;
1687 let remote_worktree_id = self.id();
1688 let root_path = self.snapshot.abs_path.clone();
1689 let path: Arc<Path> = Arc::from(path);
1690 let path_string = path.to_string_lossy().to_string();
1691 cx.spawn_weak(move |this, mut cx| async move {
1692 let entry = this
1693 .upgrade(&cx)
1694 .ok_or_else(|| anyhow!("worktree was closed"))?
1695 .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
1696 .ok_or_else(|| anyhow!("file does not exist"))?;
1697 let response = rpc
1698 .request(proto::OpenBuffer {
1699 project_id,
1700 worktree_id: remote_worktree_id.to_proto(),
1701 path: path_string,
1702 })
1703 .await?;
1704
1705 let this = this
1706 .upgrade(&cx)
1707 .ok_or_else(|| anyhow!("worktree was closed"))?;
1708 let file = File {
1709 entry_id: Some(entry.id),
1710 worktree: this.clone(),
1711 worktree_path: root_path,
1712 path: entry.path,
1713 mtime: entry.mtime,
1714 is_local: false,
1715 };
1716 let language = this.read_with(&cx, |this, _| {
1717 use language::File;
1718 this.languages().select_language(file.full_path()).cloned()
1719 });
1720 let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
1721 let buffer_id = remote_buffer.id as usize;
1722 let buffer = cx.add_model(|cx| {
1723 Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
1724 .unwrap()
1725 .with_language(language, None, cx)
1726 });
1727 this.update(&mut cx, move |this, cx| {
1728 let this = this.as_remote_mut().unwrap();
1729 if let Some(RemoteBuffer::Operations(pending_ops)) = this
1730 .open_buffers
1731 .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
1732 {
1733 buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
1734 }
1735 Result::<_, anyhow::Error>::Ok(buffer)
1736 })
1737 })
1738 }
1739
1740 pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1741 for (_, buffer) in self.open_buffers.drain() {
1742 if let RemoteBuffer::Loaded(buffer) = buffer {
1743 if let Some(buffer) = buffer.upgrade(cx) {
1744 buffer.update(cx, |buffer, cx| buffer.close(cx))
1745 }
1746 }
1747 }
1748 }
1749
1750 fn snapshot(&self) -> Snapshot {
1751 self.snapshot.clone()
1752 }
1753
1754 pub fn update_from_remote(
1755 &mut self,
1756 envelope: TypedEnvelope<proto::UpdateWorktree>,
1757 cx: &mut ModelContext<Worktree>,
1758 ) -> Result<()> {
1759 let mut tx = self.updates_tx.clone();
1760 let payload = envelope.payload.clone();
1761 cx.background()
1762 .spawn(async move {
1763 tx.send(payload).await.expect("receiver runs to completion");
1764 })
1765 .detach();
1766
1767 Ok(())
1768 }
1769
1770 pub fn update_diagnostic_summary(
1771 &mut self,
1772 envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
1773 cx: &mut ModelContext<Worktree>,
1774 ) {
1775 if let Some(summary) = envelope.payload.summary {
1776 let path: Arc<Path> = Path::new(&summary.path).into();
1777 self.diagnostic_summaries.insert(
1778 PathKey(path.clone()),
1779 DiagnosticSummary {
1780 error_count: summary.error_count as usize,
1781 warning_count: summary.warning_count as usize,
1782 info_count: summary.info_count as usize,
1783 hint_count: summary.hint_count as usize,
1784 },
1785 );
1786 cx.emit(Event::DiagnosticsUpdated(path));
1787 }
1788 }
1789
1790 pub fn disk_based_diagnostics_updating(&self, cx: &mut ModelContext<Worktree>) {
1791 cx.emit(Event::DiskBasedDiagnosticsUpdating);
1792 }
1793
1794 pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
1795 cx.emit(Event::DiskBasedDiagnosticsUpdated);
1796 }
1797
1798 pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1799 for (_, buffer) in &self.open_buffers {
1800 if let Some(buffer) = buffer.upgrade(cx) {
1801 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1802 }
1803 }
1804 cx.notify();
1805 }
1806}
1807
/// The state of a buffer opened from a remote (host) worktree.
enum RemoteBuffer {
    // Operations received before the buffer finished loading; replayed once
    // the buffer arrives.
    Operations(Vec<Operation>),
    // The buffer is loaded; holds a weak handle to its model.
    Loaded(WeakModelHandle<Buffer>),
}
1812
1813impl RemoteBuffer {
1814 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1815 match self {
1816 Self::Operations(_) => None,
1817 Self::Loaded(buffer) => buffer.upgrade(cx),
1818 }
1819 }
1820}
1821
impl Snapshot {
    /// The id of the worktree this snapshot was taken from.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Serializes the full snapshot for the initial share message, bundling
    /// the given per-path diagnostic summaries. Ignored entries are omitted.
    pub fn to_proto(
        &self,
        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
    ) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id.0 as u64,
            root_name,
            entries: self
                .entries_by_path
                .iter()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
            diagnostic_summaries: diagnostic_summaries
                .iter()
                .map(|(path, summary)| summary.to_proto(path.0.clone()))
                .collect(),
        }
    }

    /// Computes the delta between `self` and an older snapshot (`other`) as
    /// an `UpdateWorktree` message: entries added or changed since `other`
    /// go in `updated_entries`, entries no longer present in `removed_entries`.
    ///
    /// Implemented as a merge-join over the two `entries_by_id` cursors,
    /// which this relies on to yield entries in ascending id order; entries
    /// with equal ids are re-sent only when their `scan_id`s differ.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in `self`: newly added entry.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: re-send only if rescanned
                            // since `other` was taken.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: entry was removed.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies a remote `UpdateWorktree` delta, editing both the path-ordered
    /// and id-ordered trees. Bumps `scan_id` so the applied entries carry a
    /// fresh scan stamp.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If this id already exists under a different path, drop the old
            // path record first so the two trees stay consistent.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of file entries, including ignored files.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Returns a traversal positioned at the `start_offset`-th entry,
    /// counting only entries allowed by the `include_dirs` /
    /// `include_ignored` filters.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal positioned at `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, skipping the root entry
    /// (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of the entry at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Seek just past the parent itself, so the traversal starts at its
        // first child.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The worktree's root entry, if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry at exactly `path` (relative to the worktree root).
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The seek may land on a neighboring entry; require an exact
                // path match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id, going through the id-to-path index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts (or replaces) a single entry in both trees, reusing a prior
    /// entry id when possible. If the entry is a `.gitignore` file, its rules
    /// are compiled and registered for the containing directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A broken .gitignore shouldn't abort the scan; log and
                    // continue without its rules.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Fills in the children of a directory that was previously inserted as
    /// `PendingDir`, marking it as a fully-scanned `Dir` and registering the
    /// directory's gitignore, if one was found.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            // Callers only populate directories that are still pending.
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id of a recently
    /// removed entry with the same inode, otherwise the id of the existing
    /// entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` along with all of its descendants,
    /// remembering removed ids by inode so they can be reused if the same
    /// file reappears.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before path], [the removed
            // subtree], and [after], then stitch before + after together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Keep the ignore entry but stamp it with the current scan so the
            // scanner can tell its rules are stale.
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of gitignore rules that apply at `path`, walking from
    /// the worktree root down through the path's ancestors. Short-circuits to
    /// an "ignore everything" stack as soon as an ancestor (or the path
    /// itself) is ignored.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // Apply ancestors root-first, since outer rules gate inner ones.
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
2189
impl fmt::Debug for Snapshot {
    // Dumps the entry tree, one line per entry, indented by one space per
    // path component to show nesting.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for entry in self.entries_by_path.cursor::<()>() {
            for _ in entry.path.ancestors().skip(1) {
                write!(f, " ")?;
            }
            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
        }
        Ok(())
    }
}
2201
/// A handle to a file within a worktree, used as the `language::File`
/// backing for buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    // `None` once the underlying worktree entry has been deleted.
    entry_id: Option<usize>,
    // The worktree this file belongs to.
    worktree: ModelHandle<Worktree>,
    // Absolute path of the worktree root, used to build absolute/full paths.
    worktree_path: Arc<Path>,
    // Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    // Last-known modification time of the file.
    pub mtime: SystemTime,
    // Whether the file belongs to a local worktree (vs. a remote one).
    is_local: bool,
}
2211
2212impl language::File for File {
    /// Last-known modification time of the file.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }
2216
    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }
2220
2221 fn abs_path(&self) -> Option<PathBuf> {
2222 if self.is_local {
2223 Some(self.worktree_path.join(&self.path))
2224 } else {
2225 None
2226 }
2227 }
2228
2229 fn full_path(&self) -> PathBuf {
2230 let mut full_path = PathBuf::new();
2231 if let Some(worktree_name) = self.worktree_path.file_name() {
2232 full_path.push(worktree_name);
2233 }
2234 full_path.push(&self.path);
2235 full_path
2236 }
2237
2238 /// Returns the last component of this handle's absolute path. If this handle refers to the root
2239 /// of its worktree, then this method will return the name of the worktree itself.
2240 fn file_name<'a>(&'a self) -> Option<OsString> {
2241 self.path
2242 .file_name()
2243 .or_else(|| self.worktree_path.file_name())
2244 .map(Into::into)
2245 }
2246
    /// Whether the underlying worktree entry has been deleted.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }
2250
    /// Saves the buffer's contents to this file.
    ///
    /// For a local worktree, writes to disk and — when shared — notifies
    /// collaborators with `BufferSaved`. For a remote worktree, asks the host
    /// to perform the save over RPC. Resolves to the saved version and mtime.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                // Only broadcast the save if the worktree is shared.
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }
2300
2301 fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
2302 let worktree = self.worktree.read(cx).as_local()?;
2303 let abs_path = worktree.absolutize(&self.path);
2304 let fs = worktree.fs.clone();
2305 Some(
2306 cx.background()
2307 .spawn(async move { fs.load(&abs_path).await }),
2308 )
2309 }
2310
2311 fn format_remote(
2312 &self,
2313 buffer_id: u64,
2314 cx: &mut MutableAppContext,
2315 ) -> Option<Task<Result<()>>> {
2316 let worktree = self.worktree.read(cx);
2317 let worktree_id = worktree.id().to_proto();
2318 let worktree = worktree.as_remote()?;
2319 let rpc = worktree.client.clone();
2320 let project_id = worktree.project_id;
2321 Some(cx.foreground().spawn(async move {
2322 rpc.request(proto::FormatBuffer {
2323 project_id,
2324 worktree_id,
2325 buffer_id,
2326 })
2327 .await?;
2328 Ok(())
2329 }))
2330 }
2331
2332 fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
2333 self.worktree.update(cx, |worktree, cx| {
2334 worktree.send_buffer_update(buffer_id, operation, cx);
2335 });
2336 }
2337
2338 fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
2339 self.worktree.update(cx, |worktree, cx| {
2340 if let Worktree::Remote(worktree) = worktree {
2341 let project_id = worktree.project_id;
2342 let worktree_id = worktree.id().to_proto();
2343 let rpc = worktree.client.clone();
2344 cx.background()
2345 .spawn(async move {
2346 if let Err(error) = rpc
2347 .send(proto::CloseBuffer {
2348 project_id,
2349 worktree_id,
2350 buffer_id,
2351 })
2352 .await
2353 {
2354 log::error!("error closing remote buffer: {}", error);
2355 }
2356 })
2357 .detach();
2358 }
2359 });
2360 }
2361
2362 fn as_any(&self) -> &dyn Any {
2363 self
2364 }
2365}
2366
2367impl File {
2368 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2369 file.and_then(|f| f.as_any().downcast_ref())
2370 }
2371
2372 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2373 self.worktree.read(cx).id()
2374 }
2375
2376 pub fn project_entry(&self, cx: &AppContext) -> Option<ProjectEntry> {
2377 self.entry_id.map(|entry_id| ProjectEntry {
2378 worktree_id: self.worktree_id(cx),
2379 entry_id,
2380 })
2381 }
2382}
2383
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    /// Worktree-unique id, assigned from the snapshot's `next_entry_id`
    /// counter.
    // NOTE(review): ids appear to be preserved across renames within a single
    // event batch (see `removed_entry_ids` handling in `process_events`) —
    // confirm in `Snapshot::insert_entry`.
    pub id: usize,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether some `.gitignore` in scope excludes this path.
    pub is_ignored: bool,
}
2394
/// The kind of a worktree [`Entry`].
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been scanned.
    PendingDir,
    /// A directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy path matching.
    File(CharBag),
}
2401
2402impl Entry {
2403 fn new(
2404 path: Arc<Path>,
2405 metadata: &fs::Metadata,
2406 next_entry_id: &AtomicUsize,
2407 root_char_bag: CharBag,
2408 ) -> Self {
2409 Self {
2410 id: next_entry_id.fetch_add(1, SeqCst),
2411 kind: if metadata.is_dir {
2412 EntryKind::PendingDir
2413 } else {
2414 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2415 },
2416 path,
2417 inode: metadata.inode,
2418 mtime: metadata.mtime,
2419 is_symlink: metadata.is_symlink,
2420 is_ignored: false,
2421 }
2422 }
2423
2424 pub fn is_dir(&self) -> bool {
2425 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2426 }
2427
2428 pub fn is_file(&self) -> bool {
2429 matches!(self.kind, EntryKind::File(_))
2430 }
2431}
2432
2433impl sum_tree::Item for Entry {
2434 type Summary = EntrySummary;
2435
2436 fn summary(&self) -> Self::Summary {
2437 let visible_count = if self.is_ignored { 0 } else { 1 };
2438 let file_count;
2439 let visible_file_count;
2440 if self.is_file() {
2441 file_count = 1;
2442 visible_file_count = visible_count;
2443 } else {
2444 file_count = 0;
2445 visible_file_count = 0;
2446 }
2447
2448 EntrySummary {
2449 max_path: self.path.clone(),
2450 count: 1,
2451 visible_count,
2452 file_count,
2453 visible_file_count,
2454 }
2455 }
2456}
2457
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed (and therefore ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2465
/// Aggregated statistics over a run of [`Entry`] items in the worktree's
/// entry tree, used to seek by path or by entry count.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest path covered by the summarized entries (entries are ordered by path).
    max_path: Arc<Path>,
    // Total number of entries summarized.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are files and not gitignored.
    visible_file_count: usize,
}
2474
2475impl Default for EntrySummary {
2476 fn default() -> Self {
2477 Self {
2478 max_path: Arc::from(Path::new("")),
2479 count: 0,
2480 visible_count: 0,
2481 file_count: 0,
2482 visible_file_count: 0,
2483 }
2484 }
2485}
2486
2487impl sum_tree::Summary for EntrySummary {
2488 type Context = ();
2489
2490 fn add_summary(&mut self, rhs: &Self, _: &()) {
2491 self.max_path = rhs.max_path.clone();
2492 self.visible_count += rhs.visible_count;
2493 self.file_count += rhs.file_count;
2494 self.visible_file_count += rhs.visible_file_count;
2495 }
2496}
2497
/// Secondary index record mapping an entry's id to its current path, kept in
/// `entries_by_id` alongside the path-ordered entry tree.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // Scan in which this record was last updated (see `update_ignore_status`).
    scan_id: usize,
}
2505
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The only statistic tracked for the id index is the greatest id covered.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2513
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    // The id index is keyed (and ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2521
/// Summary for [`PathEntry`] runs: tracks only the greatest entry id, which
/// is all that is needed to seek by id.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2526
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // Items are ordered by id, so the right-hand summary always has the
    // greater maximum.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2534
// Allows seeking through the id index using a bare `usize` id as the cursor
// dimension.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2540
/// Ordering key for worktree entries: their worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2543
impl Default for PathKey {
    // The empty path sorts before all real entries.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2549
// Allows seeking through the entry tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2555
/// Scans a local worktree's directory tree on background threads and keeps
/// the shared [`Snapshot`] up to date as file-system events arrive.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the foreground `LocalWorktree`.
    snapshot: Arc<Mutex<Snapshot>>,
    // Channel for reporting scan progress; when the receiver is dropped, the
    // scanner shuts down.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2562
impl BackgroundScanner {
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }

    /// Absolute path of the worktree root being scanned.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// Clones the current snapshot so it can be read and mutated without
    /// holding the lock.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Main loop: performs the initial recursive scan, then processes batches
    /// of file-system events. Each phase is bracketed with `Scanning`/`Idle`
    /// notifications; a failed send means the receiver (the worktree) is gone
    /// and the scanner stops.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }

    /// Initial scan of the worktree root. If the root is a directory, its
    /// subtree is traversed in parallel by one worker per CPU, all pulling
    /// `ScanJob`s from a shared queue. Each job carries a clone of the queue's
    /// sender so it can enqueue its subdirectories; the queue drains (and the
    /// workers exit) once every sender clone has been dropped.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Drop the original sender so the queue closes when the last
            // in-flight job's clone is dropped.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }

    /// Scans a single directory: reads its children, computes their ignore
    /// status, stores them in the snapshot, and enqueues one `ScanJob` per
    /// subdirectory.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // there should rarely be too numerous. Update the ignore stack associated with any
                // new jobs as well.
                //
                // NOTE(review): this relies on directories in `new_entries` and
                // jobs in `new_jobs` having been pushed in the same order — the
                // `unwrap` below would panic if that invariant were broken.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Once a directory is ignored, everything beneath it is too,
                    // so its subtree uses the catch-all stack.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }

    /// Applies a batch of file-system events to a working copy of the
    /// snapshot, rescans any created directories in parallel, publishes the
    /// result, and refreshes ignore statuses. Returns `false` if the worktree
    /// root can no longer be canonicalized (e.g. it was deleted).
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path so that events inside an affected directory become
        // adjacent to the directory's own event, then drop them: rescanning
        // the ancestor covers its descendants.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First remove every affected path, then re-insert those that still
        // exist; this pairing is what allows renames to be detected below.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists; the removal above already
                // handled it.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Publish before the parallel rescan so workers (which write through
        // `self.snapshot`) build on this state.
        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }

    /// Recomputes ignore status for the subtrees whose `.gitignore` files were
    /// touched in the latest scan, and forgets ignores whose directories are
    /// gone. Subtrees are processed in parallel via a job queue analogous to
    /// the scan queue.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants of a path that is already being updated; the
            // ancestor's recursive update covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes ignore status for the direct children of `job.path`,
    /// enqueuing a child job per subdirectory, and writes any changed entries
    /// back to the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
}
2966
2967async fn refresh_entry(
2968 fs: &dyn Fs,
2969 snapshot: &Mutex<Snapshot>,
2970 path: Arc<Path>,
2971 abs_path: &Path,
2972) -> Result<Entry> {
2973 let root_char_bag;
2974 let next_entry_id;
2975 {
2976 let snapshot = snapshot.lock();
2977 root_char_bag = snapshot.root_char_bag;
2978 next_entry_id = snapshot.next_entry_id.clone();
2979 }
2980 let entry = Entry::new(
2981 path,
2982 &fs.metadata(abs_path)
2983 .await?
2984 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2985 &next_entry_id,
2986 root_char_bag,
2987 );
2988 Ok(snapshot.lock().insert_entry(entry, fs))
2989}
2990
2991fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2992 let mut result = root_char_bag;
2993 result.extend(
2994 path.to_string_lossy()
2995 .chars()
2996 .map(|c| c.to_ascii_lowercase()),
2997 );
2998 result
2999}
3000
/// Unit of work for the parallel directory scan: one directory to read.
struct ScanJob {
    abs_path: PathBuf,
    // Path relative to the worktree root.
    path: Arc<Path>,
    // Ignores in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Sender back into the scan queue, so subdirectory jobs can be enqueued;
    // the queue closes once all job-held senders are dropped.
    scan_queue: Sender<ScanJob>,
}
3007
/// Unit of work for the parallel ignore-status refresh: one directory whose
/// children need their `is_ignored` flags recomputed.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Sender back into the queue for enqueuing subdirectory jobs.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3013
/// Test-support extensions on `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    /// Waits until all pending file-system events for the worktree have been
    /// processed (test-only; see the impl for how this is done).
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3021
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create the sentinel file and wait for its event to arrive...
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be observed.
            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3054
/// Cursor dimension tracking how many entries (total, visible, files,
/// visible files) precede the cursor's position in the entry tree.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
3063
3064impl<'a> TraversalProgress<'a> {
3065 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3066 match (include_ignored, include_dirs) {
3067 (true, true) => self.count,
3068 (true, false) => self.file_count,
3069 (false, true) => self.visible_count,
3070 (false, false) => self.visible_file_count,
3071 }
3072 }
3073}
3074
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Accumulate all counters; `max_path` is replaced because summaries
    // arrive in path order.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3084
impl<'a> Default for TraversalProgress<'a> {
    // Progress at the start of the tree: nothing counted yet.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3096
/// An iterator-like cursor over a snapshot's entries in path order, with
/// optional filtering of directories and ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3102
impl<'a> Traversal<'a> {
    /// Steps to the next entry that passes this traversal's filters.
    /// Returns whatever `advance_to_offset` returns for the next offset.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` entries (counted under the current
    /// filters) have been passed.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry and all of its descendants, stopping at
    /// the next entry outside its subtree that passes the filters. Returns
    /// `false` when no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // `PathSuccessor` seeks to the first path that is not beneath
            // `entry.path`; loop again if that entry is filtered out.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of (filtered) entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3148
3149impl<'a> Iterator for Traversal<'a> {
3150 type Item = &'a Entry;
3151
3152 fn next(&mut self) -> Option<Self::Item> {
3153 if let Some(item) = self.entry() {
3154 self.advance();
3155 Some(item)
3156 } else {
3157 None
3158 }
3159 }
3160}
3161
/// Seek targets for traversing the entry tree.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first path that is not beneath the given path.
    PathSuccessor(&'a Path),
    /// Seek until the given number of entries (under the given filters) has
    /// been passed.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3172
3173impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3174 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3175 match self {
3176 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3177 TraversalTarget::PathSuccessor(path) => {
3178 if !cursor_location.max_path.starts_with(path) {
3179 Ordering::Equal
3180 } else {
3181 Ordering::Greater
3182 }
3183 }
3184 TraversalTarget::Count {
3185 count,
3186 include_dirs,
3187 include_ignored,
3188 } => Ord::cmp(
3189 count,
3190 &cursor_location.count(*include_dirs, *include_ignored),
3191 ),
3192 }
3193 }
3194}
3195
/// Iterator over the entries directly beneath `parent_path`, skipping their
/// descendants via `Traversal::advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3200
3201impl<'a> Iterator for ChildEntriesIter<'a> {
3202 type Item = &'a Entry;
3203
3204 fn next(&mut self) -> Option<Self::Item> {
3205 if let Some(item) = self.traversal.entry() {
3206 if item.path.starts_with(&self.parent_path) {
3207 self.traversal.advance_to_sibling();
3208 return Some(item);
3209 }
3210 }
3211 None
3212 }
3213}
3214
// Converts a worktree entry into its protobuf representation for sending to
// remote collaborators. The `CharBag` is not transmitted; the receiving side
// rebuilds it (see the `TryFrom` impl below).
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            // Paths are transmitted as strings, lossily for non-UTF-8 paths.
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3228
3229impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3230 type Error = anyhow::Error;
3231
3232 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3233 if let Some(mtime) = entry.mtime {
3234 let kind = if entry.is_dir {
3235 EntryKind::Dir
3236 } else {
3237 let mut char_bag = root_char_bag.clone();
3238 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3239 EntryKind::File(char_bag)
3240 };
3241 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3242 Ok(Entry {
3243 id: entry.id as usize,
3244 kind,
3245 path: path.clone(),
3246 inode: entry.inode,
3247 mtime: mtime.into(),
3248 is_symlink: entry.is_symlink,
3249 is_ignored: entry.is_ignored,
3250 })
3251 } else {
3252 Err(anyhow!(
3253 "missing mtime in remote worktree entry {:?}",
3254 entry.path
3255 ))
3256 }
3257 }
3258}
3259
3260#[cfg(test)]
3261mod tests {
3262 use super::*;
3263 use crate::fs::FakeFs;
3264 use anyhow::Result;
3265 use client::test::{FakeHttpClient, FakeServer};
3266 use fs::RealFs;
3267 use gpui::test::subscribe;
3268 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3269 use language::{Diagnostic, LanguageConfig};
3270 use lsp::Url;
3271 use rand::prelude::*;
3272 use serde_json::json;
3273 use std::{cell::RefCell, rc::Rc};
3274 use std::{
3275 env,
3276 fmt::Write,
3277 time::{SystemTime, UNIX_EPOCH},
3278 };
3279 use text::Point;
3280 use unindent::Unindent as _;
3281 use util::test::temp_tree;
3282
    /// Verifies that traversing a scanned worktree with `entries(false)`
    /// skips gitignored entries ("a/b" is excluded by the root `.gitignore`)
    /// while still yielding the ignore file itself and the ignored entry's
    /// siblings.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                // "a/b" is absent because it is gitignored.
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3329
    /// Edits a buffer backed by a real file and verifies that saving writes
    /// the buffer's contents back to disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Large edit (10 * 1024 repetitions) so the save is non-trivial.
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        // The file on disk must now match the buffer's contents.
        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3363
    /// Same as `test_save_file`, but the worktree root is the file itself
    /// (a single-file worktree), so the buffer is opened at the empty
    /// relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // A single-file worktree contains exactly one file.
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        // The root file is addressed by the empty relative path.
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3402
3403 #[gpui::test]
3404 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3405 let dir = temp_tree(json!({
3406 "a": {
3407 "file1": "",
3408 "file2": "",
3409 "file3": "",
3410 },
3411 "b": {
3412 "c": {
3413 "file4": "",
3414 "file5": "",
3415 }
3416 }
3417 }));
3418
3419 let user_id = 5;
3420 let http_client = FakeHttpClient::with_404_response();
3421 let mut client = Client::new(http_client.clone());
3422 let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3423 let user_store = server.build_user_store(client.clone(), &mut cx).await;
3424 let tree = Worktree::open_local(
3425 client,
3426 user_store.clone(),
3427 dir.path(),
3428 Arc::new(RealFs),
3429 Default::default(),
3430 &mut cx.to_async(),
3431 )
3432 .await
3433 .unwrap();
3434
3435 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3436 let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3437 async move { buffer.await.unwrap() }
3438 };
3439 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3440 tree.read_with(cx, |tree, _| {
3441 tree.entry_for_path(path)
3442 .expect(&format!("no entry for path {}", path))
3443 .id
3444 })
3445 };
3446
3447 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3448 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3449 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3450 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3451
3452 let file2_id = id_for_path("a/file2", &cx);
3453 let file3_id = id_for_path("a/file3", &cx);
3454 let file4_id = id_for_path("b/c/file4", &cx);
3455
3456 // Wait for the initial scan.
3457 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3458 .await;
3459
3460 // Create a remote copy of this worktree.
3461 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3462 let remote = Worktree::remote(
3463 1,
3464 1,
3465 initial_snapshot.to_proto(&Default::default()),
3466 Client::new(http_client.clone()),
3467 user_store,
3468 Default::default(),
3469 &mut cx.to_async(),
3470 )
3471 .await
3472 .unwrap();
3473
3474 cx.read(|cx| {
3475 assert!(!buffer2.read(cx).is_dirty());
3476 assert!(!buffer3.read(cx).is_dirty());
3477 assert!(!buffer4.read(cx).is_dirty());
3478 assert!(!buffer5.read(cx).is_dirty());
3479 });
3480
3481 // Rename and delete files and directories.
3482 tree.flush_fs_events(&cx).await;
3483 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3484 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3485 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3486 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3487 tree.flush_fs_events(&cx).await;
3488
3489 let expected_paths = vec![
3490 "a",
3491 "a/file1",
3492 "a/file2.new",
3493 "b",
3494 "d",
3495 "d/file3",
3496 "d/file4",
3497 ];
3498
3499 cx.read(|app| {
3500 assert_eq!(
3501 tree.read(app)
3502 .paths()
3503 .map(|p| p.to_str().unwrap())
3504 .collect::<Vec<_>>(),
3505 expected_paths
3506 );
3507
3508 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3509 assert_eq!(id_for_path("d/file3", &cx), file3_id);
3510 assert_eq!(id_for_path("d/file4", &cx), file4_id);
3511
3512 assert_eq!(
3513 buffer2.read(app).file().unwrap().path().as_ref(),
3514 Path::new("a/file2.new")
3515 );
3516 assert_eq!(
3517 buffer3.read(app).file().unwrap().path().as_ref(),
3518 Path::new("d/file3")
3519 );
3520 assert_eq!(
3521 buffer4.read(app).file().unwrap().path().as_ref(),
3522 Path::new("d/file4")
3523 );
3524 assert_eq!(
3525 buffer5.read(app).file().unwrap().path().as_ref(),
3526 Path::new("b/c/file5")
3527 );
3528
3529 assert!(!buffer2.read(app).file().unwrap().is_deleted());
3530 assert!(!buffer3.read(app).file().unwrap().is_deleted());
3531 assert!(!buffer4.read(app).file().unwrap().is_deleted());
3532 assert!(buffer5.read(app).file().unwrap().is_deleted());
3533 });
3534
3535 // Update the remote worktree. Check that it becomes consistent with the
3536 // local worktree.
3537 remote.update(&mut cx, |remote, cx| {
3538 let update_message =
3539 tree.read(cx)
3540 .snapshot()
3541 .build_update(&initial_snapshot, 1, 1, true);
3542 remote
3543 .as_remote_mut()
3544 .unwrap()
3545 .snapshot
3546 .apply_update(update_message)
3547 .unwrap();
3548
3549 assert_eq!(
3550 remote
3551 .paths()
3552 .map(|p| p.to_str().unwrap())
3553 .collect::<Vec<_>>(),
3554 expected_paths
3555 );
3556 });
3557 }
3558
3559 #[gpui::test]
3560 async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3561 let dir = temp_tree(json!({
3562 ".git": {},
3563 ".gitignore": "ignored-dir\n",
3564 "tracked-dir": {
3565 "tracked-file1": "tracked contents",
3566 },
3567 "ignored-dir": {
3568 "ignored-file1": "ignored contents",
3569 }
3570 }));
3571
3572 let http_client = FakeHttpClient::with_404_response();
3573 let client = Client::new(http_client.clone());
3574 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3575
3576 let tree = Worktree::open_local(
3577 client,
3578 user_store,
3579 dir.path(),
3580 Arc::new(RealFs),
3581 Default::default(),
3582 &mut cx.to_async(),
3583 )
3584 .await
3585 .unwrap();
3586 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3587 .await;
3588 tree.flush_fs_events(&cx).await;
3589 cx.read(|cx| {
3590 let tree = tree.read(cx);
3591 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3592 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3593 assert_eq!(tracked.is_ignored, false);
3594 assert_eq!(ignored.is_ignored, true);
3595 });
3596
3597 std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3598 std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3599 tree.flush_fs_events(&cx).await;
3600 cx.read(|cx| {
3601 let tree = tree.read(cx);
3602 let dot_git = tree.entry_for_path(".git").unwrap();
3603 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3604 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3605 assert_eq!(tracked.is_ignored, false);
3606 assert_eq!(ignored.is_ignored, true);
3607 assert_eq!(dot_git.is_ignored, true);
3608 });
3609 }
3610
3611 #[gpui::test]
3612 async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3613 let user_id = 100;
3614 let http_client = FakeHttpClient::with_404_response();
3615 let mut client = Client::new(http_client);
3616 let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3617 let user_store = server.build_user_store(client.clone(), &mut cx).await;
3618
3619 let fs = Arc::new(FakeFs::new());
3620 fs.insert_tree(
3621 "/the-dir",
3622 json!({
3623 "a.txt": "a-contents",
3624 "b.txt": "b-contents",
3625 }),
3626 )
3627 .await;
3628
3629 let worktree = Worktree::open_local(
3630 client.clone(),
3631 user_store,
3632 "/the-dir".as_ref(),
3633 fs,
3634 Default::default(),
3635 &mut cx.to_async(),
3636 )
3637 .await
3638 .unwrap();
3639
3640 // Spawn multiple tasks to open paths, repeating some paths.
3641 let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3642 (
3643 worktree.open_buffer("a.txt", cx),
3644 worktree.open_buffer("b.txt", cx),
3645 worktree.open_buffer("a.txt", cx),
3646 )
3647 });
3648
3649 let buffer_a_1 = buffer_a_1.await.unwrap();
3650 let buffer_a_2 = buffer_a_2.await.unwrap();
3651 let buffer_b = buffer_b.await.unwrap();
3652 assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3653 assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3654
3655 // There is only one buffer per path.
3656 let buffer_a_id = buffer_a_1.id();
3657 assert_eq!(buffer_a_2.id(), buffer_a_id);
3658
3659 // Open the same path again while it is still open.
3660 drop(buffer_a_1);
3661 let buffer_a_3 = worktree
3662 .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3663 .await
3664 .unwrap();
3665
3666 // There's still only one buffer per path.
3667 assert_eq!(buffer_a_3.id(), buffer_a_id);
3668 }
3669
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        // Verifies the dirty-state transitions of a buffer and the events
        // emitted for them: editing dirties, saving cleans, and deleting the
        // underlying file dirties (unless the buffer was already dirty, in
        // which case only a file-handle change is reported).
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Collects every buffer event so the exact emission sequence can be
        // asserted below.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        // Drain pending fs events, then dirty the buffer *before* deleting
        // its file, so only FileHandleChanged should follow the deletion.
        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3809
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        // Verifies the two reload policies for on-disk changes: an unmodified
        // buffer is reloaded from disk in place, while a buffer with local
        // edits keeps them and is flagged as having a conflict instead.
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // The reload leaves the buffer clean: no dirty flag, no conflict.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3912
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that disk-based-diagnostics progress reported by a (fake)
        // language server is surfaced as worktree events, that nested
        // progress begin/end pairs collapse into a single Updating/Updated
        // pair, and that published diagnostics land in the buffer snapshot.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();
        // Register a Rust language whose server is the fake one above.
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        let mut events = subscribe(&tree, &mut cx);

        // The first progress start emits a single Updating event.
        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdating
        );

        // Overlapping start/end pairs while progress is already underway must
        // not emit additional Updating/Updated events.
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        // Publish a diagnostic for a.rs while progress is still outstanding.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(Arc::from(Path::new("a.rs")))
        );

        // Only when the last outstanding progress ends is Updated emitted.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // The published diagnostic is present in the buffer snapshot.
        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
4021
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that diagnostics published with `related_information` are
        // grouped: each primary diagnostic and its hints share a `group_id`,
        // the primary one is flagged `is_primary`, and `diagnostic_group`
        // returns exactly the members of one group.
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Two logical groups: "error 1" with one hint, and "error 2" with two
        // hints. The hints also appear as standalone HINT diagnostics whose
        // related information points back at the primary ("original
        // diagnostic"), mirroring how rust-analyzer publishes them.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All diagnostics, ordered by position; group 0 is "error 1" and its
        // hint, group 1 is "error 2" and its two hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can be retrieved on its own by group id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
4286
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Randomized test of the background scanner: interleaves random
        // filesystem mutations with (partial) delivery of the corresponding
        // fs events, checking snapshot invariants throughout, then verifies
        // that (1) a fresh scan of the final tree matches the incrementally
        // maintained snapshot and (2) any intermediate snapshot can be caught
        // up via build_update/apply_update.
        //
        // OPERATIONS and INITIAL_ENTRIES env vars tune the test size.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Build a random initial tree (insertion probability 1.0 => only
        // creations).
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        // Seed the snapshot with the root entry before scanning.
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Randomly interleave mutations with batched event delivery, so the
        // scanner sometimes processes stale/partial event sequences.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            // Occasionally capture a snapshot to test apply_update below.
            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Deliver all remaining events so the scanner reaches a steady state.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A from-scratch scan of the final tree must agree with the
        // incrementally maintained snapshot.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every captured intermediate snapshot can be caught up to the final
        // state via a single update, with or without ignored entries.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the старting snapshot to mirror
                // what a remote peer without ignored entries would hold.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4403
4404 fn randomly_mutate_tree(
4405 root_path: &Path,
4406 insertion_probability: f64,
4407 rng: &mut impl Rng,
4408 ) -> Result<Vec<fsevent::Event>> {
4409 let root_path = root_path.canonicalize().unwrap();
4410 let (dirs, files) = read_dir_recursive(root_path.clone());
4411
4412 let mut events = Vec::new();
4413 let mut record_event = |path: PathBuf| {
4414 events.push(fsevent::Event {
4415 event_id: SystemTime::now()
4416 .duration_since(UNIX_EPOCH)
4417 .unwrap()
4418 .as_secs(),
4419 flags: fsevent::StreamFlags::empty(),
4420 path,
4421 });
4422 };
4423
4424 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4425 let path = dirs.choose(rng).unwrap();
4426 let new_path = path.join(gen_name(rng));
4427
4428 if rng.gen() {
4429 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4430 std::fs::create_dir(&new_path)?;
4431 } else {
4432 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4433 std::fs::write(&new_path, "")?;
4434 }
4435 record_event(new_path);
4436 } else if rng.gen_bool(0.05) {
4437 let ignore_dir_path = dirs.choose(rng).unwrap();
4438 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4439
4440 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4441 let files_to_ignore = {
4442 let len = rng.gen_range(0..=subfiles.len());
4443 subfiles.choose_multiple(rng, len)
4444 };
4445 let dirs_to_ignore = {
4446 let len = rng.gen_range(0..subdirs.len());
4447 subdirs.choose_multiple(rng, len)
4448 };
4449
4450 let mut ignore_contents = String::new();
4451 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4452 write!(
4453 ignore_contents,
4454 "{}\n",
4455 path_to_ignore
4456 .strip_prefix(&ignore_dir_path)?
4457 .to_str()
4458 .unwrap()
4459 )
4460 .unwrap();
4461 }
4462 log::info!(
4463 "Creating {:?} with contents:\n{}",
4464 ignore_path.strip_prefix(&root_path)?,
4465 ignore_contents
4466 );
4467 std::fs::write(&ignore_path, ignore_contents).unwrap();
4468 record_event(ignore_path);
4469 } else {
4470 let old_path = {
4471 let file_path = files.choose(rng);
4472 let dir_path = dirs[1..].choose(rng);
4473 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4474 };
4475
4476 let is_rename = rng.gen();
4477 if is_rename {
4478 let new_path_parent = dirs
4479 .iter()
4480 .filter(|d| !d.starts_with(old_path))
4481 .choose(rng)
4482 .unwrap();
4483
4484 let overwrite_existing_dir =
4485 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4486 let new_path = if overwrite_existing_dir {
4487 std::fs::remove_dir_all(&new_path_parent).ok();
4488 new_path_parent.to_path_buf()
4489 } else {
4490 new_path_parent.join(gen_name(rng))
4491 };
4492
4493 log::info!(
4494 "Renaming {:?} to {}{:?}",
4495 old_path.strip_prefix(&root_path)?,
4496 if overwrite_existing_dir {
4497 "overwrite "
4498 } else {
4499 ""
4500 },
4501 new_path.strip_prefix(&root_path)?
4502 );
4503 std::fs::rename(&old_path, &new_path)?;
4504 record_event(old_path.clone());
4505 record_event(new_path);
4506 } else if old_path.is_dir() {
4507 let (dirs, files) = read_dir_recursive(old_path.clone());
4508
4509 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4510 std::fs::remove_dir_all(&old_path).unwrap();
4511 for file in files {
4512 record_event(file);
4513 }
4514 for dir in dirs {
4515 record_event(dir);
4516 }
4517 } else {
4518 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4519 std::fs::remove_file(old_path).unwrap();
4520 record_event(old_path.clone());
4521 }
4522 }
4523
4524 Ok(events)
4525 }
4526
4527 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4528 let child_entries = std::fs::read_dir(&path).unwrap();
4529 let mut dirs = vec![path];
4530 let mut files = Vec::new();
4531 for child_entry in child_entries {
4532 let child_path = child_entry.unwrap().path();
4533 if child_path.is_dir() {
4534 let (child_dirs, child_files) = read_dir_recursive(child_path);
4535 dirs.extend(child_dirs);
4536 files.extend(child_files);
4537 } else {
4538 files.push(child_path);
4539 }
4540 }
4541 (dirs, files)
4542 }
4543
4544 fn gen_name(rng: &mut impl Rng) -> String {
4545 (0..6)
4546 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4547 .map(char::from)
4548 .collect()
4549 }
4550
    impl Snapshot {
        /// Asserts the internal consistency of the snapshot; used by
        /// `test_random` after every event batch.
        fn check_invariants(&self) {
            // 1. The `files(...)` iterators (all files, and non-ignored-only)
            //    must agree with a linear walk of `entries_by_path`.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            // Both iterators must be exhausted — no extra files.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // 2. Traversing the tree top-down via `child_entries` must visit
            //    exactly the same paths, in the same order, as the flat
            //    `entries_by_path` cursor.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                // Insert children at the current top of the stack so they are
                // popped before their parent's siblings.
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // 3. Every tracked ignore file must correspond to an existing
            //    directory entry and an existing `.gitignore` entry within it.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into sorted `(path, inode, is_ignored)`
        /// tuples, optionally excluding ignored entries; used to compare
        /// snapshots for equality in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4602}