1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary, ProjectEntry,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap, HashSet};
11use futures::{Stream, StreamExt};
12use fuzzy::CharBag;
13use gpui::{
14 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
15 Task, UpgradeModelHandle, WeakModelHandle,
16};
17use language::{
18 range_from_lsp, Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language,
19 LanguageRegistry, Operation, PointUtf16, Rope,
20};
21use lazy_static::lazy_static;
22use lsp::LanguageServer;
23use parking_lot::Mutex;
24use postage::{
25 prelude::{Sink as _, Stream as _},
26 watch,
27};
28use serde::Deserialize;
29use smol::channel::{self, Sender};
30use std::{
31 any::Any,
32 cmp::{self, Ordering},
33 convert::{TryFrom, TryInto},
34 ffi::{OsStr, OsString},
35 fmt,
36 future::Future,
37 ops::Deref,
38 path::{Path, PathBuf},
39 sync::{
40 atomic::{AtomicUsize, Ordering::SeqCst},
41 Arc,
42 },
43 time::{Duration, SystemTime},
44};
45use sum_tree::{Bias, TreeMap};
46use sum_tree::{Edit, SeekTarget, SumTree};
47use util::{post_inc, ResultExt, TryFutureExt};
48
lazy_static! {
    // Cached `OsStr` for the ".gitignore" filename, compared against directory
    // entry names during scanning to locate ignore files.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
52
/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently in progress.
    Idle,
    /// The background scanner is currently walking the filesystem.
    Scanning,
    /// The last scan terminated with an error.
    Err(Arc<anyhow::Error>),
}
59
/// Identifier for a worktree. Locally this wraps the gpui model handle id;
/// for remote worktrees it wraps the id received over the wire (see
/// `from_proto` / `to_proto`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A worktree is either backed by the local filesystem (`Local`) or mirrors a
/// worktree hosted by a remote collaborator (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// Events emitted by a `Worktree` model.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Event {
    /// A disk-based diagnostics pass (e.g. `cargo check`) has started.
    DiskBasedDiagnosticsUpdating,
    /// A disk-based diagnostics pass has finished.
    DiskBasedDiagnosticsUpdated,
    /// Diagnostics changed for the file at the given worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
74
75impl Entity for Worktree {
76 type Event = Event;
77
78 fn app_will_quit(
79 &mut self,
80 _: &mut MutableAppContext,
81 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
82 use futures::FutureExt;
83
84 if let Self::Local(worktree) = self {
85 let shutdown_futures = worktree
86 .language_servers
87 .drain()
88 .filter_map(|(_, server)| server.shutdown())
89 .collect::<Vec<_>>();
90 Some(
91 async move {
92 futures::future::join_all(shutdown_futures).await;
93 }
94 .boxed(),
95 )
96 } else {
97 None
98 }
99 }
100}
101
102impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem and starts
    /// the background scanner that keeps its snapshot in sync with disk.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // The scanner runs entirely on the background executor, reporting
            // progress through `scan_states_tx` and mutating
            // `background_snapshot`, which `poll_snapshot` copies back into
            // the model.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                // Debounce filesystem events with a 100ms latency window.
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
127
    /// Constructs a worktree model that mirrors a worktree shared by a remote
    /// collaborator, from the initial `proto::Worktree` sent by the host.
    ///
    /// Entry deserialization happens on the background executor; afterwards a
    /// model is created that applies streamed `UpdateWorktree` messages to a
    /// background copy of the snapshot and polls changes into the foreground.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Deserializing every entry can be expensive for large worktrees, so
        // build both trees and the diagnostic summary map off the main thread.
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Malformed entries are skipped rather than failing
                        // the whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                // Remote worktrees have no local absolute path; `abs_path` is
                // intentionally empty.
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Task 1: apply incoming worktree updates to the background
                // copy of the snapshot. Ends when `updates_tx` is dropped.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                // Task 2: whenever the background snapshot changes, poll it
                // into the model. Uses a weak handle so it stops once the
                // model is dropped.
                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                    diagnostic_summaries,
                })
            })
        });

        Ok(worktree)
    }
250
251 pub fn as_local(&self) -> Option<&LocalWorktree> {
252 if let Worktree::Local(worktree) = self {
253 Some(worktree)
254 } else {
255 None
256 }
257 }
258
259 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
260 if let Worktree::Remote(worktree) = self {
261 Some(worktree)
262 } else {
263 None
264 }
265 }
266
267 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
268 if let Worktree::Local(worktree) = self {
269 Some(worktree)
270 } else {
271 None
272 }
273 }
274
275 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
276 if let Worktree::Remote(worktree) = self {
277 Some(worktree)
278 } else {
279 None
280 }
281 }
282
    /// Returns a clone of the worktree's current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
289
290 pub fn replica_id(&self) -> ReplicaId {
291 match self {
292 Worktree::Local(_) => 0,
293 Worktree::Remote(worktree) => worktree.replica_id,
294 }
295 }
296
    /// Removes a collaborator that has left the project.
    ///
    /// Local worktrees need the `peer_id` to drop the buffers shared with that
    /// peer; remote worktrees only need the departing `replica_id`.
    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }
308
    /// Returns the language registry used to detect and load languages.
    // NOTE(review): the backing field is named `language_registry` on
    // `LocalWorktree` but `languages` on `RemoteWorktree`; consider unifying
    // the field names.
    pub fn languages(&self) -> &Arc<LanguageRegistry> {
        match self {
            Worktree::Local(worktree) => &worktree.language_registry,
            Worktree::Remote(worktree) => &worktree.languages,
        }
    }
315
316 pub fn user_store(&self) -> &ModelHandle<UserStore> {
317 match self {
318 Worktree::Local(worktree) => &worktree.user_store,
319 Worktree::Remote(worktree) => &worktree.user_store,
320 }
321 }
322
    /// Handles an `OpenBuffer` request from a remote guest.
    ///
    /// Only valid on a local (host) worktree — panics otherwise. The buffer is
    /// opened on this model and the serialized state is sent back as the RPC
    /// response from a background task; failures are logged, not propagated.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
348
    /// Handles a `CloseBuffer` message from a remote guest by releasing the
    /// buffer shared with that peer. Only valid on a local (host) worktree —
    /// panics otherwise.
    pub fn handle_close_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        self.as_local_mut()
            .unwrap()
            .close_remote_buffer(envelope, cx)
    }
359
360 pub fn diagnostic_summaries<'a>(
361 &'a self,
362 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
363 match self {
364 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
365 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
366 }
367 .iter()
368 .map(|(path, summary)| (path.0.clone(), summary.clone()))
369 }
370
    /// Returns the map of buffers that are currently being loaded, keyed by
    /// worktree-relative path.
    pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
        match self {
            Worktree::Local(worktree) => &mut worktree.loading_buffers,
            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
        }
    }
377
    /// Opens the buffer at `path`, deduplicating concurrent requests: if the
    /// buffer is already open it is returned immediately, and if it is
    /// already being loaded the caller waits on the same load task instead of
    /// starting another one.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    // Publishing the result wakes every waiter on the watch.
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        // Wait until the load task publishes a result on the watch channel.
        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }
431
432 #[cfg(feature = "test-support")]
433 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
434 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
435 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
436 Worktree::Remote(worktree) => {
437 Box::new(worktree.open_buffers.values().filter_map(|buf| {
438 if let RemoteBuffer::Loaded(buf) = buf {
439 Some(buf)
440 } else {
441 None
442 }
443 }))
444 }
445 };
446
447 let path = path.as_ref();
448 open_buffers
449 .find(|buffer| {
450 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
451 file.path().as_ref() == path
452 } else {
453 false
454 }
455 })
456 .is_some()
457 }
458
459 pub fn handle_update_buffer(
460 &mut self,
461 envelope: TypedEnvelope<proto::UpdateBuffer>,
462 cx: &mut ModelContext<Self>,
463 ) -> Result<()> {
464 let payload = envelope.payload.clone();
465 let buffer_id = payload.buffer_id as usize;
466 let ops = payload
467 .operations
468 .into_iter()
469 .map(|op| language::proto::deserialize_operation(op))
470 .collect::<Result<Vec<_>, _>>()?;
471
472 match self {
473 Worktree::Local(worktree) => {
474 let buffer = worktree
475 .open_buffers
476 .get(&buffer_id)
477 .and_then(|buf| buf.upgrade(cx))
478 .ok_or_else(|| {
479 anyhow!("invalid buffer {} in update buffer message", buffer_id)
480 })?;
481 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
482 }
483 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
484 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
485 Some(RemoteBuffer::Loaded(buffer)) => {
486 if let Some(buffer) = buffer.upgrade(cx) {
487 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
488 } else {
489 worktree
490 .open_buffers
491 .insert(buffer_id, RemoteBuffer::Operations(ops));
492 }
493 }
494 None => {
495 worktree
496 .open_buffers
497 .insert(buffer_id, RemoteBuffer::Operations(ops));
498 }
499 },
500 }
501
502 Ok(())
503 }
504
    /// Handles a `SaveBuffer` request from a remote guest.
    ///
    /// Only valid on a local (host) worktree that is currently shared. The
    /// save runs asynchronously and the guest is answered with a
    /// `BufferSaved` message carrying the saved version and mtime; errors in
    /// the background task are logged rather than propagated.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        // The buffer must have been previously shared with the requesting peer.
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        // The save must be initiated on the foreground (model) context...
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        // ...but the response can be awaited and sent from the background.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
557
558 pub fn handle_buffer_saved(
559 &mut self,
560 envelope: TypedEnvelope<proto::BufferSaved>,
561 cx: &mut ModelContext<Self>,
562 ) -> Result<()> {
563 let payload = envelope.payload.clone();
564 let worktree = self.as_remote_mut().unwrap();
565 if let Some(buffer) = worktree
566 .open_buffers
567 .get(&(payload.buffer_id as usize))
568 .and_then(|buf| buf.upgrade(cx))
569 {
570 buffer.update(cx, |buffer, cx| {
571 let version = payload.version.try_into()?;
572 let mtime = payload
573 .mtime
574 .ok_or_else(|| anyhow!("missing mtime"))?
575 .into();
576 buffer.did_save(version, mtime, None, cx);
577 Result::<_, anyhow::Error>::Ok(())
578 })?;
579 }
580 Ok(())
581 }
582
    /// Handles a `FormatBuffer` request from a remote guest.
    ///
    /// Only valid on a local (host) worktree — panics otherwise. Formats the
    /// shared buffer and replies with `Ack` on success or an error response on
    /// failure; the reply is deliberately enqueued after the formatting edits.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();

        Ok(())
    }
624
    /// Copies the latest background snapshot into the model and notifies
    /// observers.
    ///
    /// While a local worktree is still scanning, this schedules itself to run
    /// again (immediately under the fake test filesystem, or after 100ms on a
    /// real one); once scanning settles — or for remote worktrees, on every
    /// poll — open buffers are reconciled against the new snapshot.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                // Yield so deterministic tests can interleave.
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
657
    /// Reconciles every open buffer's `File` with the current snapshot after
    /// it changes: resolves each buffer's entry by id first, then by path,
    /// and marks the file as deleted (no entry id) when neither matches.
    /// Buffers whose weak handles no longer upgrade are pruned.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        // Remote worktrees may hold queued operations for still-loading
        // buffers; only loaded buffers are considered here.
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        // Prefer resolving by entry id (survives renames)...
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        // ...then by path (a new entry at the same location)...
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        // ...otherwise the file no longer exists on disk.
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                // The buffer model was dropped; remove its stale entry below
                // (can't mutate the map while iterating it).
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
733
734 pub fn update_diagnostics(
735 &mut self,
736 params: lsp::PublishDiagnosticsParams,
737 disk_based_sources: &HashSet<String>,
738 cx: &mut ModelContext<Worktree>,
739 ) -> Result<()> {
740 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
741 let abs_path = params
742 .uri
743 .to_file_path()
744 .map_err(|_| anyhow!("URI is not a file"))?;
745 let worktree_path = Arc::from(
746 abs_path
747 .strip_prefix(&this.abs_path)
748 .context("path is not within worktree")?,
749 );
750
751 let mut next_group_id = 0;
752 let mut diagnostics = Vec::default();
753 let mut primary_diagnostic_group_ids = HashMap::default();
754 let mut sources_by_group_id = HashMap::default();
755 let mut supporting_diagnostic_severities = HashMap::default();
756 for diagnostic in ¶ms.diagnostics {
757 let source = diagnostic.source.as_ref();
758 let code = diagnostic.code.as_ref().map(|code| match code {
759 lsp::NumberOrString::Number(code) => code.to_string(),
760 lsp::NumberOrString::String(code) => code.clone(),
761 });
762 let range = range_from_lsp(diagnostic.range);
763 let is_supporting = diagnostic
764 .related_information
765 .as_ref()
766 .map_or(false, |infos| {
767 infos.iter().any(|info| {
768 primary_diagnostic_group_ids.contains_key(&(
769 source,
770 code.clone(),
771 range_from_lsp(info.location.range),
772 ))
773 })
774 });
775
776 if is_supporting {
777 if let Some(severity) = diagnostic.severity {
778 supporting_diagnostic_severities
779 .insert((source, code.clone(), range), severity);
780 }
781 } else {
782 let group_id = post_inc(&mut next_group_id);
783 let is_disk_based =
784 source.map_or(false, |source| disk_based_sources.contains(source));
785
786 sources_by_group_id.insert(group_id, source);
787 primary_diagnostic_group_ids
788 .insert((source, code.clone(), range.clone()), group_id);
789
790 diagnostics.push(DiagnosticEntry {
791 range,
792 diagnostic: Diagnostic {
793 code: code.clone(),
794 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
795 message: diagnostic.message.clone(),
796 group_id,
797 is_primary: true,
798 is_valid: true,
799 is_disk_based,
800 },
801 });
802 if let Some(infos) = &diagnostic.related_information {
803 for info in infos {
804 if info.location.uri == params.uri {
805 let range = range_from_lsp(info.location.range);
806 diagnostics.push(DiagnosticEntry {
807 range,
808 diagnostic: Diagnostic {
809 code: code.clone(),
810 severity: DiagnosticSeverity::INFORMATION,
811 message: info.message.clone(),
812 group_id,
813 is_primary: false,
814 is_valid: true,
815 is_disk_based,
816 },
817 });
818 }
819 }
820 }
821 }
822 }
823
824 for entry in &mut diagnostics {
825 let diagnostic = &mut entry.diagnostic;
826 if !diagnostic.is_primary {
827 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
828 if let Some(&severity) = supporting_diagnostic_severities.get(&(
829 source,
830 diagnostic.code.clone(),
831 entry.range.clone(),
832 )) {
833 diagnostic.severity = severity;
834 }
835 }
836 }
837
838 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
839 Ok(())
840 }
841
    /// Stores new diagnostics for `worktree_path`, pushes them into the
    /// matching open buffer (if any), emits `DiagnosticsUpdated`, and — when
    /// the worktree is shared — broadcasts the updated summary to guests.
    /// Only valid on a local worktree — panics otherwise.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        // At most one open buffer can correspond to the path, hence the
        // `break` after the first match.
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        // Re-borrow: `send_buffer_update` above required `&mut self`.
        let this = self.as_local_mut().unwrap();
        let summary = DiagnosticSummary::new(&diagnostics);
        this.diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        this.diagnostics.insert(worktree_path.clone(), diagnostics);

        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));

        if let Some(share) = this.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = this.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = this.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
906
    /// Sends a buffer operation to the other side of the collaboration, if
    /// connected (host → guests when shared, guest → host always). On failure
    /// the operation is queued in `queued_operations` for later retry.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        // A local worktree only sends updates while it is shared.
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
947}
948
949impl WorktreeId {
950 pub fn from_usize(handle_id: usize) -> Self {
951 Self(handle_id)
952 }
953
954 pub(crate) fn from_proto(id: u64) -> Self {
955 Self(id as usize)
956 }
957
958 pub fn to_proto(&self) -> u64 {
959 self.0 as u64
960 }
961
962 pub fn to_usize(&self) -> usize {
963 self.0
964 }
965}
966
967impl fmt::Display for WorktreeId {
968 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
969 self.0.fmt(f)
970 }
971}
972
/// An immutable-by-convention view of a worktree's file entries at a point in
/// time. Cheap to clone; shared between the foreground model and the
/// background scanner.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Incremented for each background scan pass.
    scan_id: usize,
    // Absolute root path on disk; empty for remote worktrees.
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed gitignore per directory, paired with a usize — presumably the
    // scan_id at which it was loaded; confirm against the scanner.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path, and an id-keyed index into them.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    // Shared counter for allocating entry ids across snapshot copies.
    next_entry_id: Arc<AtomicUsize>,
}
986
/// A worktree backed by the local filesystem, kept up to date by a background
/// scanner and optionally shared with remote collaborators.
pub struct LocalWorktree {
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Parsed `.zed.toml` configuration.
    config: WorktreeConfig,
    // Snapshot mutated by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    // Pending re-poll while a scan is in progress.
    poll_task: Option<Task<()>>,
    // Present while the worktree is shared with guests.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Weak handles keyed by buffer model id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers opened on behalf of each guest peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    // Operations that failed to send, retained for retry.
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Languages registered for this worktree (see `register_language`).
    languages: Vec<Arc<Language>>,
    // Running language servers keyed by language name.
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
1008
/// State that exists only while a local worktree is shared with guests.
struct ShareState {
    project_id: u64,
    // Channel used to submit fresh snapshots for broadcast to guests.
    snapshots_tx: Sender<Snapshot>,
    _maintain_remote_snapshot: Option<Task<()>>,
}
1014
/// A worktree that mirrors one shared by a remote host, updated via streamed
/// `UpdateWorktree` messages.
pub struct RemoteWorktree {
    project_id: u64,
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Receives the background snapshot as remote updates are applied.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Feeds remote updates to the background apply task.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Either a loaded buffer or operations queued for one still loading.
    open_buffers: HashMap<usize, RemoteBuffer>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Operations that failed to send, retained for retry.
    queued_operations: Vec<(u64, Operation)>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
}
1029
// In-flight buffer loads keyed by worktree-relative path. Each value is a
// watch that resolves to the loaded buffer (or a shared error) so concurrent
// opens of the same path share one load task.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
1034
/// Per-worktree configuration parsed from `.zed.toml` at the worktree root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins authorized to join this worktree (see `authorized_logins`).
    collaborators: Vec<String>,
}
1039
1040impl LocalWorktree {
    /// Builds a local worktree model rooted at `path`, returning it together
    /// with the sender the background scanner uses to report scan states.
    ///
    /// The caller (see `Worktree::open_local`) is responsible for actually
    /// starting the background scanner with the returned sender.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry lives at the empty relative path.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // Load the optional `.zed.toml`; parse failures fall back to defaults.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                languages: Default::default(),
                language_servers: Default::default(),
            };

            // Forward scan states from the scanner into `last_scan_state_rx`,
            // poll the snapshot on each state change, and — once a scan
            // settles while shared — submit the snapshot for broadcast.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree model was dropped; stop forwarding.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1149
1150 pub fn authorized_logins(&self) -> Vec<String> {
1151 self.config.collaborators.clone()
1152 }
1153
    /// Returns the registry used to look up a language for files in this worktree.
    pub fn language_registry(&self) -> &LanguageRegistry {
        &self.language_registry
    }
1157
    /// Returns the languages registered on this worktree so far (populated by
    /// `register_language`).
    pub fn languages(&self) -> &[Arc<Language>] {
        &self.languages
    }
1161
1162 pub fn register_language(
1163 &mut self,
1164 language: &Arc<Language>,
1165 cx: &mut ModelContext<Worktree>,
1166 ) -> Option<Arc<LanguageServer>> {
1167 if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
1168 self.languages.push(language.clone());
1169 }
1170
1171 if let Some(server) = self.language_servers.get(language.name()) {
1172 return Some(server.clone());
1173 }
1174
1175 if let Some(language_server) = language
1176 .start_server(self.abs_path(), cx)
1177 .log_err()
1178 .flatten()
1179 {
1180 enum DiagnosticProgress {
1181 Updating,
1182 Updated,
1183 }
1184
1185 let disk_based_sources = language
1186 .disk_based_diagnostic_sources()
1187 .cloned()
1188 .unwrap_or_default();
1189 let disk_based_diagnostics_progress_token =
1190 language.disk_based_diagnostics_progress_token().cloned();
1191 let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
1192 let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
1193 smol::channel::unbounded();
1194 language_server
1195 .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
1196 smol::block_on(diagnostics_tx.send(params)).ok();
1197 })
1198 .detach();
1199 cx.spawn_weak(|this, mut cx| {
1200 let has_disk_based_diagnostic_progress_token =
1201 disk_based_diagnostics_progress_token.is_some();
1202 let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
1203 async move {
1204 while let Ok(diagnostics) = diagnostics_rx.recv().await {
1205 if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1206 handle.update(&mut cx, |this, cx| {
1207 if !has_disk_based_diagnostic_progress_token {
1208 smol::block_on(
1209 disk_based_diagnostics_done_tx
1210 .send(DiagnosticProgress::Updating),
1211 )
1212 .ok();
1213 }
1214 this.update_diagnostics(diagnostics, &disk_based_sources, cx)
1215 .log_err();
1216 if !has_disk_based_diagnostic_progress_token {
1217 smol::block_on(
1218 disk_based_diagnostics_done_tx
1219 .send(DiagnosticProgress::Updated),
1220 )
1221 .ok();
1222 }
1223 })
1224 } else {
1225 break;
1226 }
1227 }
1228 }
1229 })
1230 .detach();
1231
1232 let mut pending_disk_based_diagnostics: i32 = 0;
1233 language_server
1234 .on_notification::<lsp::notification::Progress, _>(move |params| {
1235 let token = match params.token {
1236 lsp::NumberOrString::Number(_) => None,
1237 lsp::NumberOrString::String(token) => Some(token),
1238 };
1239
1240 if token == disk_based_diagnostics_progress_token {
1241 match params.value {
1242 lsp::ProgressParamsValue::WorkDone(progress) => match progress {
1243 lsp::WorkDoneProgress::Begin(_) => {
1244 if pending_disk_based_diagnostics == 0 {
1245 smol::block_on(
1246 disk_based_diagnostics_done_tx
1247 .send(DiagnosticProgress::Updating),
1248 )
1249 .ok();
1250 }
1251 pending_disk_based_diagnostics += 1;
1252 }
1253 lsp::WorkDoneProgress::End(_) => {
1254 pending_disk_based_diagnostics -= 1;
1255 if pending_disk_based_diagnostics == 0 {
1256 smol::block_on(
1257 disk_based_diagnostics_done_tx
1258 .send(DiagnosticProgress::Updated),
1259 )
1260 .ok();
1261 }
1262 }
1263 _ => {}
1264 },
1265 }
1266 }
1267 })
1268 .detach();
1269 let rpc = self.client.clone();
1270 cx.spawn_weak(|this, mut cx| async move {
1271 while let Ok(progress) = disk_based_diagnostics_done_rx.recv().await {
1272 if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1273 match progress {
1274 DiagnosticProgress::Updating => {
1275 let message = handle.update(&mut cx, |this, cx| {
1276 cx.emit(Event::DiskBasedDiagnosticsUpdating);
1277 let this = this.as_local().unwrap();
1278 this.share.as_ref().map(|share| {
1279 proto::DiskBasedDiagnosticsUpdating {
1280 project_id: share.project_id,
1281 worktree_id: this.id().to_proto(),
1282 }
1283 })
1284 });
1285
1286 if let Some(message) = message {
1287 rpc.send(message).await.log_err();
1288 }
1289 }
1290 DiagnosticProgress::Updated => {
1291 let message = handle.update(&mut cx, |this, cx| {
1292 cx.emit(Event::DiskBasedDiagnosticsUpdated);
1293 let this = this.as_local().unwrap();
1294 this.share.as_ref().map(|share| {
1295 proto::DiskBasedDiagnosticsUpdated {
1296 project_id: share.project_id,
1297 worktree_id: this.id().to_proto(),
1298 }
1299 })
1300 });
1301
1302 if let Some(message) = message {
1303 rpc.send(message).await.log_err();
1304 }
1305 }
1306 }
1307 } else {
1308 break;
1309 }
1310 }
1311 })
1312 .detach();
1313
1314 self.language_servers
1315 .insert(language.name().to_string(), language_server.clone());
1316 Some(language_server.clone())
1317 } else {
1318 None
1319 }
1320 }
1321
1322 fn get_open_buffer(
1323 &mut self,
1324 path: &Path,
1325 cx: &mut ModelContext<Worktree>,
1326 ) -> Option<ModelHandle<Buffer>> {
1327 let handle = cx.handle();
1328 let mut result = None;
1329 self.open_buffers.retain(|_buffer_id, buffer| {
1330 if let Some(buffer) = buffer.upgrade(cx) {
1331 if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1332 if file.worktree == handle && file.path().as_ref() == path {
1333 result = Some(buffer);
1334 }
1335 }
1336 true
1337 } else {
1338 false
1339 }
1340 });
1341 result
1342 }
1343
    /// Opens the buffer for `path` in this local worktree: loads the file from
    /// disk, resolves its language and language server, seeds it with any
    /// stored diagnostics, and registers the new buffer model in `open_buffers`.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Load the file's contents and a fresh `File` handle for it.
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Look up previously-received diagnostics for this path, plus the
            // language and server to associate with the new buffer.
            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.get(&path).cloned();
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                // Seed the buffer with the stored diagnostics, remembering the
                // resulting operation so it can be broadcast below.
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1390
    /// Handles a peer's request to open a buffer in this worktree: opens the
    /// buffer locally, records it in the requesting peer's shared-buffer set,
    /// and replies with the buffer's serialized state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                // Keep a strong handle per requesting peer so the buffer stays
                // alive while that peer has it open.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1416
1417 pub fn close_remote_buffer(
1418 &mut self,
1419 envelope: TypedEnvelope<proto::CloseBuffer>,
1420 cx: &mut ModelContext<Worktree>,
1421 ) -> Result<()> {
1422 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1423 shared_buffers.remove(&envelope.payload.buffer_id);
1424 cx.notify();
1425 }
1426
1427 Ok(())
1428 }
1429
1430 pub fn remove_collaborator(
1431 &mut self,
1432 peer_id: PeerId,
1433 replica_id: ReplicaId,
1434 cx: &mut ModelContext<Worktree>,
1435 ) {
1436 self.shared_buffers.remove(&peer_id);
1437 for (_, buffer) in &self.open_buffers {
1438 if let Some(buffer) = buffer.upgrade(cx) {
1439 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1440 }
1441 }
1442 cx.notify();
1443 }
1444
1445 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1446 let mut scan_state_rx = self.last_scan_state_rx.clone();
1447 async move {
1448 let mut scan_state = Some(scan_state_rx.borrow().clone());
1449 while let Some(ScanState::Scanning) = scan_state {
1450 scan_state = scan_state_rx.recv().await;
1451 }
1452 }
1453 }
1454
1455 fn is_scanning(&self) -> bool {
1456 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1457 true
1458 } else {
1459 false
1460 }
1461 }
1462
    /// Returns a clone of the worktree's current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1466
    /// Returns the absolute path of the worktree's root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1470
    /// Returns whether `path` lies within this worktree's root directory
    /// (a pure prefix check; `path` need not exist on disk).
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1474
1475 fn absolutize(&self, path: &Path) -> PathBuf {
1476 if path.file_name().is_some() {
1477 self.snapshot.abs_path.join(path)
1478 } else {
1479 self.snapshot.abs_path.to_path_buf()
1480 }
1481 }
1482
    /// Reads the file at the worktree-relative `path` from disk, refreshing
    /// the background snapshot's entry for it, and returns the `File` handle
    /// together with its contents.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Surface the refreshed background snapshot to observers.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1508
1509 pub fn save_buffer_as(
1510 &self,
1511 buffer_handle: ModelHandle<Buffer>,
1512 path: impl Into<Arc<Path>>,
1513 cx: &mut ModelContext<Worktree>,
1514 ) -> Task<Result<()>> {
1515 let buffer = buffer_handle.read(cx);
1516 let text = buffer.as_rope().clone();
1517 let version = buffer.version();
1518 let save = self.save(path, text, cx);
1519 cx.spawn(|this, mut cx| async move {
1520 let entry = save.await?;
1521 let file = this.update(&mut cx, |this, cx| {
1522 let this = this.as_local_mut().unwrap();
1523 this.open_buffers
1524 .insert(buffer_handle.id(), buffer_handle.downgrade());
1525 File {
1526 entry_id: Some(entry.id),
1527 worktree: cx.handle(),
1528 worktree_path: this.abs_path.clone(),
1529 path: entry.path,
1530 mtime: entry.mtime,
1531 is_local: true,
1532 }
1533 });
1534
1535 let (language, language_server) = this.update(&mut cx, |worktree, cx| {
1536 let worktree = worktree.as_local_mut().unwrap();
1537 let language = worktree
1538 .language_registry()
1539 .select_language(file.full_path())
1540 .cloned();
1541 let language_server = language
1542 .as_ref()
1543 .and_then(|language| worktree.register_language(language, cx));
1544 (language, language_server.clone())
1545 });
1546
1547 buffer_handle.update(&mut cx, |buffer, cx| {
1548 buffer.did_save(version, file.mtime, Some(Box::new(file)), cx);
1549 buffer.set_language(language, language_server, cx);
1550 });
1551
1552 Ok(())
1553 })
1554 }
1555
    /// Writes `text` to the file at the worktree-relative `path` and refreshes
    /// the background snapshot's entry for it, returning the updated entry.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Perform the write and entry refresh on the background executor.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Surface the refreshed background snapshot to observers.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1577
    /// Starts sharing this worktree under `project_id`: spawns a task that
    /// streams snapshot diffs to the server, then sends the initial worktree
    /// state. Idempotent — returns immediately if already shared.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        let maintain_remote_snapshot = cx.background().spawn({
            let rpc = rpc.clone();
            let snapshot = snapshot.clone();
            async move {
                // Send each queued snapshot as a diff against the last one
                // that was successfully delivered.
                let mut prev_snapshot = snapshot;
                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                    let message =
                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                    match rpc.send(message).await {
                        Ok(()) => prev_snapshot = snapshot,
                        Err(err) => log::error!("error sending snapshot diff {}", err),
                    }
                }
            }
        });
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
        });

        // Serialize the full initial snapshot off the foreground thread.
        let diagnostic_summaries = self.diagnostic_summaries.clone();
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto(&diagnostic_summaries)),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1625
1626 pub fn unshare(&mut self) {
1627 self.share.take();
1628 }
1629
    /// Returns whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1633}
1634
1635fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1636 let contents = smol::block_on(fs.load(&abs_path))?;
1637 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1638 let mut builder = GitignoreBuilder::new(parent);
1639 for line in contents.lines() {
1640 builder.add_line(Some(abs_path.into()), line)?;
1641 }
1642 Ok(builder.build()?)
1643}
1644
impl Deref for Worktree {
    type Target = Snapshot;

    // Both worktree variants expose their snapshot directly.
    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1655
// Allow snapshot methods to be called directly on a local worktree.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1663
// Allow snapshot methods to be called directly on a remote worktree.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1671
// Debug output delegates to the snapshot's entry-tree rendering.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1677
impl RemoteWorktree {
    /// Finds an already-open buffer for `path`, pruning open-buffer entries
    /// whose models have been dropped along the way.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                // Buffer model was dropped; forget this entry.
                false
            }
        });
        existing_buffer
    }

    /// Opens the buffer for `path` by requesting its state from the host over
    /// RPC and reconstructing it locally with this replica's id. Operations
    /// that arrived while the request was in flight are applied to the
    /// newly-created buffer.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            // The entry must exist in the local snapshot before we can open it.
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            // The worktree may have been dropped while awaiting the response.
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Replace any queued operations with the loaded buffer, then
                // apply those operations so no edits are lost.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Closes every buffer that is still open for this remote worktree.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// Returns a clone of the worktree's current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues an `UpdateWorktree` message from the host to be applied by the
    /// task draining `updates_tx`.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Records a diagnostic summary received from the host for one path and
    /// emits a `DiagnosticsUpdated` event for it.
    pub fn update_diagnostic_summary(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
        cx: &mut ModelContext<Worktree>,
    ) {
        if let Some(summary) = envelope.payload.summary {
            let path: Arc<Path> = Path::new(&summary.path).into();
            self.diagnostic_summaries.insert(
                PathKey(path.clone()),
                DiagnosticSummary {
                    error_count: summary.error_count as usize,
                    warning_count: summary.warning_count as usize,
                    info_count: summary.info_count as usize,
                    hint_count: summary.hint_count as usize,
                },
            );
            cx.emit(Event::DiagnosticsUpdated(path));
        }
    }

    /// Re-emits the host's "disk-based diagnostics updating" notification as
    /// a model event.
    pub fn disk_based_diagnostics_updating(&self, cx: &mut ModelContext<Worktree>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdating);
    }

    /// Re-emits the host's "disk-based diagnostics updated" notification as
    /// a model event.
    pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
        cx.emit(Event::DiskBasedDiagnosticsUpdated);
    }

    /// Clears a departed collaborator's replica state from every open buffer.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1829
// State of a buffer in a remote worktree.
enum RemoteBuffer {
    // Operations received before the buffer finished opening; applied once it loads.
    Operations(Vec<Operation>),
    // The buffer has loaded; held weakly so it can be released elsewhere.
    Loaded(WeakModelHandle<Buffer>),
}
1834
1835impl RemoteBuffer {
1836 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1837 match self {
1838 Self::Operations(_) => None,
1839 Self::Loaded(buffer) => buffer.upgrade(cx),
1840 }
1841 }
1842}
1843
impl Snapshot {
    /// Returns the worktree's stable identifier.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Serializes this snapshot (non-ignored entries only) together with the
    /// given diagnostic summaries into the wire representation.
    pub fn to_proto(
        &self,
        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
    ) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id.0 as u64,
            root_name,
            entries: self
                .entries_by_path
                .iter()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
            diagnostic_summaries: diagnostic_summaries
                .iter()
                .map(|(path, summary)| summary.to_proto(path.0.clone()))
                .collect(),
        }
    }

    /// Computes the diff between `self` and an older snapshot `other` as an
    /// `UpdateWorktree` message: entries present only in `self` (or whose
    /// `scan_id` changed) are "updated", entries present only in `other` are
    /// "removed". Both id-ordered entry lists are consumed in a single merge
    /// pass.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Entry only exists in the new snapshot: it was added.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // Entry exists in both: include it only if it was
                        // touched by a newer scan.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Entry only exists in the old snapshot: it was removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                // Remaining entries in `self` were all added.
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                // Remaining entries in `other` were all removed.
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies a diff received from a remote host to this snapshot, removing
    /// and inserting entries in both the by-path and by-id trees.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry's id already exists under a different path, remove
            // the old path mapping before inserting the new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Returns the total number of file entries (including ignored ones).
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Returns the number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Returns a traversal over entries starting at the `start_offset`-th
    /// entry, counting only entries matched by the include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal over entries starting at `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal over file entries only, starting at offset `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Returns a traversal over all entries (files and directories).
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over all entry paths, skipping the root (empty) path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of the entry at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Seek just past the parent itself so iteration starts at its first child.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// Returns the worktree's root entry, if the tree is non-empty.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// Returns the file name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry at exactly `path`, if one exists.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on a later path; require an exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id, resolving through the id-to-path index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// Returns the inode of the entry at `path`, if one exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts (or replaces) a single entry in the snapshot. If the entry is a
    /// `.gitignore` file, its patterns are parsed and cached for the directory
    /// containing it. Returns the inserted entry (with a possibly-reused id).
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A broken ignore file shouldn't abort the scan; just log it.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Records the scanned contents of a directory: marks the parent entry as
    /// a fully-scanned `Dir`, caches its `.gitignore` (if any), and inserts
    /// all child entries. The parent must already exist as a `PendingDir`.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            // Callers must only populate directories inserted as PendingDir.
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: reuses the id of a previously
    /// removed entry with the same inode, or of an existing entry at the same
    /// path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and all entries beneath it, remembering the
    /// removed ids (keyed by inode) so they can be reused if the same files
    /// reappear. Also marks an enclosing `.gitignore` as touched this scan.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`] + [subtree at `path`] +
            // [after], dropping the middle slice.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of `.gitignore` matchers that applies to `path` by
    /// walking its ancestors from the root down. Collapses to `all()` as soon
    /// as any ancestor (or the path itself) is ignored.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Apply matchers outermost-first (ancestors were collected innermost-first).
        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
2211
2212impl fmt::Debug for Snapshot {
2213 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2214 for entry in self.entries_by_path.cursor::<()>() {
2215 for _ in entry.path.ancestors().skip(1) {
2216 write!(f, " ")?;
2217 }
2218 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2219 }
2220 Ok(())
2221 }
2222}
2223
// A handle to a file within a worktree, used as the `language::File`
// implementation for buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    // Snapshot entry id; `None` once the file has been deleted.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    // Absolute path of the worktree root this file belongs to.
    worktree_path: Arc<Path>,
    // Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    // Whether the file lives in a local worktree (vs. a remote one).
    is_local: bool,
}
2233
2234impl language::File for File {
    /// Returns the file's last-known modification time.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }
2238
    /// Returns the file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }
2242
2243 fn abs_path(&self) -> Option<PathBuf> {
2244 if self.is_local {
2245 Some(self.worktree_path.join(&self.path))
2246 } else {
2247 None
2248 }
2249 }
2250
2251 fn full_path(&self) -> PathBuf {
2252 let mut full_path = PathBuf::new();
2253 if let Some(worktree_name) = self.worktree_path.file_name() {
2254 full_path.push(worktree_name);
2255 }
2256 full_path.push(&self.path);
2257 full_path
2258 }
2259
2260 /// Returns the last component of this handle's absolute path. If this handle refers to the root
2261 /// of its worktree, then this method will return the name of the worktree itself.
2262 fn file_name<'a>(&'a self) -> Option<OsString> {
2263 self.path
2264 .file_name()
2265 .or_else(|| self.worktree_path.file_name())
2266 .map(Into::into)
2267 }
2268
2269 fn is_deleted(&self) -> bool {
2270 self.entry_id.is_none()
2271 }
2272
2273 fn save(
2274 &self,
2275 buffer_id: u64,
2276 text: Rope,
2277 version: clock::Global,
2278 cx: &mut MutableAppContext,
2279 ) -> Task<Result<(clock::Global, SystemTime)>> {
2280 let worktree_id = self.worktree.read(cx).id().to_proto();
2281 self.worktree.update(cx, |worktree, cx| match worktree {
2282 Worktree::Local(worktree) => {
2283 let rpc = worktree.client.clone();
2284 let project_id = worktree.share.as_ref().map(|share| share.project_id);
2285 let save = worktree.save(self.path.clone(), text, cx);
2286 cx.background().spawn(async move {
2287 let entry = save.await?;
2288 if let Some(project_id) = project_id {
2289 rpc.send(proto::BufferSaved {
2290 project_id,
2291 worktree_id,
2292 buffer_id,
2293 version: (&version).into(),
2294 mtime: Some(entry.mtime.into()),
2295 })
2296 .await?;
2297 }
2298 Ok((version, entry.mtime))
2299 })
2300 }
2301 Worktree::Remote(worktree) => {
2302 let rpc = worktree.client.clone();
2303 let project_id = worktree.project_id;
2304 cx.foreground().spawn(async move {
2305 let response = rpc
2306 .request(proto::SaveBuffer {
2307 project_id,
2308 worktree_id,
2309 buffer_id,
2310 })
2311 .await?;
2312 let version = response.version.try_into()?;
2313 let mtime = response
2314 .mtime
2315 .ok_or_else(|| anyhow!("missing mtime"))?
2316 .into();
2317 Ok((version, mtime))
2318 })
2319 }
2320 })
2321 }
2322
2323 fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
2324 let worktree = self.worktree.read(cx).as_local()?;
2325 let abs_path = worktree.absolutize(&self.path);
2326 let fs = worktree.fs.clone();
2327 Some(
2328 cx.background()
2329 .spawn(async move { fs.load(&abs_path).await }),
2330 )
2331 }
2332
2333 fn format_remote(
2334 &self,
2335 buffer_id: u64,
2336 cx: &mut MutableAppContext,
2337 ) -> Option<Task<Result<()>>> {
2338 let worktree = self.worktree.read(cx);
2339 let worktree_id = worktree.id().to_proto();
2340 let worktree = worktree.as_remote()?;
2341 let rpc = worktree.client.clone();
2342 let project_id = worktree.project_id;
2343 Some(cx.foreground().spawn(async move {
2344 rpc.request(proto::FormatBuffer {
2345 project_id,
2346 worktree_id,
2347 buffer_id,
2348 })
2349 .await?;
2350 Ok(())
2351 }))
2352 }
2353
2354 fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
2355 self.worktree.update(cx, |worktree, cx| {
2356 worktree.send_buffer_update(buffer_id, operation, cx);
2357 });
2358 }
2359
2360 fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
2361 self.worktree.update(cx, |worktree, cx| {
2362 if let Worktree::Remote(worktree) = worktree {
2363 let project_id = worktree.project_id;
2364 let worktree_id = worktree.id().to_proto();
2365 let rpc = worktree.client.clone();
2366 cx.background()
2367 .spawn(async move {
2368 if let Err(error) = rpc
2369 .send(proto::CloseBuffer {
2370 project_id,
2371 worktree_id,
2372 buffer_id,
2373 })
2374 .await
2375 {
2376 log::error!("error closing remote buffer: {}", error);
2377 }
2378 })
2379 .detach();
2380 }
2381 });
2382 }
2383
2384 fn as_any(&self) -> &dyn Any {
2385 self
2386 }
2387}
2388
2389impl File {
2390 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2391 file.and_then(|f| f.as_any().downcast_ref())
2392 }
2393
2394 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2395 self.worktree.read(cx).id()
2396 }
2397
2398 pub fn project_entry(&self, cx: &AppContext) -> Option<ProjectEntry> {
2399 self.entry_id.map(|entry_id| ProjectEntry {
2400 worktree_id: self.worktree_id(cx),
2401 entry_id,
2402 })
2403 }
2404}
2405
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    // Unique id, allocated from the snapshot's shared counter.
    pub id: usize,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True when some `.gitignore` on the ignore stack matches this path.
    pub is_ignored: bool,
}
2416
/// What kind of filesystem object an [`Entry`] represents.
#[derive(Clone, Debug)]
pub enum EntryKind {
    // A directory whose children have not been scanned yet.
    PendingDir,
    Dir,
    // A file, carrying the character bag used for fuzzy-match prefiltering.
    File(CharBag),
}
2423
2424impl Entry {
2425 fn new(
2426 path: Arc<Path>,
2427 metadata: &fs::Metadata,
2428 next_entry_id: &AtomicUsize,
2429 root_char_bag: CharBag,
2430 ) -> Self {
2431 Self {
2432 id: next_entry_id.fetch_add(1, SeqCst),
2433 kind: if metadata.is_dir {
2434 EntryKind::PendingDir
2435 } else {
2436 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2437 },
2438 path,
2439 inode: metadata.inode,
2440 mtime: metadata.mtime,
2441 is_symlink: metadata.is_symlink,
2442 is_ignored: false,
2443 }
2444 }
2445
2446 pub fn is_dir(&self) -> bool {
2447 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2448 }
2449
2450 pub fn is_file(&self) -> bool {
2451 matches!(self.kind, EntryKind::File(_))
2452 }
2453}
2454
2455impl sum_tree::Item for Entry {
2456 type Summary = EntrySummary;
2457
2458 fn summary(&self) -> Self::Summary {
2459 let visible_count = if self.is_ignored { 0 } else { 1 };
2460 let file_count;
2461 let visible_file_count;
2462 if self.is_file() {
2463 file_count = 1;
2464 visible_file_count = visible_count;
2465 } else {
2466 file_count = 0;
2467 visible_file_count = 0;
2468 }
2469
2470 EntrySummary {
2471 max_path: self.path.clone(),
2472 count: 1,
2473 visible_count,
2474 file_count,
2475 visible_file_count,
2476 }
2477 }
2478}
2479
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed (and therefore ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2487
/// Aggregated statistics over a subtree of [`Entry`] items in the sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest (rightmost) path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are not ignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are files and not ignored.
    visible_file_count: usize,
}
2496
2497impl Default for EntrySummary {
2498 fn default() -> Self {
2499 Self {
2500 max_path: Arc::from(Path::new("")),
2501 count: 0,
2502 visible_count: 0,
2503 file_count: 0,
2504 visible_file_count: 0,
2505 }
2506 }
2507}
2508
2509impl sum_tree::Summary for EntrySummary {
2510 type Context = ();
2511
2512 fn add_summary(&mut self, rhs: &Self, _: &()) {
2513 self.max_path = rhs.max_path.clone();
2514 self.visible_count += rhs.visible_count;
2515 self.file_count += rhs.file_count;
2516 self.visible_file_count += rhs.visible_file_count;
2517 }
2518}
2519
/// An id-keyed view of an entry, stored in `entries_by_id` so entries can be
/// found by id as well as by path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this entry was last updated; used to detect staleness.
    scan_id: usize,
}
2527
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The summary simply tracks the largest id in a range.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2535
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    // `PathEntry` items are keyed (and ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2543
/// Summary for [`PathEntry`] ranges: the greatest entry id in the range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2548
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // Ranges are ordered by id, so the rightmost summary's max wins.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2556
// Lets cursors over `entries_by_id` seek directly by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2562
/// Ordering key for entries in `entries_by_path`: the worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2565
2566impl Default for PathKey {
2567 fn default() -> Self {
2568 Self(Path::new("").into())
2569 }
2570}
2571
// Lets cursors over `entries_by_path` seek directly by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2577
/// Scans a local worktree's directory tree on background threads and keeps
/// the shared snapshot up to date as filesystem events arrive.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the foreground `LocalWorktree`.
    snapshot: Arc<Mutex<Snapshot>>,
    // Channel used to report scan progress (Scanning/Idle/Err) to observers.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2584
2585impl BackgroundScanner {
    /// Creates a scanner operating on the given shared snapshot, reporting
    /// scan-state changes over `notify`.
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }
2599
    // The worktree root's absolute path, read from the shared snapshot.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }
2603
    // A private working copy of the shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }
2607
    /// Main loop: performs the initial recursive scan, then processes batches
    /// of filesystem events until the notify channel or event stream closes.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        // NOTE(review): Idle is reported even after a scan error above —
        // presumably so observers stop waiting on the scan; confirm intended.
        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            // Bracket each event batch with Scanning/Idle notifications.
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }
2643
    /// Performs the initial scan of the whole worktree by seeding a job queue
    /// with the root directory and draining it on one worker per CPU.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            // Copy what we need out of the snapshot, holding the lock briefly.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        // Single-file worktrees have nothing to recurse into.
        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Jobs hold their own sender clones, so dropping ours lets the
            // channel close once all queued work is done.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }
2689
    /// Scans one directory: builds entries for each child, tracks any
    /// `.gitignore` found, and queues follow-up jobs for subdirectories.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // A child that vanished between listing and stat is skipped.
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file found in the current
                // directory. Because `.gitignore` starts with a `.`, such earlier
                // entries should rarely be numerous. Update the ignore stack
                // associated with any already-queued jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        // Jobs were pushed in lockstep with directory entries, so
                        // each directory entry pairs with the next queued job.
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Everything under an ignored directory is ignored, so its
                    // subtree doesn't need to consult further ignore files.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }
2787
    /// Applies a batch of filesystem events to a private copy of the
    /// snapshot, rescans any affected directories, then publishes the result.
    /// Returns `false` if the worktree root could no longer be resolved.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path so that an event for a directory directly precedes
        // events for its descendants, which the dedup below then drops.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove every affected path; re-insertion happens below.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // Second pass: re-stat each path and insert it if it still exists,
        // queueing a recursive scan for any directory.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path was deleted; the removal above already handled it.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Publish the updated snapshot before the recursive scans run.
        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }
2889
    /// After a scan, recomputes `is_ignored` for entries under every
    /// directory whose `.gitignore` changed in this scan, and forgets ignore
    /// files that no longer exist on disk.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // An ignore file touched by the current scan needs its subtree
            // re-evaluated (if its directory still exists).
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            // Remove from both the working copy and the shared snapshot.
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants of a directory we're already going to process;
            // the recursive jobs below will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Jobs hold their own sender clones; dropping ours closes the channel
        // once all recursive work completes.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2946
    /// Recomputes `is_ignored` for the direct children of `job.path`, queues
    /// recursive jobs for child directories, and writes any changed entries
    /// back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only entries whose ignore status actually flipped need edits.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2987}
2988
/// Re-stats a single path on disk and inserts a fresh [`Entry`] for it into
/// the snapshot (used after saves so the entry's mtime is current).
///
/// Errors if the file's metadata cannot be read or the file no longer exists.
async fn refresh_entry(
    fs: &dyn Fs,
    snapshot: &Mutex<Snapshot>,
    path: Arc<Path>,
    abs_path: &Path,
) -> Result<Entry> {
    let root_char_bag;
    let next_entry_id;
    {
        // Copy what we need while holding the lock briefly; the metadata read
        // below must not happen under the snapshot lock.
        let snapshot = snapshot.lock();
        root_char_bag = snapshot.root_char_bag;
        next_entry_id = snapshot.next_entry_id.clone();
    }
    let entry = Entry::new(
        path,
        &fs.metadata(abs_path)
            .await?
            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
        &next_entry_id,
        root_char_bag,
    );
    Ok(snapshot.lock().insert_entry(entry, fs))
}
3012
3013fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3014 let mut result = root_char_bag;
3015 result.extend(
3016 path.to_string_lossy()
3017 .chars()
3018 .map(|c| c.to_ascii_lowercase()),
3019 );
3020 result
3021}
3022
/// A unit of directory-scanning work; carries a clone of the queue's sender
/// so it can enqueue jobs for its own subdirectories.
struct ScanJob {
    abs_path: PathBuf,
    // Path relative to the worktree root.
    path: Arc<Path>,
    // Ignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    scan_queue: Sender<ScanJob>,
}
3029
/// A unit of ignore-status recomputation work for one directory; like
/// `ScanJob`, it holds the queue's sender to enqueue recursive work.
struct UpdateIgnoreStatusJob {
    // Path relative to the worktree root.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3035
/// Test-only helpers implemented on worktree model handles.
pub trait WorktreeHandle {
    // Waits until all pending filesystem events have been processed
    // (see the impl below for how this is accomplished).
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3043
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create a sentinel file and wait for the scanner to observe it...
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for that to be observed as well.
            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3076
/// Cursor dimension that accumulates entry counts (and the max path seen)
/// while traversing `entries_by_path`.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
3085
3086impl<'a> TraversalProgress<'a> {
3087 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3088 match (include_ignored, include_dirs) {
3089 (true, true) => self.count,
3090 (true, false) => self.file_count,
3091 (false, true) => self.visible_count,
3092 (false, false) => self.visible_file_count,
3093 }
3094 }
3095}
3096
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Accumulates each summary's counts; `max_path` tracks the rightmost path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3106
impl<'a> Default for TraversalProgress<'a> {
    // Zero progress: nothing counted, minimal path as max.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3118
/// An iterator-like cursor over a snapshot's entries, optionally filtering
/// out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3124
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the entry at `offset` (counted under this
    /// traversal's filters). Returns whether the cursor moved.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, stopping at the next
    /// entry that is not a descendant of it and that matches the filters.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep skipping until the landed-on entry passes the filters.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position, counted under this traversal's filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3170
3171impl<'a> Iterator for Traversal<'a> {
3172 type Item = &'a Entry;
3173
3174 fn next(&mut self) -> Option<Self::Item> {
3175 if let Some(item) = self.entry() {
3176 self.advance();
3177 Some(item)
3178 } else {
3179 None
3180 }
3181 }
3182}
3183
/// Seek targets for a [`Traversal`]'s cursor.
#[derive(Debug)]
enum TraversalTarget {
    // The entry with exactly this path.
    Path(&'a Path),
    // The first entry that is not a descendant of this path.
    PathSuccessor(&'a Path),
    // The entry at a given position, counted under the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3194
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    // Orders this target relative to a cursor position so `seek_forward`
    // knows when to stop.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Keep advancing (Greater) while still inside `path`'s
                // subtree; stop (Equal) at the first non-descendant.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3217
/// Iterates over the direct children of one directory, skipping each
/// child's own subtree via `advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3222
3223impl<'a> Iterator for ChildEntriesIter<'a> {
3224 type Item = &'a Entry;
3225
3226 fn next(&mut self) -> Option<Self::Item> {
3227 if let Some(item) = self.traversal.entry() {
3228 if item.path.starts_with(&self.parent_path) {
3229 self.traversal.advance_to_sibling();
3230 return Some(item);
3231 }
3232 }
3233 None
3234 }
3235}
3236
// Converts a local entry into its wire-format representation for sharing
// with collaborators. The path becomes a (lossy) UTF-8 string.
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3250
3251impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3252 type Error = anyhow::Error;
3253
3254 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3255 if let Some(mtime) = entry.mtime {
3256 let kind = if entry.is_dir {
3257 EntryKind::Dir
3258 } else {
3259 let mut char_bag = root_char_bag.clone();
3260 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3261 EntryKind::File(char_bag)
3262 };
3263 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3264 Ok(Entry {
3265 id: entry.id as usize,
3266 kind,
3267 path: path.clone(),
3268 inode: entry.inode,
3269 mtime: mtime.into(),
3270 is_symlink: entry.is_symlink,
3271 is_ignored: entry.is_ignored,
3272 })
3273 } else {
3274 Err(anyhow!(
3275 "missing mtime in remote worktree entry {:?}",
3276 entry.path
3277 ))
3278 }
3279 }
3280}
3281
3282#[cfg(test)]
3283mod tests {
3284 use super::*;
3285 use crate::fs::FakeFs;
3286 use anyhow::Result;
3287 use client::test::{FakeHttpClient, FakeServer};
3288 use fs::RealFs;
3289 use gpui::test::subscribe;
3290 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3291 use language::{Diagnostic, LanguageConfig};
3292 use lsp::Url;
3293 use rand::prelude::*;
3294 use serde_json::json;
3295 use std::{cell::RefCell, rc::Rc};
3296 use std::{
3297 env,
3298 fmt::Write,
3299 time::{SystemTime, UNIX_EPOCH},
3300 };
3301 use text::Point;
3302 use unindent::Unindent as _;
3303 use util::test::temp_tree;
3304
    // Verifies that the default (non-ignored) traversal of a scanned worktree
    // skips entries matched by a `.gitignore` ("a/b") while keeping the
    // ignore file itself and unignored siblings.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3351
    // Edits a buffer opened from a real temp directory, saves it, and checks
    // the bytes on disk match the buffer's contents.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            // Large edit so the write is non-trivial.
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3385
    // Same as `test_save_file`, but the worktree root is the file itself
    // (a single-file worktree), opened via the empty relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the scan; a single-file worktree contains exactly one file.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3424
3425 #[gpui::test]
3426 async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3427 let dir = temp_tree(json!({
3428 "a": {
3429 "file1": "",
3430 "file2": "",
3431 "file3": "",
3432 },
3433 "b": {
3434 "c": {
3435 "file4": "",
3436 "file5": "",
3437 }
3438 }
3439 }));
3440
3441 let user_id = 5;
3442 let http_client = FakeHttpClient::with_404_response();
3443 let mut client = Client::new(http_client.clone());
3444 let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3445 let user_store = server.build_user_store(client.clone(), &mut cx).await;
3446 let tree = Worktree::open_local(
3447 client,
3448 user_store.clone(),
3449 dir.path(),
3450 Arc::new(RealFs),
3451 Default::default(),
3452 &mut cx.to_async(),
3453 )
3454 .await
3455 .unwrap();
3456
3457 let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3458 let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3459 async move { buffer.await.unwrap() }
3460 };
3461 let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3462 tree.read_with(cx, |tree, _| {
3463 tree.entry_for_path(path)
3464 .expect(&format!("no entry for path {}", path))
3465 .id
3466 })
3467 };
3468
3469 let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3470 let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3471 let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3472 let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3473
3474 let file2_id = id_for_path("a/file2", &cx);
3475 let file3_id = id_for_path("a/file3", &cx);
3476 let file4_id = id_for_path("b/c/file4", &cx);
3477
3478 // Wait for the initial scan.
3479 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3480 .await;
3481
3482 // Create a remote copy of this worktree.
3483 let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3484 let remote = Worktree::remote(
3485 1,
3486 1,
3487 initial_snapshot.to_proto(&Default::default()),
3488 Client::new(http_client.clone()),
3489 user_store,
3490 Default::default(),
3491 &mut cx.to_async(),
3492 )
3493 .await
3494 .unwrap();
3495
3496 cx.read(|cx| {
3497 assert!(!buffer2.read(cx).is_dirty());
3498 assert!(!buffer3.read(cx).is_dirty());
3499 assert!(!buffer4.read(cx).is_dirty());
3500 assert!(!buffer5.read(cx).is_dirty());
3501 });
3502
3503 // Rename and delete files and directories.
3504 tree.flush_fs_events(&cx).await;
3505 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3506 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3507 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3508 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3509 tree.flush_fs_events(&cx).await;
3510
3511 let expected_paths = vec![
3512 "a",
3513 "a/file1",
3514 "a/file2.new",
3515 "b",
3516 "d",
3517 "d/file3",
3518 "d/file4",
3519 ];
3520
3521 cx.read(|app| {
3522 assert_eq!(
3523 tree.read(app)
3524 .paths()
3525 .map(|p| p.to_str().unwrap())
3526 .collect::<Vec<_>>(),
3527 expected_paths
3528 );
3529
3530 assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3531 assert_eq!(id_for_path("d/file3", &cx), file3_id);
3532 assert_eq!(id_for_path("d/file4", &cx), file4_id);
3533
3534 assert_eq!(
3535 buffer2.read(app).file().unwrap().path().as_ref(),
3536 Path::new("a/file2.new")
3537 );
3538 assert_eq!(
3539 buffer3.read(app).file().unwrap().path().as_ref(),
3540 Path::new("d/file3")
3541 );
3542 assert_eq!(
3543 buffer4.read(app).file().unwrap().path().as_ref(),
3544 Path::new("d/file4")
3545 );
3546 assert_eq!(
3547 buffer5.read(app).file().unwrap().path().as_ref(),
3548 Path::new("b/c/file5")
3549 );
3550
3551 assert!(!buffer2.read(app).file().unwrap().is_deleted());
3552 assert!(!buffer3.read(app).file().unwrap().is_deleted());
3553 assert!(!buffer4.read(app).file().unwrap().is_deleted());
3554 assert!(buffer5.read(app).file().unwrap().is_deleted());
3555 });
3556
3557 // Update the remote worktree. Check that it becomes consistent with the
3558 // local worktree.
3559 remote.update(&mut cx, |remote, cx| {
3560 let update_message =
3561 tree.read(cx)
3562 .snapshot()
3563 .build_update(&initial_snapshot, 1, 1, true);
3564 remote
3565 .as_remote_mut()
3566 .unwrap()
3567 .snapshot
3568 .apply_update(update_message)
3569 .unwrap();
3570
3571 assert_eq!(
3572 remote
3573 .paths()
3574 .map(|p| p.to_str().unwrap())
3575 .collect::<Vec<_>>(),
3576 expected_paths
3577 );
3578 });
3579 }
3580
3581 #[gpui::test]
3582 async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3583 let dir = temp_tree(json!({
3584 ".git": {},
3585 ".gitignore": "ignored-dir\n",
3586 "tracked-dir": {
3587 "tracked-file1": "tracked contents",
3588 },
3589 "ignored-dir": {
3590 "ignored-file1": "ignored contents",
3591 }
3592 }));
3593
3594 let http_client = FakeHttpClient::with_404_response();
3595 let client = Client::new(http_client.clone());
3596 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3597
3598 let tree = Worktree::open_local(
3599 client,
3600 user_store,
3601 dir.path(),
3602 Arc::new(RealFs),
3603 Default::default(),
3604 &mut cx.to_async(),
3605 )
3606 .await
3607 .unwrap();
3608 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3609 .await;
3610 tree.flush_fs_events(&cx).await;
3611 cx.read(|cx| {
3612 let tree = tree.read(cx);
3613 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3614 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3615 assert_eq!(tracked.is_ignored, false);
3616 assert_eq!(ignored.is_ignored, true);
3617 });
3618
3619 std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3620 std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3621 tree.flush_fs_events(&cx).await;
3622 cx.read(|cx| {
3623 let tree = tree.read(cx);
3624 let dot_git = tree.entry_for_path(".git").unwrap();
3625 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3626 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3627 assert_eq!(tracked.is_ignored, false);
3628 assert_eq!(ignored.is_ignored, true);
3629 assert_eq!(dot_git.is_ignored, true);
3630 });
3631 }
3632
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        // Opening the same worktree path multiple times — whether the opens
        // are issued concurrently or after an earlier handle was dropped —
        // must always resolve to the same shared buffer.
        let user_id = 100;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client);
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap();
        let buffer_a_2 = buffer_a_2.await.unwrap();
        let buffer_b = buffer_b.await.unwrap();
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open (buffer_a_2 is
        // still alive even though buffer_a_1 has been dropped).
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap();

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3691
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        // Exercises the buffer dirty-state machine: an edit marks the buffer
        // dirty (emitting Dirtied then further Edited events), saving clears
        // it (emitting Saved), and deleting the file on disk dirties a clean
        // buffer but emits no extra Dirtied event for an already-dirty one.
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Collects every buffer event so the exact emission sequence can be
        // asserted below.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3831
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        // A clean buffer whose file changes on disk is reloaded in place and
        // stays clean; a buffer with unsaved edits keeps its contents and is
        // flagged as conflicted instead.
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3934
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // Diagnostics published by a (fake) language server surface as
        // worktree events, and disk-based-diagnostics progress operations
        // nest: DiskBasedDiagnosticsUpdated is only emitted after every
        // outstanding progress operation for the token has ended.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let progress_token = language_server_config
            .disk_based_diagnostics_progress_token
            .clone()
            .unwrap();
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        let mut events = subscribe(&tree, &mut cx);

        // The first progress start announces that disk-based diagnostics are
        // being updated.
        fake_server.start_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdating
        );

        // Start two more overlapping progress operations (one of which ends
        // immediately), leaving two still outstanding.
        fake_server.start_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        fake_server.start_progress(&progress_token).await;

        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiagnosticsUpdated(Arc::from(Path::new("a.rs")))
        );

        // Only once both outstanding operations end is the disk-based update
        // considered complete.
        fake_server.end_progress(&progress_token).await;
        fake_server.end_progress(&progress_token).await;
        assert_eq!(
            events.next().await.unwrap(),
            Event::DiskBasedDiagnosticsUpdated
        );

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // A buffer opened after the diagnostics arrived still carries them.
        buffer.read_with(&cx, |buffer, _| {
            let snapshot = buffer.snapshot();
            let diagnostics = snapshot
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
4043
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Diagnostics that reference each other through related_information
        // are grouped: each group has one primary entry plus its supporting
        // hints, all sharing a group_id.
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                // Group 0: primary warning with one supporting hint.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                // Group 0: the hint itself, pointing back at its primary.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Group 1: primary error with two supporting hints.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                // Group 1: first hint, pointing back at its primary.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Group 1: second hint, pointing back at its primary.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // Entries come back sorted by position, with group ids and primary
        // flags assigned as described above.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be queried on its own by group id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
4308
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Fuzz test for the background scanner: randomly mutate a real
        // directory tree while interleaving batched event delivery, then
        // check that (1) the incrementally-maintained snapshot matches a
        // from-scratch rescan and (2) snapshots captured along the way can be
        // fast-forwarded with build_update/apply_update.
        //
        // Tunable via OPERATIONS / INITIAL_ENTRIES env vars.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave mutations with delivery of random-sized batches of the
        // accumulated fs events, checking invariants after each delivery.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            // Occasionally capture a snapshot to test apply_update below.
            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan of the final on-disk state must agree with the
        // incrementally-maintained snapshot.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // When updates exclude ignored entries, strip them from the
                // baseline snapshot first so the comparison is apples-to-apples.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4425
4426 fn randomly_mutate_tree(
4427 root_path: &Path,
4428 insertion_probability: f64,
4429 rng: &mut impl Rng,
4430 ) -> Result<Vec<fsevent::Event>> {
4431 let root_path = root_path.canonicalize().unwrap();
4432 let (dirs, files) = read_dir_recursive(root_path.clone());
4433
4434 let mut events = Vec::new();
4435 let mut record_event = |path: PathBuf| {
4436 events.push(fsevent::Event {
4437 event_id: SystemTime::now()
4438 .duration_since(UNIX_EPOCH)
4439 .unwrap()
4440 .as_secs(),
4441 flags: fsevent::StreamFlags::empty(),
4442 path,
4443 });
4444 };
4445
4446 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4447 let path = dirs.choose(rng).unwrap();
4448 let new_path = path.join(gen_name(rng));
4449
4450 if rng.gen() {
4451 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4452 std::fs::create_dir(&new_path)?;
4453 } else {
4454 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4455 std::fs::write(&new_path, "")?;
4456 }
4457 record_event(new_path);
4458 } else if rng.gen_bool(0.05) {
4459 let ignore_dir_path = dirs.choose(rng).unwrap();
4460 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4461
4462 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4463 let files_to_ignore = {
4464 let len = rng.gen_range(0..=subfiles.len());
4465 subfiles.choose_multiple(rng, len)
4466 };
4467 let dirs_to_ignore = {
4468 let len = rng.gen_range(0..subdirs.len());
4469 subdirs.choose_multiple(rng, len)
4470 };
4471
4472 let mut ignore_contents = String::new();
4473 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4474 write!(
4475 ignore_contents,
4476 "{}\n",
4477 path_to_ignore
4478 .strip_prefix(&ignore_dir_path)?
4479 .to_str()
4480 .unwrap()
4481 )
4482 .unwrap();
4483 }
4484 log::info!(
4485 "Creating {:?} with contents:\n{}",
4486 ignore_path.strip_prefix(&root_path)?,
4487 ignore_contents
4488 );
4489 std::fs::write(&ignore_path, ignore_contents).unwrap();
4490 record_event(ignore_path);
4491 } else {
4492 let old_path = {
4493 let file_path = files.choose(rng);
4494 let dir_path = dirs[1..].choose(rng);
4495 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4496 };
4497
4498 let is_rename = rng.gen();
4499 if is_rename {
4500 let new_path_parent = dirs
4501 .iter()
4502 .filter(|d| !d.starts_with(old_path))
4503 .choose(rng)
4504 .unwrap();
4505
4506 let overwrite_existing_dir =
4507 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4508 let new_path = if overwrite_existing_dir {
4509 std::fs::remove_dir_all(&new_path_parent).ok();
4510 new_path_parent.to_path_buf()
4511 } else {
4512 new_path_parent.join(gen_name(rng))
4513 };
4514
4515 log::info!(
4516 "Renaming {:?} to {}{:?}",
4517 old_path.strip_prefix(&root_path)?,
4518 if overwrite_existing_dir {
4519 "overwrite "
4520 } else {
4521 ""
4522 },
4523 new_path.strip_prefix(&root_path)?
4524 );
4525 std::fs::rename(&old_path, &new_path)?;
4526 record_event(old_path.clone());
4527 record_event(new_path);
4528 } else if old_path.is_dir() {
4529 let (dirs, files) = read_dir_recursive(old_path.clone());
4530
4531 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4532 std::fs::remove_dir_all(&old_path).unwrap();
4533 for file in files {
4534 record_event(file);
4535 }
4536 for dir in dirs {
4537 record_event(dir);
4538 }
4539 } else {
4540 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4541 std::fs::remove_file(old_path).unwrap();
4542 record_event(old_path.clone());
4543 }
4544 }
4545
4546 Ok(events)
4547 }
4548
4549 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4550 let child_entries = std::fs::read_dir(&path).unwrap();
4551 let mut dirs = vec![path];
4552 let mut files = Vec::new();
4553 for child_entry in child_entries {
4554 let child_path = child_entry.unwrap().path();
4555 if child_path.is_dir() {
4556 let (child_dirs, child_files) = read_dir_recursive(child_path);
4557 dirs.extend(child_dirs);
4558 files.extend(child_files);
4559 } else {
4560 files.push(child_path);
4561 }
4562 }
4563 (dirs, files)
4564 }
4565
4566 fn gen_name(rng: &mut impl Rng) -> String {
4567 (0..6)
4568 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4569 .map(char::from)
4570 .collect()
4571 }
4572
    impl Snapshot {
        /// Asserts the internal consistency of a snapshot; called by the
        /// randomized tests after each simulated filesystem mutation.
        fn check_invariants(&self) {
            // `files(true, 0)` appears to yield every file while
            // `files(false, 0)` yields only non-ignored ones -- the loop below
            // relies on exactly that split. TODO(review): confirm against
            // `Snapshot::files`.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            // Walking every entry in the tree's order must produce the same
            // files (matched by inode) as the file iterators, in the same order.
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            // Both iterators must now be exhausted -- no files may exist that
            // the entry tree doesn't know about.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Traverse the tree via `child_entries`. Each batch of children is
            // inserted at the fixed index `ix`, which reverses them inside the
            // stack; popping from the end then yields them in their original
            // order, so this is a preorder depth-first walk (despite the
            // `bfs_paths` name).
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            // The flat entry list (presumably ordered by path, per the field
            // name) must agree exactly with the hierarchical traversal above.
            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every tracked gitignore must still correspond to an existing
            // directory entry that contains a `.gitignore` file.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples,
        /// sorted by path; ignored entries are filtered out unless
        /// `include_ignored` is true. Used to diff two snapshots in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4624}