1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TakeUntilExt, TryFutureExt};
59
/// Identifies a worktree within a project. For local worktrees this is
/// derived from the worktree model's id (see `WorktreeId::from_usize` usage
/// in `Worktree::local`); for remote worktrees it mirrors the host's id.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A directory tree tracked by a project: either backed by the local
/// filesystem, or a replica of a collaborator's worktree received over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Sends batches of absolute paths (plus a completion barrier) to the
    /// background scanner so they get rescanned.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Watch channel reporting whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    /// Present while the worktree is shared with remote collaborators.
    share: Option<ShareState>,
    /// Diagnostics per worktree-relative path, one entry per language
    /// server; each path's list is kept sorted by server id.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per path and language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether this worktree appears in the project panel / UI.
    visible: bool,
}
86
/// A replica of a collaborator's worktree, updated by applying
/// `proto::UpdateWorktree` messages received from the host.
pub struct RemoteWorktree {
    /// The foreground copy of the snapshot, synced from `background_snapshot`.
    snapshot: Snapshot,
    /// The snapshot that remote updates are applied to on a background task.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Channel feeding remote updates to the background task; `None` once
    /// updates are no longer accepted.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters to be notified once a given scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
99
/// A point-in-time view of a worktree's entries and git repositories.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root on disk.
    abs_path: Arc<Path>,
    /// File name of the root, used for fuzzy matching (see `root_char_bag`).
    root_name: String,
    /// Lowercased characters of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    /// Entries ordered by path.
    entries_by_path: SumTree<Entry>,
    /// The same entries, indexed by entry id.
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories, keyed by their working-directory path.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
/// A git repository as seen by the worktree: its working directory,
/// current branch, and per-path file statuses.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// The project entry of the repository's working directory (the folder
    /// containing `.git`).
    pub(crate) work_directory: WorkDirectoryEntry,
    /// The checked-out branch name, if known.
    pub(crate) branch: Option<Arc<str>>,
    /// Git status per repository-relative path.
    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
}
129
130fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
131 proto::GitStatus::from_i32(git_status).map(|status| match status {
132 proto::GitStatus::Added => GitFileStatus::Added,
133 proto::GitStatus::Modified => GitFileStatus::Modified,
134 proto::GitStatus::Conflict => GitFileStatus::Conflict,
135 })
136}
137
impl RepositoryEntry {
    /// The repository's current branch name, if known.
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    /// The project entry id of the repository's working directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    /// Resolves the working directory back to its path within `snapshot`,
    /// if the entry still exists there.
    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    /// Aggregate git status for `path` (worktree-relative), combining the
    /// statuses of all entries at or beneath it with severity
    /// Conflict > Modified > Added. Returns `None` if `path` is outside the
    /// repository or has no statuses.
    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| {
                self.statuses
                    .iter_from(&repo_path)
                    .take_while(|(key, _)| key.starts_with(&repo_path))
                    // Short circuit once we've found the highest level
                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
                    .map(|(_, status)| status)
                    .reduce(
                        |status_first, status_second| match (status_first, status_second) {
                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
                                &GitFileStatus::Conflict
                            }
                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
                                &GitFileStatus::Modified
                            }
                            _ => &GitFileStatus::Added,
                        },
                    )
                    .copied()
            })
    }

    /// Exact status for a single file (no aggregation over descendants).
    #[cfg(any(test, feature = "test-support"))]
    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| (&self.statuses).get(&repo_path))
            .cloned()
    }

    /// Builds a protobuf update that transforms `other`'s statuses into
    /// `self`'s. Walks both sorted status maps in lockstep (a merge join),
    /// emitting entries added or changed in `self` as `updated_statuses`,
    /// and entries present only in `other` as `removed_repo_paths`.
    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        let mut self_statuses = self.statuses.iter().peekable();
        let mut other_statuses = other.statuses.iter().peekable();
        loop {
            match (self_statuses.peek(), other_statuses.peek()) {
                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
                    match Ord::cmp(self_repo_path, other_repo_path) {
                        // Path only exists in `self`: it was added.
                        Ordering::Less => {
                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
                            self_statuses.next();
                        }
                        // Path exists in both: emit only if the status changed.
                        Ordering::Equal => {
                            if self_status != other_status {
                                updated_statuses
                                    .push(make_status_entry(self_repo_path, self_status));
                            }

                            self_statuses.next();
                            other_statuses.next();
                        }
                        // Path only exists in `other`: it was removed.
                        Ordering::Greater => {
                            removed_statuses.push(make_repo_path(other_repo_path));
                            other_statuses.next();
                        }
                    }
                }
                (Some((self_repo_path, self_status)), None) => {
                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
                    self_statuses.next();
                }
                (None, Some((other_repo_path, _))) => {
                    removed_statuses.push(make_repo_path(other_repo_path));
                    other_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id().to_proto(),
            branch: self.branch.as_ref().map(|str| str.to_string()),
            removed_repo_paths: removed_statuses,
            updated_statuses,
        }
    }
}
235
236fn make_repo_path(path: &RepoPath) -> String {
237 path.as_os_str().to_string_lossy().to_string()
238}
239
240fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
241 proto::StatusEntry {
242 repo_path: make_repo_path(path),
243 status: match status {
244 GitFileStatus::Added => proto::GitStatus::Added.into(),
245 GitFileStatus::Modified => proto::GitStatus::Modified.into(),
246 GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
247 },
248 }
249}
250
251impl From<&RepositoryEntry> for proto::RepositoryEntry {
252 fn from(value: &RepositoryEntry) -> Self {
253 proto::RepositoryEntry {
254 work_directory_id: value.work_directory.to_proto(),
255 branch: value.branch.as_ref().map(|str| str.to_string()),
256 updated_statuses: value
257 .statuses
258 .iter()
259 .map(|(repo_path, status)| make_status_entry(repo_path, status))
260 .collect(),
261 removed_repo_paths: Default::default(),
262 }
263 }
264}
265
/// This path corresponds to the 'content path' (the folder that contains the .git),
/// stored relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
269
270impl Default for RepositoryWorkDirectory {
271 fn default() -> Self {
272 RepositoryWorkDirectory(Arc::from(Path::new("")))
273 }
274}
275
276impl AsRef<Path> for RepositoryWorkDirectory {
277 fn as_ref(&self) -> &Path {
278 self.0.as_ref()
279 }
280}
281
/// Newtype over the project entry id of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
284
285impl WorkDirectoryEntry {
286 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
287 worktree.entry_for_id(self.0).and_then(|entry| {
288 path.strip_prefix(&entry.path)
289 .ok()
290 .map(move |path| path.into())
291 })
292 }
293}
294
// Allow a `WorkDirectoryEntry` to be used directly as its `ProjectEntryId`.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
302
303impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
304 fn from(value: ProjectEntryId) -> Self {
305 WorkDirectoryEntry(value)
306 }
307}
308
/// A `Snapshot` augmented with local-only state: gitignore files and
/// git repositories discovered on disk.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
}
319
/// A `LocalSnapshot` that is being mutated by the background scanner,
/// tracking removals so entry ids can be re-used.
pub struct LocalMutableSnapshot {
    snapshot: LocalSnapshot,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
}
328
/// Local-only state for a git repository discovered in the worktree.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Scan id when this repository's state last changed; compared in
    /// `changed_repos` to detect updated repositories.
    pub(crate) scan_id: usize,
    // Presumably the scan id of the last full (non-incremental) scan of this
    // repository — usage not visible in this chunk; confirm before relying on it.
    pub(crate) full_scan_id: usize,
    /// Handle to the underlying git repository implementation.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
338
339impl LocalRepositoryEntry {
340 // Note that this path should be relative to the worktree root.
341 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
342 path.starts_with(self.git_dir_path.as_ref())
343 }
344}
345
// Allow a `LocalSnapshot` to be used wherever a `&Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
353
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
359
// Allow a `LocalMutableSnapshot` to be used wherever a `&LocalSnapshot` is expected.
impl Deref for LocalMutableSnapshot {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
367
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalMutableSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
373
/// Messages sent from the background scanner to the worktree model.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// The paths (and entry ids) that changed relative to the previous snapshot.
        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
        /// Dropped after the update is applied, releasing anyone awaiting it.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner still has more work in progress.
        scanning: bool,
    },
}
383
/// State kept while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    /// Publishes each new snapshot so it can be diffed and sent to guests.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    /// Signalled to resume sending updates after a pause/reconnect.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
390
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// Entries changed; maps (path, entry id) to the kind of change.
    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
    /// Git repositories changed; maps work-directory path to its new state.
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
395
// Registers `Worktree` as a gpui model entity emitting `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
399
impl Worktree {
    /// Creates a local worktree rooted at `path`, spawning a background
    /// scanner that watches the filesystem and streams snapshot updates
    /// back to the model. Fails if the root path cannot be stat'ed.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // The initial snapshot contains only the root entry (if the path
            // exists); the background scanner fills in the rest.
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scanner state changes into the model: toggle the
            // `is_scanning` flag, install new snapshots, and emit events.
            // Stops when either the channel closes or the model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier releases any caller
                                // waiting for this batch of paths to be scanned.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the scanner itself on the background executor, fed by
            // filesystem events (debounced to 100ms) and explicit path
            // change requests.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        next_entry_id,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a remote worktree replica from the host's metadata. Updates
    /// received on `updates_tx` are applied to a background snapshot, which
    /// is then copied into the model on the foreground.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming remote updates to the background snapshot off
            // the main thread, signalling after each one.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // On each applied update, copy the background snapshot into the
            // model, notify observers, and resolve any snapshot subscriptions
            // whose scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// This worktree as a `LocalWorktree`, if it is one.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// This worktree as a `RemoteWorktree`, if it is one.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable access to this worktree as a `LocalWorktree`, if it is one.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable access to this worktree as a `RemoteWorktree`, if it is one.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// A clone of the current base `Snapshot`, regardless of worktree kind.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id of this worktree's owner; local worktrees are always
    /// replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Flattens the per-path, per-server diagnostic summaries into a single
    /// iterator of (path, server id, summary) triples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
693
694impl LocalWorktree {
695 pub fn contains_abs_path(&self, path: &Path) -> bool {
696 path.starts_with(&self.abs_path)
697 }
698
699 fn absolutize(&self, path: &Path) -> PathBuf {
700 if path.file_name().is_some() {
701 self.abs_path.join(path)
702 } else {
703 self.abs_path.to_path_buf()
704 }
705 }
706
    /// Loads the file at `path` into a new `Buffer` model with remote id
    /// `id`, and kicks off a git-diff recalculation against the index.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer on the background executor, since this
            // processes the entire file contents.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
729
730 pub fn diagnostics_for_path(
731 &self,
732 path: &Path,
733 ) -> Vec<(
734 LanguageServerId,
735 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
736 )> {
737 self.diagnostics.get(path).cloned().unwrap_or_default()
738 }
739
    /// Replaces the diagnostics reported by `server_id` for `worktree_path`,
    /// maintaining the per-path summaries and notifying collaborators when
    /// the summary changed.
    ///
    /// Returns `Ok(true)` if the summary for this path/server changed in a
    /// way observers should care about (either the old or new summary is
    /// non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        // Remove the previous summary up front; it is re-inserted below only
        // if the new diagnostics are non-empty.
        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: drop its entry, and the
            // whole path entry once no server has diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path list is kept sorted by server id so it can be
            // binary-searched.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        // If shared, broadcast the new summary (best-effort; errors logged).
        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
799
    /// Installs `new_snapshot` as the worktree's current snapshot, forwards
    /// it to an active share (if any), and emits `UpdatedGitRepositories`
    /// for repositories that changed between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        // Diff repositories against the old snapshot *before* replacing it.
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
813
    /// Computes which repositories differ between `old_repos` and
    /// `new_repos` via a merge join over the two id-sorted maps. A
    /// repository counts as changed if it was added, removed, or its
    /// `scan_id` differs. The result maps the repository's work-directory
    /// path (resolved against this worktree's entries) to its entry; repos
    /// whose entry id can no longer be resolved are silently skipped.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        // Present only in the old snapshot: removed.
                        Ordering::Less => {
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        // Present in both: changed only if rescanned.
                        Ordering::Equal => {
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        // Present only in the new snapshot: added.
                        Ordering::Greater => {
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
867
868 pub fn scan_complete(&self) -> impl Future<Output = ()> {
869 let mut is_scanning_rx = self.is_scanning.1.clone();
870 async move {
871 let mut is_scanning = is_scanning_rx.borrow().clone();
872 while is_scanning {
873 if let Some(value) = is_scanning_rx.recv().await {
874 is_scanning = value;
875 } else {
876 break;
877 }
878 }
879 }
880 }
881
    /// A clone of the worktree's current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
885
886 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
887 proto::WorktreeMetadata {
888 id: self.id().to_proto(),
889 root_name: self.root_name().to_string(),
890 visible: self.visible,
891 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
892 }
893 }
894
    /// Reads the file at `path`, returning its `File` entry, contents, and
    /// git index text (`diff_base`) if the file belongs to a repository.
    /// Also refreshes the file's entry in the snapshot so it reflects the
    /// just-read state.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // If the path belongs to a git repository, start loading its index
        // text concurrently on the background executor.
        if let Some(repo) = snapshot.repository_for_path(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
949
950 pub fn save_buffer(
951 &self,
952 buffer_handle: ModelHandle<Buffer>,
953 path: Arc<Path>,
954 has_changed_file: bool,
955 cx: &mut ModelContext<Worktree>,
956 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
957 let handle = cx.handle();
958 let buffer = buffer_handle.read(cx);
959
960 let rpc = self.client.clone();
961 let buffer_id = buffer.remote_id();
962 let project_id = self.share.as_ref().map(|share| share.project_id);
963
964 let text = buffer.as_rope().clone();
965 let fingerprint = text.fingerprint();
966 let version = buffer.version();
967 let save = self.write_file(path, text, buffer.line_ending(), cx);
968
969 cx.as_mut().spawn(|mut cx| async move {
970 let entry = save.await?;
971
972 if has_changed_file {
973 let new_file = Arc::new(File {
974 entry_id: entry.id,
975 worktree: handle,
976 path: entry.path,
977 mtime: entry.mtime,
978 is_local: true,
979 is_deleted: false,
980 });
981
982 if let Some(project_id) = project_id {
983 rpc.send(proto::UpdateBufferFile {
984 project_id,
985 buffer_id,
986 file: Some(new_file.to_proto()),
987 })
988 .log_err();
989 }
990
991 buffer_handle.update(&mut cx, |buffer, cx| {
992 if has_changed_file {
993 buffer.file_updated(new_file, cx).detach();
994 }
995 });
996 }
997
998 if let Some(project_id) = project_id {
999 rpc.send(proto::BufferSaved {
1000 project_id,
1001 buffer_id,
1002 version: serialize_version(&version),
1003 mtime: Some(entry.mtime.into()),
1004 fingerprint: serialize_fingerprint(fingerprint),
1005 })?;
1006 }
1007
1008 buffer_handle.update(&mut cx, |buffer, cx| {
1009 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1010 });
1011
1012 Ok((version, fingerprint, entry.mtime))
1013 })
1014 }
1015
1016 pub fn create_entry(
1017 &self,
1018 path: impl Into<Arc<Path>>,
1019 is_dir: bool,
1020 cx: &mut ModelContext<Worktree>,
1021 ) -> Task<Result<Entry>> {
1022 let path = path.into();
1023 let abs_path = self.absolutize(&path);
1024 let fs = self.fs.clone();
1025 let write = cx.background().spawn(async move {
1026 if is_dir {
1027 fs.create_dir(&abs_path).await
1028 } else {
1029 fs.save(&abs_path, &Default::default(), Default::default())
1030 .await
1031 }
1032 });
1033
1034 cx.spawn(|this, mut cx| async move {
1035 write.await?;
1036 this.update(&mut cx, |this, cx| {
1037 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1038 })
1039 .await
1040 })
1041 }
1042
1043 pub fn write_file(
1044 &self,
1045 path: impl Into<Arc<Path>>,
1046 text: Rope,
1047 line_ending: LineEnding,
1048 cx: &mut ModelContext<Worktree>,
1049 ) -> Task<Result<Entry>> {
1050 let path = path.into();
1051 let abs_path = self.absolutize(&path);
1052 let fs = self.fs.clone();
1053 let write = cx
1054 .background()
1055 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1056
1057 cx.spawn(|this, mut cx| async move {
1058 write.await?;
1059 this.update(&mut cx, |this, cx| {
1060 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1061 })
1062 .await
1063 })
1064 }
1065
    /// Deletes the file or directory for `entry_id` from disk (directories
    /// recursively), then asks the background scanner to rescan the deleted
    /// path and waits for that scan to complete.
    ///
    /// Returns `None` if no entry with `entry_id` exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root so the scanner sees the same path form
            // the filesystem watcher reports.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // The barrier resolves once the scanner has processed the path.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1108
    /// Renames the entry with `entry_id` to `new_path` on disk, then
    /// refreshes both the new and old locations in the worktree.
    ///
    /// Returns `None` if no entry with `entry_id` exists; the task resolves
    /// with the refreshed entry at `new_path`.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        // Perform the rename on the background executor.
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Rescan both locations so the snapshot reflects the move.
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
1135
1136 pub fn copy_entry(
1137 &self,
1138 entry_id: ProjectEntryId,
1139 new_path: impl Into<Arc<Path>>,
1140 cx: &mut ModelContext<Worktree>,
1141 ) -> Option<Task<Result<Entry>>> {
1142 let old_path = self.entry_for_id(entry_id)?.path.clone();
1143 let new_path = new_path.into();
1144 let abs_old_path = self.absolutize(&old_path);
1145 let abs_new_path = self.absolutize(&new_path);
1146 let fs = self.fs.clone();
1147 let copy = cx.background().spawn(async move {
1148 copy_recursive(
1149 fs.as_ref(),
1150 &abs_old_path,
1151 &abs_new_path,
1152 Default::default(),
1153 )
1154 .await
1155 });
1156
1157 Some(cx.spawn(|this, mut cx| async move {
1158 copy.await?;
1159 this.update(&mut cx, |this, cx| {
1160 this.as_local_mut()
1161 .unwrap()
1162 .refresh_entry(new_path.clone(), None, cx)
1163 })
1164 .await
1165 }))
1166 }
1167
    /// Rescans the given path (and, for renames, the old path too) and
    /// returns the refreshed entry for `path` once the scan has completed.
    ///
    /// `old_path` should be supplied when the entry was just moved from
    /// another location, so the old location is rescanned as well.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // An empty file name means the path refers to the worktree root.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Hand the paths to the background scanner and wait on the
            // barrier until it has processed them.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            // The worktree may have been dropped while we were waiting,
            // since this task holds only a weak handle.
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1205
    /// Begins (or re-requests) sharing this worktree with collaborators in
    /// the given project.
    ///
    /// Returns a task that resolves once the first full snapshot has been
    /// sent to the server, or with an error if sharing ends first.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the existing
            // background task to resume sending updates.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send the current diagnostic summaries up front, so remote
            // collaborators see them before the first snapshot update.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first update sent
                    // to the server contains every entry.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Tiny chunk size in tests exercises the chunking
                        // logic; a larger one is used in production.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        // Diff against the previously-sent snapshot so each
                        // update only carries what changed.
                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            // Retry each chunk until acknowledged, pausing
                            // until a resume signal after each failure.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // The first successfully-sent snapshot resolves the
                        // task returned by `share`.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1290
1291 pub fn unshare(&mut self) {
1292 self.share.take();
1293 }
1294
    /// Whether this worktree is currently being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1298}
1299
impl RemoteWorktree {
    /// Returns a copy of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected: stops accepting remote updates
    /// and drops pending snapshot subscriptions, so waiters observe a
    /// closed channel.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Saves the given buffer by asking the host over RPC, then applies the
    /// resulting version, fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Enqueues a worktree update received from the host. Updates arriving
    /// after disconnection (when `updates_tx` is `None`) are dropped.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot at least as new as `scan_id` has been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once a snapshot with the given
    /// `scan_id` has been observed, or errors if the worktree disconnects
    /// first (the oneshot sender is dropped).
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id so they can be completed
            // in order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary received from the host, removing the
    /// per-path map entirely when its last summary becomes empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry sent by the host, first waiting for the snapshot at
    /// `scan_id` so the insertion is applied against consistent state.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Mutate the background snapshot under its lock, then mirror
                // it into the foreground copy.
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes the entry with the given id, first waiting for the snapshot
    /// at `scan_id` so the deletion is applied against consistent state.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1441
1442impl Snapshot {
1443 pub fn id(&self) -> WorktreeId {
1444 self.id
1445 }
1446
1447 pub fn abs_path(&self) -> &Arc<Path> {
1448 &self.abs_path
1449 }
1450
1451 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1452 self.entries_by_id.get(&entry_id, &()).is_some()
1453 }
1454
1455 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1456 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1457 let old_entry = self.entries_by_id.insert_or_replace(
1458 PathEntry {
1459 id: entry.id,
1460 path: entry.path.clone(),
1461 is_ignored: entry.is_ignored,
1462 scan_id: 0,
1463 },
1464 &(),
1465 );
1466 if let Some(old_entry) = old_entry {
1467 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1468 }
1469 self.entries_by_path.insert_or_replace(entry.clone(), &());
1470 Ok(entry)
1471 }
1472
1473 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1474 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1475 self.entries_by_path = {
1476 let mut cursor = self.entries_by_path.cursor();
1477 let mut new_entries_by_path =
1478 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1479 while let Some(entry) = cursor.item() {
1480 if entry.path.starts_with(&removed_entry.path) {
1481 self.entries_by_id.remove(&entry.id, &());
1482 cursor.next(&());
1483 } else {
1484 break;
1485 }
1486 }
1487 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1488 new_entries_by_path
1489 };
1490
1491 Some(removed_entry.path)
1492 }
1493
1494 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1495 let mut entries_by_path_edits = Vec::new();
1496 let mut entries_by_id_edits = Vec::new();
1497 for entry_id in update.removed_entries {
1498 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1499 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1500 entries_by_id_edits.push(Edit::Remove(entry.id));
1501 }
1502 }
1503
1504 for entry in update.updated_entries {
1505 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1506 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1507 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1508 }
1509 entries_by_id_edits.push(Edit::Insert(PathEntry {
1510 id: entry.id,
1511 path: entry.path.clone(),
1512 is_ignored: entry.is_ignored,
1513 scan_id: 0,
1514 }));
1515 entries_by_path_edits.push(Edit::Insert(entry));
1516 }
1517
1518 self.entries_by_path.edit(entries_by_path_edits, &());
1519 self.entries_by_id.edit(entries_by_id_edits, &());
1520
1521 update.removed_repositories.sort_unstable();
1522 self.repository_entries.retain(|_, entry| {
1523 if let Ok(_) = update
1524 .removed_repositories
1525 .binary_search(&entry.work_directory.to_proto())
1526 {
1527 false
1528 } else {
1529 true
1530 }
1531 });
1532
1533 for repository in update.updated_repositories {
1534 let work_directory_entry: WorkDirectoryEntry =
1535 ProjectEntryId::from_proto(repository.work_directory_id).into();
1536
1537 if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1538 let mut statuses = TreeMap::default();
1539 for status_entry in repository.updated_statuses {
1540 let Some(git_file_status) = read_git_status(status_entry.status) else {
1541 continue;
1542 };
1543
1544 let repo_path = RepoPath::new(status_entry.repo_path.into());
1545 statuses.insert(repo_path, git_file_status);
1546 }
1547
1548 let work_directory = RepositoryWorkDirectory(entry.path.clone());
1549 if self.repository_entries.get(&work_directory).is_some() {
1550 self.repository_entries.update(&work_directory, |repo| {
1551 repo.branch = repository.branch.map(Into::into);
1552 repo.statuses.insert_tree(statuses);
1553
1554 for repo_path in repository.removed_repo_paths {
1555 let repo_path = RepoPath::new(repo_path.into());
1556 repo.statuses.remove(&repo_path);
1557 }
1558 });
1559 } else {
1560 self.repository_entries.insert(
1561 work_directory,
1562 RepositoryEntry {
1563 work_directory: work_directory_entry,
1564 branch: repository.branch.map(Into::into),
1565 statuses,
1566 },
1567 )
1568 }
1569 } else {
1570 log::error!("no work directory entry for repository {:?}", repository)
1571 }
1572 }
1573
1574 self.scan_id = update.scan_id as usize;
1575 if update.is_last_update {
1576 self.completed_scan_id = update.scan_id as usize;
1577 }
1578
1579 Ok(())
1580 }
1581
1582 pub fn file_count(&self) -> usize {
1583 self.entries_by_path.summary().file_count
1584 }
1585
1586 pub fn visible_file_count(&self) -> usize {
1587 self.entries_by_path.summary().visible_file_count
1588 }
1589
1590 fn traverse_from_offset(
1591 &self,
1592 include_dirs: bool,
1593 include_ignored: bool,
1594 start_offset: usize,
1595 ) -> Traversal {
1596 let mut cursor = self.entries_by_path.cursor();
1597 cursor.seek(
1598 &TraversalTarget::Count {
1599 count: start_offset,
1600 include_dirs,
1601 include_ignored,
1602 },
1603 Bias::Right,
1604 &(),
1605 );
1606 Traversal {
1607 cursor,
1608 include_dirs,
1609 include_ignored,
1610 }
1611 }
1612
1613 fn traverse_from_path(
1614 &self,
1615 include_dirs: bool,
1616 include_ignored: bool,
1617 path: &Path,
1618 ) -> Traversal {
1619 let mut cursor = self.entries_by_path.cursor();
1620 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1621 Traversal {
1622 cursor,
1623 include_dirs,
1624 include_ignored,
1625 }
1626 }
1627
1628 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1629 self.traverse_from_offset(false, include_ignored, start)
1630 }
1631
1632 pub fn entries(&self, include_ignored: bool) -> Traversal {
1633 self.traverse_from_offset(true, include_ignored, 0)
1634 }
1635
1636 pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1637 self.repository_entries
1638 .iter()
1639 .map(|(path, entry)| (&path.0, entry))
1640 }
1641
1642 /// Get the repository whose work directory contains the given path.
1643 pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1644 self.repository_entries
1645 .get(&RepositoryWorkDirectory(path.into()))
1646 .cloned()
1647 }
1648
1649 /// Get the repository whose work directory contains the given path.
1650 pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1651 let mut max_len = 0;
1652 let mut current_candidate = None;
1653 for (work_directory, repo) in (&self.repository_entries).iter() {
1654 if path.starts_with(&work_directory.0) {
1655 if work_directory.0.as_os_str().len() >= max_len {
1656 current_candidate = Some(repo);
1657 max_len = work_directory.0.as_os_str().len();
1658 } else {
1659 break;
1660 }
1661 }
1662 }
1663
1664 current_candidate.cloned()
1665 }
1666
1667 /// Given an ordered iterator of entries, returns an iterator of those entries,
1668 /// along with their containing git repository.
1669 pub fn entries_with_repositories<'a>(
1670 &'a self,
1671 entries: impl 'a + Iterator<Item = &'a Entry>,
1672 ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1673 let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1674 let mut repositories = self.repositories().peekable();
1675 entries.map(move |entry| {
1676 while let Some((repo_path, _)) = containing_repos.last() {
1677 if !entry.path.starts_with(repo_path) {
1678 containing_repos.pop();
1679 } else {
1680 break;
1681 }
1682 }
1683 while let Some((repo_path, _)) = repositories.peek() {
1684 if entry.path.starts_with(repo_path) {
1685 containing_repos.push(repositories.next().unwrap());
1686 } else {
1687 break;
1688 }
1689 }
1690 let repo = containing_repos.last().map(|(_, repo)| *repo);
1691 (entry, repo)
1692 })
1693 }
1694
1695 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1696 let empty_path = Path::new("");
1697 self.entries_by_path
1698 .cursor::<()>()
1699 .filter(move |entry| entry.path.as_ref() != empty_path)
1700 .map(|entry| &entry.path)
1701 }
1702
1703 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1704 let mut cursor = self.entries_by_path.cursor();
1705 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1706 let traversal = Traversal {
1707 cursor,
1708 include_dirs: true,
1709 include_ignored: true,
1710 };
1711 ChildEntriesIter {
1712 traversal,
1713 parent_path,
1714 }
1715 }
1716
1717 fn descendent_entries<'a>(
1718 &'a self,
1719 include_dirs: bool,
1720 include_ignored: bool,
1721 parent_path: &'a Path,
1722 ) -> DescendentEntriesIter<'a> {
1723 let mut cursor = self.entries_by_path.cursor();
1724 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1725 let mut traversal = Traversal {
1726 cursor,
1727 include_dirs,
1728 include_ignored,
1729 };
1730
1731 if traversal.end_offset() == traversal.start_offset() {
1732 traversal.advance();
1733 }
1734
1735 DescendentEntriesIter {
1736 traversal,
1737 parent_path,
1738 }
1739 }
1740
1741 pub fn root_entry(&self) -> Option<&Entry> {
1742 self.entry_for_path("")
1743 }
1744
1745 pub fn root_name(&self) -> &str {
1746 &self.root_name
1747 }
1748
1749 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1750 self.repository_entries
1751 .get(&RepositoryWorkDirectory(Path::new("").into()))
1752 .map(|entry| entry.to_owned())
1753 }
1754
1755 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1756 self.repository_entries.values()
1757 }
1758
1759 pub fn scan_id(&self) -> usize {
1760 self.scan_id
1761 }
1762
1763 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1764 let path = path.as_ref();
1765 self.traverse_from_path(true, true, path)
1766 .entry()
1767 .and_then(|entry| {
1768 if entry.path.as_ref() == path {
1769 Some(entry)
1770 } else {
1771 None
1772 }
1773 })
1774 }
1775
1776 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1777 let entry = self.entries_by_id.get(&id, &())?;
1778 self.entry_for_path(&entry.path)
1779 }
1780
1781 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1782 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1783 }
1784}
1785
1786impl LocalSnapshot {
1787 pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1788 self.git_repositories.get(&repo.work_directory.0)
1789 }
1790
1791 pub(crate) fn repo_for_metadata(
1792 &self,
1793 path: &Path,
1794 ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1795 self.git_repositories
1796 .iter()
1797 .find(|(_, repo)| repo.in_dot_git(path))
1798 }
1799
1800 #[cfg(test)]
1801 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1802 let root_name = self.root_name.clone();
1803 proto::UpdateWorktree {
1804 project_id,
1805 worktree_id: self.id().to_proto(),
1806 abs_path: self.abs_path().to_string_lossy().into(),
1807 root_name,
1808 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1809 removed_entries: Default::default(),
1810 scan_id: self.scan_id as u64,
1811 is_last_update: true,
1812 updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1813 removed_repositories: Default::default(),
1814 }
1815 }
1816
1817 pub(crate) fn build_update(
1818 &self,
1819 other: &Self,
1820 project_id: u64,
1821 worktree_id: u64,
1822 include_ignored: bool,
1823 ) -> proto::UpdateWorktree {
1824 let mut updated_entries = Vec::new();
1825 let mut removed_entries = Vec::new();
1826 let mut self_entries = self
1827 .entries_by_id
1828 .cursor::<()>()
1829 .filter(|e| include_ignored || !e.is_ignored)
1830 .peekable();
1831 let mut other_entries = other
1832 .entries_by_id
1833 .cursor::<()>()
1834 .filter(|e| include_ignored || !e.is_ignored)
1835 .peekable();
1836 loop {
1837 match (self_entries.peek(), other_entries.peek()) {
1838 (Some(self_entry), Some(other_entry)) => {
1839 match Ord::cmp(&self_entry.id, &other_entry.id) {
1840 Ordering::Less => {
1841 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1842 updated_entries.push(entry);
1843 self_entries.next();
1844 }
1845 Ordering::Equal => {
1846 if self_entry.scan_id != other_entry.scan_id {
1847 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1848 updated_entries.push(entry);
1849 }
1850
1851 self_entries.next();
1852 other_entries.next();
1853 }
1854 Ordering::Greater => {
1855 removed_entries.push(other_entry.id.to_proto());
1856 other_entries.next();
1857 }
1858 }
1859 }
1860 (Some(self_entry), None) => {
1861 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1862 updated_entries.push(entry);
1863 self_entries.next();
1864 }
1865 (None, Some(other_entry)) => {
1866 removed_entries.push(other_entry.id.to_proto());
1867 other_entries.next();
1868 }
1869 (None, None) => break,
1870 }
1871 }
1872
1873 let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1874 let mut removed_repositories = Vec::new();
1875 let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1876 let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1877 loop {
1878 match (self_repos.peek(), other_repos.peek()) {
1879 (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1880 match Ord::cmp(self_work_dir, other_work_dir) {
1881 Ordering::Less => {
1882 updated_repositories.push((*self_repo).into());
1883 self_repos.next();
1884 }
1885 Ordering::Equal => {
1886 if self_repo != other_repo {
1887 updated_repositories.push(self_repo.build_update(other_repo));
1888 }
1889
1890 self_repos.next();
1891 other_repos.next();
1892 }
1893 Ordering::Greater => {
1894 removed_repositories.push(other_repo.work_directory.to_proto());
1895 other_repos.next();
1896 }
1897 }
1898 }
1899 (Some((_, self_repo)), None) => {
1900 updated_repositories.push((*self_repo).into());
1901 self_repos.next();
1902 }
1903 (None, Some((_, other_repo))) => {
1904 removed_repositories.push(other_repo.work_directory.to_proto());
1905 other_repos.next();
1906 }
1907 (None, None) => break,
1908 }
1909 }
1910
1911 proto::UpdateWorktree {
1912 project_id,
1913 worktree_id,
1914 abs_path: self.abs_path().to_string_lossy().into(),
1915 root_name: self.root_name().to_string(),
1916 updated_entries,
1917 removed_entries,
1918 scan_id: self.scan_id as u64,
1919 is_last_update: self.completed_scan_id == self.scan_id,
1920 updated_repositories,
1921 removed_repositories,
1922 }
1923 }
1924
1925 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1926 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1927 let abs_path = self.abs_path.join(&entry.path);
1928 match smol::block_on(build_gitignore(&abs_path, fs)) {
1929 Ok(ignore) => {
1930 self.ignores_by_parent_abs_path
1931 .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
1932 }
1933 Err(error) => {
1934 log::error!(
1935 "error loading .gitignore file {:?} - {:?}",
1936 &entry.path,
1937 error
1938 );
1939 }
1940 }
1941 }
1942
1943 if entry.kind == EntryKind::PendingDir {
1944 if let Some(existing_entry) =
1945 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1946 {
1947 entry.kind = existing_entry.kind;
1948 }
1949 }
1950
1951 let scan_id = self.scan_id;
1952 let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1953 if let Some(removed) = removed {
1954 if removed.id != entry.id {
1955 self.entries_by_id.remove(&removed.id, &());
1956 }
1957 }
1958 self.entries_by_id.insert_or_replace(
1959 PathEntry {
1960 id: entry.id,
1961 path: entry.path.clone(),
1962 is_ignored: entry.is_ignored,
1963 scan_id,
1964 },
1965 &(),
1966 );
1967
1968 entry
1969 }
1970
1971 fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1972 let abs_path = self.abs_path.join(&parent_path);
1973 let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1974
1975 // Guard against repositories inside the repository metadata
1976 if work_dir
1977 .components()
1978 .find(|component| component.as_os_str() == *DOT_GIT)
1979 .is_some()
1980 {
1981 return None;
1982 };
1983
1984 let work_dir_id = self
1985 .entry_for_path(work_dir.clone())
1986 .map(|entry| entry.id)?;
1987
1988 if self.git_repositories.get(&work_dir_id).is_none() {
1989 let repo = fs.open_repo(abs_path.as_path())?;
1990 let work_directory = RepositoryWorkDirectory(work_dir.clone());
1991 let scan_id = self.scan_id;
1992
1993 let repo_lock = repo.lock();
1994
1995 self.repository_entries.insert(
1996 work_directory,
1997 RepositoryEntry {
1998 work_directory: work_dir_id.into(),
1999 branch: repo_lock.branch_name().map(Into::into),
2000 statuses: repo_lock.statuses().unwrap_or_default(),
2001 },
2002 );
2003 drop(repo_lock);
2004
2005 self.git_repositories.insert(
2006 work_dir_id,
2007 LocalRepositoryEntry {
2008 scan_id,
2009 full_scan_id: scan_id,
2010 repo_ptr: repo,
2011 git_dir_path: parent_path.clone(),
2012 },
2013 )
2014 }
2015
2016 Some(())
2017 }
2018
2019 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2020 let mut inodes = TreeSet::default();
2021 for ancestor in path.ancestors().skip(1) {
2022 if let Some(entry) = self.entry_for_path(ancestor) {
2023 inodes.insert(entry.inode);
2024 }
2025 }
2026 inodes
2027 }
2028
2029 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2030 let mut new_ignores = Vec::new();
2031 for ancestor in abs_path.ancestors().skip(1) {
2032 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2033 new_ignores.push((ancestor, Some(ignore.clone())));
2034 } else {
2035 new_ignores.push((ancestor, None));
2036 }
2037 }
2038
2039 let mut ignore_stack = IgnoreStack::none();
2040 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2041 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2042 ignore_stack = IgnoreStack::all();
2043 break;
2044 } else if let Some(ignore) = ignore {
2045 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2046 }
2047 }
2048
2049 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2050 ignore_stack = IgnoreStack::all();
2051 }
2052
2053 ignore_stack
2054 }
2055}
2056
impl LocalMutableSnapshot {
    /// Keeps entry ids stable across scans: reuses the id of a previously
    /// removed entry with the same inode, or of the existing entry at the
    /// same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Inserts a scanned entry, first stabilizing its id.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        self.snapshot.insert_entry(entry, fs)
    }

    /// Records the children of a directory discovered by the scanner,
    /// marking the parent as fully scanned and registering any gitignore
    /// or git repository found inside it.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The parent is no longer a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.abs_path.join(&parent_path).into();
            self.ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        // A `.git` directory marks its parent as a repository work directory.
        if parent_path.file_name() == Some(&DOT_GIT) {
            self.build_repo(parent_path, fs);
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Removes the entry at `path` and all of its descendants, remembering
    /// the removed ids (by inode) so they can be reused if the same files
    /// reappear, and flagging the parent gitignore for re-evaluation when a
    /// `.gitignore` file is removed.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before path], [path and descendants],
            // [after], then stitch the outer two back together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }
    }
}
2159
2160async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2161 let contents = fs.load(abs_path).await?;
2162 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2163 let mut builder = GitignoreBuilder::new(parent);
2164 for line in contents.lines() {
2165 builder.add_line(Some(abs_path.into()), line)?;
2166 }
2167 Ok(builder.build()?)
2168}
2169
2170impl WorktreeId {
2171 pub fn from_usize(handle_id: usize) -> Self {
2172 Self(handle_id)
2173 }
2174
2175 pub(crate) fn from_proto(id: u64) -> Self {
2176 Self(id as usize)
2177 }
2178
2179 pub fn to_proto(&self) -> u64 {
2180 self.0 as u64
2181 }
2182
2183 pub fn to_usize(&self) -> usize {
2184 self.0
2185 }
2186}
2187
2188impl fmt::Display for WorktreeId {
2189 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2190 self.0.fmt(f)
2191 }
2192}
2193
2194impl Deref for Worktree {
2195 type Target = Snapshot;
2196
2197 fn deref(&self) -> &Self::Target {
2198 match self {
2199 Worktree::Local(worktree) => &worktree.snapshot,
2200 Worktree::Remote(worktree) => &worktree.snapshot,
2201 }
2202 }
2203}
2204
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    /// Exposes the worktree's snapshot as the deref target.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2212
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    /// Exposes the worktree's snapshot as the deref target.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2220
2221impl fmt::Debug for LocalWorktree {
2222 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2223 self.snapshot.fmt(f)
2224 }
2225}
2226
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Wrapper types that render the two SumTrees compactly, instead of
        // dumping every field of every entry.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Render as a map from path to entry id.
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
2254
/// A handle to a file within a worktree, implementing `language::File`.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known filesystem modification time.
    pub mtime: SystemTime,
    // Stable id of the corresponding worktree entry.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the containing worktree is local (as opposed to remote).
    pub(crate) is_local: bool,
    // Set when the underlying file has been deleted from disk.
    pub(crate) is_deleted: bool,
}
2264
impl language::File for File {
    // Only files in local worktrees expose the `LocalFile` interface.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    // Build a user-facing path: visible worktrees are prefixed with their
    // root name; hidden ones with their absolute path (abbreviating the home
    // directory to `~` for local worktrees).
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this file *is* the worktree root;
        // pushing it would be a no-op anyway, so skip it.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    // Enables downcasting back to this concrete type (see `File::from_dyn`).
    fn as_any(&self) -> &dyn Any {
        self
    }

    // Serialize this handle for transmission to remote collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2332
impl language::LocalFile for File {
    // Absolute path of this file on disk.
    // NOTE(review): panics if the worktree is not local; callers are expected
    // to have checked `as_local` first — confirm against call sites.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    // Read the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    // If the worktree is currently shared, notify collaborators that the
    // buffer was reloaded from disk. Send errors are logged, not propagated.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2376
2377impl File {
2378 pub fn from_proto(
2379 proto: rpc::proto::File,
2380 worktree: ModelHandle<Worktree>,
2381 cx: &AppContext,
2382 ) -> Result<Self> {
2383 let worktree_id = worktree
2384 .read(cx)
2385 .as_remote()
2386 .ok_or_else(|| anyhow!("not remote"))?
2387 .id();
2388
2389 if worktree_id.to_proto() != proto.worktree_id {
2390 return Err(anyhow!("worktree id does not match file"));
2391 }
2392
2393 Ok(Self {
2394 worktree,
2395 path: Path::new(&proto.path).into(),
2396 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2397 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2398 is_local: false,
2399 is_deleted: proto.is_deleted,
2400 })
2401 }
2402
2403 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2404 file.and_then(|f| f.as_any().downcast_ref())
2405 }
2406
2407 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2408 self.worktree.read(cx).id()
2409 }
2410
2411 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2412 if self.is_deleted {
2413 None
2414 } else {
2415 Some(self.entry_id)
2416 }
2417 }
2418}
2419
/// A single file or directory tracked within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable id, preserved across renames when possible.
    pub id: ProjectEntryId,
    /// File, scanned directory, or not-yet-scanned directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode number.
    pub inode: u64,
    /// Filesystem modification time.
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether a `.gitignore` rule excludes this entry.
    pub is_ignored: bool,
}
2430
/// Classification of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been scanned yet.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying a precomputed char bag used for fuzzy matching.
    File(CharBag),
}
2437
/// Kind of change reported for a path between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Used when it can't be determined whether the entry was added or
    /// updated (e.g. for events received before the initial scan finished).
    AddedOrUpdated,
}
2445
2446impl Entry {
2447 fn new(
2448 path: Arc<Path>,
2449 metadata: &fs::Metadata,
2450 next_entry_id: &AtomicUsize,
2451 root_char_bag: CharBag,
2452 ) -> Self {
2453 Self {
2454 id: ProjectEntryId::new(next_entry_id),
2455 kind: if metadata.is_dir {
2456 EntryKind::PendingDir
2457 } else {
2458 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2459 },
2460 path,
2461 inode: metadata.inode,
2462 mtime: metadata.mtime,
2463 is_symlink: metadata.is_symlink,
2464 is_ignored: false,
2465 }
2466 }
2467
2468 pub fn is_dir(&self) -> bool {
2469 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2470 }
2471
2472 pub fn is_file(&self) -> bool {
2473 matches!(self.kind, EntryKind::File(_))
2474 }
2475}
2476
2477impl sum_tree::Item for Entry {
2478 type Summary = EntrySummary;
2479
2480 fn summary(&self) -> Self::Summary {
2481 let visible_count = if self.is_ignored { 0 } else { 1 };
2482 let file_count;
2483 let visible_file_count;
2484 if self.is_file() {
2485 file_count = 1;
2486 visible_file_count = visible_count;
2487 } else {
2488 file_count = 0;
2489 visible_file_count = 0;
2490 }
2491
2492 EntrySummary {
2493 max_path: self.path.clone(),
2494 count: 1,
2495 visible_count,
2496 file_count,
2497 visible_file_count,
2498 }
2499 }
2500}
2501
2502impl sum_tree::KeyedItem for Entry {
2503 type Key = PathKey;
2504
2505 fn key(&self) -> Self::Key {
2506 PathKey(self.path.clone())
2507 }
2508}
2509
/// Aggregated statistics over a subtree of `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest path in the subtree (used for seeking by path).
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are files and not gitignored.
    visible_file_count: usize,
}
2518
2519impl Default for EntrySummary {
2520 fn default() -> Self {
2521 Self {
2522 max_path: Arc::from(Path::new("")),
2523 count: 0,
2524 visible_count: 0,
2525 file_count: 0,
2526 visible_file_count: 0,
2527 }
2528 }
2529}
2530
2531impl sum_tree::Summary for EntrySummary {
2532 type Context = ();
2533
2534 fn add_summary(&mut self, rhs: &Self, _: &()) {
2535 self.max_path = rhs.max_path.clone();
2536 self.count += rhs.count;
2537 self.visible_count += rhs.visible_count;
2538 self.file_count += rhs.file_count;
2539 self.visible_file_count += rhs.visible_file_count;
2540 }
2541}
2542
/// An entry indexed by id rather than path, enabling id-based lookups.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan during which this entry was last updated.
    scan_id: usize,
}
2550
2551impl sum_tree::Item for PathEntry {
2552 type Summary = PathEntrySummary;
2553
2554 fn summary(&self) -> Self::Summary {
2555 PathEntrySummary { max_id: self.id }
2556 }
2557}
2558
2559impl sum_tree::KeyedItem for PathEntry {
2560 type Key = ProjectEntryId;
2561
2562 fn key(&self) -> Self::Key {
2563 self.id
2564 }
2565}
2566
/// Summary for the id-keyed entry tree: tracks the greatest id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2571
2572impl sum_tree::Summary for PathEntrySummary {
2573 type Context = ();
2574
2575 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2576 self.max_id = summary.max_id;
2577 }
2578}
2579
2580impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2581 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2582 *self = summary.max_id;
2583 }
2584}
2585
/// Newtype over a path, used as the key for the path-ordered entry tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2588
2589impl Default for PathKey {
2590 fn default() -> Self {
2591 Self(Path::new("").into())
2592 }
2593}
2594
2595impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2596 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2597 self.0 = summary.max_path.clone();
2598 }
2599}
2600
/// Scans a worktree's directory on background threads, keeping the shared
/// snapshot up to date with filesystem events and refresh requests.
struct BackgroundScanner {
    // The mutable snapshot this scanner writes into; shared via the mutex.
    snapshot: Mutex<LocalMutableSnapshot>,
    fs: Arc<dyn Fs>,
    // Channel used to report scan progress and updated snapshots.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Requests from the worktree to (re)scan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // Snapshot as of the last status update, plus paths touched since then;
    // used to compute change sets.
    prev_state: Mutex<BackgroundScannerState>,
    next_entry_id: Arc<AtomicUsize>,
    // Set once the initial recursive scan has completed.
    finished_initial_scan: bool,
}
2611
/// State carried between status updates: the previously reported snapshot
/// and the paths affected since it was sent.
struct BackgroundScannerState {
    snapshot: Snapshot,
    event_paths: Vec<Arc<Path>>,
}
2616
2617impl BackgroundScanner {
2618 fn new(
2619 snapshot: LocalSnapshot,
2620 next_entry_id: Arc<AtomicUsize>,
2621 fs: Arc<dyn Fs>,
2622 status_updates_tx: UnboundedSender<ScanState>,
2623 executor: Arc<executor::Background>,
2624 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2625 ) -> Self {
2626 Self {
2627 fs,
2628 status_updates_tx,
2629 executor,
2630 refresh_requests_rx,
2631 next_entry_id,
2632 prev_state: Mutex::new(BackgroundScannerState {
2633 snapshot: snapshot.snapshot.clone(),
2634 event_paths: Default::default(),
2635 }),
2636 snapshot: Mutex::new(LocalMutableSnapshot {
2637 snapshot,
2638 removed_entry_ids: Default::default(),
2639 }),
2640 finished_initial_scan: false,
2641 }
2642 }
2643
2644 async fn run(
2645 &mut self,
2646 mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2647 ) {
2648 use futures::FutureExt as _;
2649
2650 let (root_abs_path, root_inode) = {
2651 let snapshot = self.snapshot.lock();
2652 (
2653 snapshot.abs_path.clone(),
2654 snapshot.root_entry().map(|e| e.inode),
2655 )
2656 };
2657
2658 // Populate ignores above the root.
2659 let ignore_stack;
2660 for ancestor in root_abs_path.ancestors().skip(1) {
2661 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2662 {
2663 self.snapshot
2664 .lock()
2665 .ignores_by_parent_abs_path
2666 .insert(ancestor.into(), (ignore.into(), false));
2667 }
2668 }
2669 {
2670 let mut snapshot = self.snapshot.lock();
2671 snapshot.scan_id += 1;
2672 ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2673 if ignore_stack.is_all() {
2674 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2675 root_entry.is_ignored = true;
2676 snapshot.insert_entry(root_entry, self.fs.as_ref());
2677 }
2678 }
2679 };
2680
2681 // Perform an initial scan of the directory.
2682 let (scan_job_tx, scan_job_rx) = channel::unbounded();
2683 smol::block_on(scan_job_tx.send(ScanJob {
2684 abs_path: root_abs_path,
2685 path: Arc::from(Path::new("")),
2686 ignore_stack,
2687 ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2688 scan_queue: scan_job_tx.clone(),
2689 }))
2690 .unwrap();
2691 drop(scan_job_tx);
2692 self.scan_dirs(true, scan_job_rx).await;
2693 {
2694 let mut snapshot = self.snapshot.lock();
2695 snapshot.completed_scan_id = snapshot.scan_id;
2696 }
2697 self.send_status_update(false, None);
2698
2699 // Process any any FS events that occurred while performing the initial scan.
2700 // For these events, update events cannot be as precise, because we didn't
2701 // have the previous state loaded yet.
2702 if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2703 let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2704 while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2705 paths.extend(more_events.into_iter().map(|e| e.path));
2706 }
2707 self.process_events(paths).await;
2708 }
2709
2710 self.finished_initial_scan = true;
2711
2712 // Continue processing events until the worktree is dropped.
2713 loop {
2714 select_biased! {
2715 // Process any path refresh requests from the worktree. Prioritize
2716 // these before handling changes reported by the filesystem.
2717 request = self.refresh_requests_rx.recv().fuse() => {
2718 let Ok((paths, barrier)) = request else { break };
2719 if !self.process_refresh_request(paths.clone(), barrier).await {
2720 return;
2721 }
2722 }
2723
2724 events = events_rx.next().fuse() => {
2725 let Some(events) = events else { break };
2726 let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2727 while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2728 paths.extend(more_events.into_iter().map(|e| e.path));
2729 }
2730 self.process_events(paths.clone()).await;
2731 }
2732 }
2733 }
2734 }
2735
2736 async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2737 if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
2738 paths.sort_unstable();
2739 util::extend_sorted(
2740 &mut self.prev_state.lock().event_paths,
2741 paths,
2742 usize::MAX,
2743 Ord::cmp,
2744 );
2745 }
2746 self.send_status_update(false, Some(barrier))
2747 }
2748
    // Apply a batch of filesystem events: reload the affected entries,
    // rescan any directories they introduced, refresh ignore statuses and git
    // repository state, then emit a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        let paths = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await;
        // Record the affected paths so the next status update can compute a
        // change set restricted to them.
        if let Some(paths) = &paths {
            util::extend_sorted(
                &mut self.prev_state.lock().event_paths,
                paths.iter().cloned(),
                usize::MAX,
                Ord::cmp,
            );
        }
        // Dropping the sender lets `scan_dirs` terminate once all queued
        // directory jobs are drained.
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        if let Some(paths) = paths {
            for path in paths {
                self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
            }
        }

        // Drop git repositories whose work directory entry (or its `.git`)
        // no longer exists. `mem::take` avoids borrowing `snapshot` twice.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Likewise drop repository entries whose backing repository was removed.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
        self.prev_state.lock().event_paths.clear();
    }
2799
    // Drain `scan_jobs_rx` using one worker per CPU, scanning directories
    // recursively. Refresh requests are still serviced while scanning, and
    // (when enabled) periodic progress updates are sent to the worktree.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiver is gone, the worktree was dropped; stop early.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race; it sends the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent it; catch up.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors on the root itself are reported elsewhere.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2872
    // Publish the current snapshot along with a change set computed against
    // the previously published snapshot, restricted to the recorded event
    // paths. Returns false if the receiving side has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let new_snapshot = self.snapshot.lock().clone();
        // Swap in the new snapshot as the baseline for the next update.
        let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());

        let changes = self.build_change_set(
            &old_snapshot,
            &new_snapshot.snapshot,
            &prev_state.event_paths,
        );

        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot: new_snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2893
    // Scan one directory: read its children, build entries for them, and
    // enqueue scan jobs for subdirectories. `new_jobs` is kept positionally
    // aligned with the directory entries in `new_entries` (None for
    // directories skipped due to recursive symlinks).
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                self.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between read_dir and metadata; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // there should rarely be too numerous. Update the ignore stack associated with any
                // new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // Relies on new_jobs being positionally aligned with the
                    // directory entries seen so far.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    // Placeholder keeps new_jobs aligned with directory entries.
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        // Commit this directory's children to the snapshot before enqueueing
        // the recursive jobs.
        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
3019
    // Re-stat the given absolute paths and update the snapshot accordingly.
    // When `scan_queue_tx` is provided, directories are re-scanned
    // recursively. Returns the affected worktree-relative paths, or `None`
    // if the worktree root can't be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // After sorting, a parent precedes its descendants, so this dedup
        // drops paths already covered by an ancestor in the list.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                // Path exists: (re)insert its entry, and if doing a recursive
                // update, enqueue a scan job for directories.
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against recursive symlinks, as in scan_dir.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                // Path was deleted: clean up any associated git repo state
                // (the entry itself was removed above).
                Ok(None) => {
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
3109
    // Remove git repository state associated with a deleted path. Returns
    // `Option<()>` purely so `?` can be used for early exit; the value
    // carries no meaning.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        // Paths inside a `.git` directory are handled by the repo-reload path,
        // not here.
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;

            // If the deleted path was itself a repository work directory,
            // drop the whole repository.
            if let Some(repository) = snapshot.repository_for_work_directory(path) {
                let entry = repository.work_directory.0;
                snapshot.git_repositories.remove(&entry);
                snapshot
                    .snapshot
                    .repository_entries
                    .remove(&RepositoryWorkDirectory(path.into()));
                return Some(());
            }

            // Otherwise, remove the path's status entries from its containing
            // repository and bump that repository's scan id.
            let repo = snapshot.repository_for_path(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry
                    .statuses
                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
            });
        }

        Some(())
    }
3147
    // Refresh git repository state for a changed path. Paths inside `.git`
    // trigger a full repository reload; other paths refresh just their own
    // git statuses. Returns `Option<()>` purely so `?` can be used for early
    // exit; the value carries no meaning.
    fn reload_repo_for_file_path(
        &self,
        path: &Path,
        snapshot: &mut LocalSnapshot,
        fs: &dyn Fs,
    ) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
                    // No known repo for this `.git` path yet — build one for
                    // the containing `.git` directory.
                    let dot_git_dir = path.ancestors()
                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
                        .next()?;

                    snapshot.build_repo(dot_git_dir.into(), fs);
                    return None;
                };
                // Already fully reloaded during this scan; nothing to do.
                if repo.full_scan_id == scan_id {
                    return None;
                }
                (*entry_id, repo.repo_ptr.to_owned())
            };

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            // Re-read the git index and refresh branch + statuses wholesale.
            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.statuses().unwrap_or_default();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.statuses = statuses;
            });
        } else {
            // Ignored paths don't get statuses; clear any stale repo state.
            if snapshot
                .entry_for_path(&path)
                .map(|entry| entry.is_ignored)
                .unwrap_or(false)
            {
                self.remove_repo_path(&path, snapshot);
                return None;
            }

            let repo = snapshot.repository_for_path(&path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory.clone();

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            let local_repo = snapshot.get_local_repo(&repo)?.to_owned();

            // Short circuit if we've already scanned everything
            if local_repo.full_scan_id == scan_id {
                return None;
            }

            // Refresh the status of the changed path and all of its
            // descendant entries within the repository.
            let mut repository = snapshot.repository_entries.remove(&work_dir)?;

            for entry in snapshot.descendent_entries(false, false, path) {
                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
                    continue;
                };

                let status = local_repo.repo_ptr.lock().status(&repo_path);
                if let Some(status) = status {
                    repository.statuses.insert(repo_path.clone(), status);
                } else {
                    repository.statuses.remove(&repo_path);
                }
            }

            snapshot.repository_entries.insert(work_dir, repository)
        }

        Some(())
    }
3239
    // Propagate changes to `.gitignore` files: drop entries for deleted
    // gitignores, then recursively recompute ignore status under every
    // directory whose gitignore was flagged as needing an update.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        // Work against a clone; the shared snapshot is updated incrementally
        // by `update_ignore_status`.
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // The gitignore file itself no longer exists; schedule removal.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove stale gitignores from both the local clone and the shared
        // snapshot.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants: updating a directory already covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            // Sending on an unbounded channel; block_on completes immediately.
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        // Drain the job queue with one worker per CPU.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3319
    // Recompute ignore status for the direct children of `job.abs_path`,
    // enqueueing recursive jobs for child directories. Entries whose status
    // changed are written back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own gitignore, if any.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // An ignored directory ignores everything beneath it.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose ignored status actually flipped,
            // keeping both trees in sync.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3362
    /// Diffs two snapshots, restricted to the subtrees rooted at
    /// `event_paths`, producing the per-entry change map that backs the
    /// `UpdatedEntries` event.
    ///
    /// Uses two cursors walking `entries_by_path` in lockstep: a path present
    /// only on the old side is `Removed`, only on the new side is `Added`,
    /// and present on both sides with differing mtimes is `Updated`.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path.clone());
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past `path` and
                        // out of its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(
                                        (new_entry.path.clone(), new_entry.id),
                                        AddedOrUpdated,
                                    );
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes
    }
3433
    /// Sleeps for the progress-reporting interval. When `running` is false,
    /// pends forever so that a surrounding `select!` never takes this branch.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under the fake filesystem in tests, use the deterministic
        // executor's simulated delay instead of a real wall-clock timer.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
3446}
3447
3448fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3449 let mut result = root_char_bag;
3450 result.extend(
3451 path.to_string_lossy()
3452 .chars()
3453 .map(|c| c.to_ascii_lowercase()),
3454 );
3455 result
3456}
3457
/// A unit of work for the background scanner: scan one directory.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory's path relative to the worktree root —
    /// presumably; confirm against the scanner's job construction sites.
    path: Arc<Path>,
    /// Gitignore state inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue used to enqueue scan jobs for subdirectories.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories; likely used to break symlink cycles
    /// (see `test_circular_symlinks`) — confirm at the consumer.
    ancestor_inodes: TreeSet<u64>,
}
3465
/// A unit of work for re-evaluating ignore status beneath one directory
/// after a `.gitignore` change.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children get re-evaluated.
    abs_path: Arc<Path>,
    /// Ignore state applying at `abs_path`, before this directory's own
    /// gitignore (which is appended by the consumer).
    ignore_stack: Arc<IgnoreStack>,
    /// Queue for recursively enqueueing jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3471
/// Test-support extension methods on `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    /// Flushes any pending FS events by mutating the worktree's directory
    /// and waiting until those mutations are observed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3479
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait until it appears in the snapshot.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // Remove it again and wait for the removal to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3520
/// Seek dimension accumulated while walking `entries_by_path`: running
/// entry counts plus the right-most path aggregated so far. Field meanings
/// follow the mapping in `TraversalProgress::count`.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// Largest entry path summarized so far.
    max_path: &'a Path,
    /// All entries, including directories and ignored entries.
    count: usize,
    /// Non-ignored entries, including directories.
    visible_count: usize,
    /// File entries, including ignored ones.
    file_count: usize,
    /// Non-ignored file entries.
    visible_file_count: usize,
}
3529
3530impl<'a> TraversalProgress<'a> {
3531 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3532 match (include_ignored, include_dirs) {
3533 (true, true) => self.count,
3534 (true, false) => self.file_count,
3535 (false, true) => self.visible_count,
3536 (false, false) => self.visible_file_count,
3537 }
3538 }
3539}
3540
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Folds one `EntrySummary` into the running totals; `max_path` is
    /// replaced rather than accumulated since summaries arrive in order.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3550
impl<'a> Default for TraversalProgress<'a> {
    /// Zeroed progress: empty max path and all counts at zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3562
/// Filtered, path-ordered iterator over worktree entries, backed by a
/// `SumTree` cursor over `entries_by_path`.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether ignored entries are yielded.
    include_ignored: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
3568
impl<'a> Traversal<'a> {
    /// Advances to the next entry that passes the dirs/ignored filters by
    /// seeking one step forward in the filtered counting space. Returns the
    /// result of `Cursor::seek_forward`.
    pub fn advance(&mut self) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips the current entry's entire subtree, stopping at the next entry
    /// that passes the filters and is not a descendant of the current one.
    /// Loops because the successor may itself be filtered out. Returns
    /// false once the traversal is exhausted.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Filtered entry count at the cursor's start boundary.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Filtered entry count at the cursor's end boundary.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
3616
3617impl<'a> Iterator for Traversal<'a> {
3618 type Item = &'a Entry;
3619
3620 fn next(&mut self) -> Option<Self::Item> {
3621 if let Some(item) = self.entry() {
3622 self.advance();
3623 Some(item)
3624 } else {
3625 None
3626 }
3627 }
3628}
3629
/// Seek target for positioning a `Traversal` cursor
/// (see the `SeekTarget` impl below for the comparison semantics).
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek past the given path's entire subtree.
    PathSuccessor(&'a Path),
    /// Seek to the position holding the given filtered entry count.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3640
3641impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3642 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3643 match self {
3644 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3645 TraversalTarget::PathSuccessor(path) => {
3646 if !cursor_location.max_path.starts_with(path) {
3647 Ordering::Equal
3648 } else {
3649 Ordering::Greater
3650 }
3651 }
3652 TraversalTarget::Count {
3653 count,
3654 include_dirs,
3655 include_ignored,
3656 } => Ord::cmp(
3657 count,
3658 &cursor_location.count(*include_dirs, *include_ignored),
3659 ),
3660 }
3661 }
3662}
3663
/// Iterator over the direct children of `parent_path`, skipping each
/// child's subtree via `advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3668
3669impl<'a> Iterator for ChildEntriesIter<'a> {
3670 type Item = &'a Entry;
3671
3672 fn next(&mut self) -> Option<Self::Item> {
3673 if let Some(item) = self.traversal.entry() {
3674 if item.path.starts_with(&self.parent_path) {
3675 self.traversal.advance_to_sibling();
3676 return Some(item);
3677 }
3678 }
3679 None
3680 }
3681}
3682
/// Iterator over every entry beneath `parent_path`, descending into
/// subtrees via `advance` (unlike `ChildEntriesIter`, which skips them).
struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3687
3688impl<'a> Iterator for DescendentEntriesIter<'a> {
3689 type Item = &'a Entry;
3690
3691 fn next(&mut self) -> Option<Self::Item> {
3692 if let Some(item) = self.traversal.entry() {
3693 if item.path.starts_with(&self.parent_path) {
3694 self.traversal.advance();
3695 return Some(item);
3696 }
3697 }
3698 None
3699 }
3700}
3701
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local worktree entry into its protobuf representation for
    /// transmission to remote peers. The path is serialized lossily as a
    /// string, and the mtime is always present on the wire.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3715
3716impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3717 type Error = anyhow::Error;
3718
3719 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3720 if let Some(mtime) = entry.mtime {
3721 let kind = if entry.is_dir {
3722 EntryKind::Dir
3723 } else {
3724 let mut char_bag = *root_char_bag;
3725 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3726 EntryKind::File(char_bag)
3727 };
3728 let path: Arc<Path> = PathBuf::from(entry.path).into();
3729 Ok(Entry {
3730 id: ProjectEntryId::from_proto(entry.id),
3731 kind,
3732 path,
3733 inode: entry.inode,
3734 mtime: mtime.into(),
3735 is_symlink: entry.is_symlink,
3736 is_ignored: entry.is_ignored,
3737 })
3738 } else {
3739 Err(anyhow!(
3740 "missing mtime in remote worktree entry {:?}",
3741 entry.path
3742 ))
3743 }
3744 }
3745}
3746
3747#[cfg(test)]
3748mod tests {
3749 use super::*;
3750 use fs::{FakeFs, RealFs};
3751 use gpui::{executor::Deterministic, TestAppContext};
3752 use pretty_assertions::assert_eq;
3753 use rand::prelude::*;
3754 use serde_json::json;
3755 use std::{env, fmt::Write};
3756 use util::{http::FakeHttpClient, test::temp_tree};
3757
    /// Verifies that `entries(include_ignored)` traverses the tree in path
    /// order, omitting the gitignored `a/b` unless ignored entries are
    /// requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries, "a/b" is filtered out.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries included, "a/b" appears as well.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3815
    /// Verifies `descendent_entries(include_dirs, include_ignored, path)`:
    /// directory filtering, empty results for dirs with no files, and the
    /// interaction with the gitignored `i/j` subtree.
    #[gpui::test]
    async fn test_descendent_entries(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "a": "",
                "b": {
                    "c": {
                        "d": ""
                    },
                    "e": {}
                },
                "f": "",
                "g": {
                    "h": {}
                },
                "i": {
                    "j": {
                        "k": ""
                    },
                    "l": {

                    }
                },
                ".gitignore": "i/j\n",
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Files only under "b".
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("b/c/d"),]
            );
            // Including directories yields the whole subtree.
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new("b"),
                    Path::new("b/c"),
                    Path::new("b/c/d"),
                    Path::new("b/e"),
                ]
            );

            // "g" contains no files, only directories.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("g"), Path::new("g/h"),]
            );

            // "i"'s only file lives in the gitignored "i/j" subtree, so it
            // is visible only when ignored entries are included.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            assert_eq!(
                tree.descendent_entries(false, true, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i/j/k")]
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i"), Path::new("i/l"),]
            );
        })
    }
3914
    /// Verifies that the scanner terminates on self-referential symlinks
    /// (each "lib" links back to its parent) instead of recursing forever,
    /// and that renaming such a link is picked up on rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back to its own parent directory, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinked dirs appear once each; the cycle is not followed.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one circular symlink is reflected after the rescan settles.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3994
    /// Verifies, against the real filesystem, that ignore status from both
    /// an ancestor `.gitignore` and the tree's own `.gitignore` is applied
    /// on the initial scan and re-applied for files created afterwards.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // .gitignores are handled explicitly by Zed and do not use the git
        // machinery that the git_tests module checks
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Initial scan: statuses reflect both gitignore files.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the scan get the same treatment on rescan.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The .git directory is always treated as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
4075
    /// Verifies that `write_file` creates entries in both tracked and
    /// ignored directories, and that each new entry carries the correct
    /// ignore status.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file into a tracked dir and one into an ignored dir.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4129
    /// Verifies that a directory created while the initial scan is still in
    /// flight lands in the snapshot, and that a remote snapshot replaying
    /// the resulting updates converges to the same state.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Deliberately create the entry before waiting for the scan to finish.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Replaying the update onto the earlier snapshot must converge.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4178
    /// Randomized test: mutates the worktree while the initial scan is still
    /// running, checking snapshot invariants after each mutation and that
    /// incremental remote updates keep a mirrored snapshot in sync.
    /// Operation counts are tunable via the OPERATIONS / INITIAL_ENTRIES
    /// environment variables.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot taken before the scan completes.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally replay an incremental update onto the mirror.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        // Final convergence check once the scan has fully completed.
        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4255
    /// Randomized test of post-scan change tracking: mutates the fake FS and
    /// the worktree with event delivery paused/flushed at random, checking
    /// that `UpdatedEntries` events exactly describe each snapshot
    /// transition, that a freshly scanned worktree matches the final state,
    /// and that remote updates replay correctly onto stored snapshots.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            // Maintain a sorted mirror of the worktree's paths by applying
            // each reported change; it must match the tree after every event.
            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for ((path, _), change_type) in changes.iter() {
                        let path = path.clone();
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }

                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }

                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }

                            PathChange::AddedOrUpdated => {
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }

                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Buffer FS events so flushes can be interleaved with mutations.
        fs.as_fake().pause_events();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if rng.gen_bool(0.2) {
                worktree
                    .update(cx, |worktree, cx| {
                        randomly_mutate_worktree(worktree, &mut rng, cx)
                    })
                    .await
                    .log_err();
            } else {
                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            }

            let buffered_event_count = fs.as_fake().buffered_event_count();
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len);
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            // Occasionally store a snapshot for the replay checks below.
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX);
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A worktree scanned from scratch must agree with the incremental one.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Each stored snapshot, optionally stripped of ignored entries, must
        // converge to the final state when the remote update is applied.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4425
    /// Applies one random mutation (delete, rename, or create/overwrite) to
    /// the worktree through its public entry-manipulation API, returning a
    /// task that resolves when the mutation has been applied.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        log::info!("mutating worktree");
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3 of the time: delete a random entry (never the root).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3: rename a random entry to a fresh name under a random parent.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into its own subtree.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder: create a child under a directory, or overwrite a file.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4486
/// Applies one random mutation to the fake filesystem under `root_path`:
/// creates a new file/dir, writes a `.gitignore`, renames an entry, or
/// deletes an entry. Driven by `rng` so runs are reproducible per seed.
///
/// * `insertion_probability` - chance of taking the "create new entry" branch
///   (forced when the tree contains nothing but the root directory).
///
/// NOTE(review): `fs.as_fake()` implies this must only be called with the
/// fake in-memory FS used by tests.
async fn randomly_mutate_fs(
    fs: &Arc<dyn Fs>,
    root_path: &Path,
    insertion_probability: f64,
    rng: &mut impl Rng,
) {
    log::info!("mutating fs");
    // Partition every existing path under the root into files and dirs.
    // `dirs` always contains at least `root_path` itself.
    let mut files = Vec::new();
    let mut dirs = Vec::new();
    for path in fs.as_fake().paths() {
        if path.starts_with(root_path) {
            if fs.is_file(&path).await {
                files.push(path);
            } else {
                dirs.push(path);
            }
        }
    }

    if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
        // Insert a brand-new file or directory under a random existing dir.
        let path = dirs.choose(rng).unwrap();
        let new_path = path.join(gen_name(rng));

        if rng.gen() {
            log::info!(
                "creating dir {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_dir(&new_path).await.unwrap();
        } else {
            log::info!(
                "creating file {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_file(&new_path, Default::default()).await.unwrap();
        }
    } else if rng.gen_bool(0.05) {
        // Write a `.gitignore` into a random dir, ignoring a random subset
        // of the files and dirs beneath it.
        let ignore_dir_path = dirs.choose(rng).unwrap();
        let ignore_path = ignore_dir_path.join(&*GITIGNORE);

        let subdirs = dirs
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let subfiles = files
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let files_to_ignore = {
            let len = rng.gen_range(0..=subfiles.len());
            subfiles.choose_multiple(rng, len)
        };
        // NOTE(review): exclusive bound here vs. inclusive above — `subdirs`
        // always includes `ignore_dir_path` itself, so this never ignores
        // every subdir; presumably intentional, but worth confirming.
        let dirs_to_ignore = {
            let len = rng.gen_range(0..subdirs.len());
            subdirs.choose_multiple(rng, len)
        };

        // Each ignored path is written relative to the gitignore's dir.
        let mut ignore_contents = String::new();
        for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
            writeln!(
                ignore_contents,
                "{}",
                path_to_ignore
                    .strip_prefix(&ignore_dir_path)
                    .unwrap()
                    .to_str()
                    .unwrap()
            )
            .unwrap();
        }
        log::info!(
            "creating gitignore {:?} with contents:\n{}",
            ignore_path.strip_prefix(&root_path).unwrap(),
            ignore_contents
        );
        fs.save(
            &ignore_path,
            &ignore_contents.as_str().into(),
            Default::default(),
        )
        .await
        .unwrap();
    } else {
        // Rename or delete an existing entry. The root dir (`dirs[0]`) is
        // excluded so it can never be moved or removed.
        let old_path = {
            let file_path = files.choose(rng);
            let dir_path = dirs[1..].choose(rng);
            file_path.into_iter().chain(dir_path).choose(rng).unwrap()
        };

        let is_rename = rng.gen();
        if is_rename {
            // Pick a destination dir that isn't inside the entry being moved.
            let new_path_parent = dirs
                .iter()
                .filter(|d| !d.starts_with(old_path))
                .choose(rng)
                .unwrap();

            // Occasionally replace an entire existing dir, but never one
            // that is an ancestor of the entry being moved.
            let overwrite_existing_dir =
                !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
            let new_path = if overwrite_existing_dir {
                fs.remove_dir(
                    &new_path_parent,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
                new_path_parent.to_path_buf()
            } else {
                new_path_parent.join(gen_name(rng))
            };

            log::info!(
                "renaming {:?} to {}{:?}",
                old_path.strip_prefix(&root_path).unwrap(),
                if overwrite_existing_dir {
                    "overwrite "
                } else {
                    ""
                },
                new_path.strip_prefix(&root_path).unwrap()
            );
            fs.rename(
                &old_path,
                &new_path,
                fs::RenameOptions {
                    overwrite: true,
                    ignore_if_exists: true,
                },
            )
            .await
            .unwrap();
        } else if fs.is_file(&old_path).await {
            log::info!(
                "deleting file {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_file(old_path, Default::default()).await.unwrap();
        } else {
            log::info!(
                "deleting dir {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_dir(
                &old_path,
                RemoveOptions {
                    recursive: true,
                    ignore_if_not_exists: true,
                },
            )
            .await
            .unwrap();
        }
    }
}
4646
4647 fn gen_name(rng: &mut impl Rng) -> String {
4648 (0..6)
4649 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4650 .map(char::from)
4651 .collect()
4652 }
4653
4654 impl LocalSnapshot {
4655 fn check_invariants(&self) {
4656 assert_eq!(
4657 self.entries_by_path
4658 .cursor::<()>()
4659 .map(|e| (&e.path, e.id))
4660 .collect::<Vec<_>>(),
4661 self.entries_by_id
4662 .cursor::<()>()
4663 .map(|e| (&e.path, e.id))
4664 .collect::<collections::BTreeSet<_>>()
4665 .into_iter()
4666 .collect::<Vec<_>>(),
4667 "entries_by_path and entries_by_id are inconsistent"
4668 );
4669
4670 let mut files = self.files(true, 0);
4671 let mut visible_files = self.files(false, 0);
4672 for entry in self.entries_by_path.cursor::<()>() {
4673 if entry.is_file() {
4674 assert_eq!(files.next().unwrap().inode, entry.inode);
4675 if !entry.is_ignored {
4676 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4677 }
4678 }
4679 }
4680
4681 assert!(files.next().is_none());
4682 assert!(visible_files.next().is_none());
4683
4684 let mut bfs_paths = Vec::new();
4685 let mut stack = vec![Path::new("")];
4686 while let Some(path) = stack.pop() {
4687 bfs_paths.push(path);
4688 let ix = stack.len();
4689 for child_entry in self.child_entries(path) {
4690 stack.insert(ix, &child_entry.path);
4691 }
4692 }
4693
4694 let dfs_paths_via_iter = self
4695 .entries_by_path
4696 .cursor::<()>()
4697 .map(|e| e.path.as_ref())
4698 .collect::<Vec<_>>();
4699 assert_eq!(bfs_paths, dfs_paths_via_iter);
4700
4701 let dfs_paths_via_traversal = self
4702 .entries(true)
4703 .map(|e| e.path.as_ref())
4704 .collect::<Vec<_>>();
4705 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4706
4707 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4708 let ignore_parent_path =
4709 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4710 assert!(self.entry_for_path(&ignore_parent_path).is_some());
4711 assert!(self
4712 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4713 .is_some());
4714 }
4715 }
4716
4717 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4718 let mut paths = Vec::new();
4719 for entry in self.entries_by_path.cursor::<()>() {
4720 if include_ignored || !entry.is_ignored {
4721 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4722 }
4723 }
4724 paths.sort_by(|a, b| a.0.cmp(b.0));
4725 paths
4726 }
4727 }
4728
4729 mod git_tests {
4730 use super::*;
4731 use pretty_assertions::assert_eq;
4732
/// Verifies that git statuses follow a repository's work directory when the
/// directory is renamed on disk.
#[gpui::test]
async fn test_rename_work_directory(cx: &mut TestAppContext) {
    // One project with two files; "a" will be committed then modified, "b"
    // stays untracked.
    let root = temp_tree(json!({
        "projects": {
            "project1": {
                "a": "",
                "b": "",
            }
        },

    }));
    let root_path = root.path();

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root_path,
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    // Commit "a", then change it on disk so it reads as Modified; "b" is
    // untracked and should read as Added.
    let repo = git_init(&root_path.join("projects/project1"));
    git_add("a", &repo);
    git_commit("init", &repo);
    std::fs::write(root_path.join("projects/project1/a"), "aa").ok();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;

    tree.flush_fs_events(cx).await;

    cx.read(|cx| {
        let tree = tree.read(cx);
        let (work_dir, repo) = tree.repositories().next().unwrap();
        assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
        assert_eq!(
            repo.status_for_file(tree, Path::new("projects/project1/a")),
            Some(GitFileStatus::Modified)
        );
        assert_eq!(
            repo.status_for_file(tree, Path::new("projects/project1/b")),
            Some(GitFileStatus::Added)
        );
    });

    // Rename the work directory and verify both statuses moved with it.
    std::fs::rename(
        root_path.join("projects/project1"),
        root_path.join("projects/project2"),
    )
    .ok();
    tree.flush_fs_events(cx).await;

    cx.read(|cx| {
        let tree = tree.read(cx);
        let (work_dir, repo) = tree.repositories().next().unwrap();
        assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
        assert_eq!(
            repo.status_for_file(tree, Path::new("projects/project2/a")),
            Some(GitFileStatus::Modified)
        );
        assert_eq!(
            repo.status_for_file(tree, Path::new("projects/project2/b")),
            Some(GitFileStatus::Added)
        );
    });
}
4804
/// Verifies that paths resolve to the correct (innermost) git repository,
/// that repository update events fire on `.git` changes, and that deleting
/// `.git` removes the association.
#[gpui::test]
async fn test_git_repository_for_path(cx: &mut TestAppContext) {
    // Nested layout: `dir1` is a repo that also vendors another repo at
    // `dir1/deps/dep1`; `c.txt` sits outside any repository.
    let root = temp_tree(json!({
        "c.txt": "",
        "dir1": {
            ".git": {},
            "deps": {
                "dep1": {
                    ".git": {},
                    "src": {
                        "a.txt": ""
                    }
                }
            },
            "src": {
                "b.txt": ""
            }
        },
    }));

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root.path(),
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;
    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let tree = tree.as_local().unwrap();

        // A file outside any repository has no associated repo.
        assert!(tree.repository_for_path("c.txt".as_ref()).is_none());

        let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
        assert_eq!(
            entry
                .work_directory(tree)
                .map(|directory| directory.as_ref().to_owned()),
            Some(Path::new("dir1").to_owned())
        );

        // A file in the vendored dependency resolves to the innermost repo,
        // not the outer `dir1` repo.
        let entry = tree
            .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
            .unwrap();
        assert_eq!(
            entry
                .work_directory(tree)
                .map(|directory| directory.as_ref().to_owned()),
            Some(Path::new("dir1/deps/dep1").to_owned())
        );

        let entries = tree.files(false, 0);

        // Pair every visible file with its repository's work directory.
        let paths_with_repos = tree
            .entries_with_repositories(entries)
            .map(|(entry, repo)| {
                (
                    entry.path.as_ref(),
                    repo.and_then(|repo| {
                        repo.work_directory(&tree)
                            .map(|work_directory| work_directory.0.to_path_buf())
                    }),
                )
            })
            .collect::<Vec<_>>();

        assert_eq!(
            paths_with_repos,
            &[
                (Path::new("c.txt"), None),
                (
                    Path::new("dir1/deps/dep1/src/a.txt"),
                    Some(Path::new("dir1/deps/dep1").into())
                ),
                (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
            ]
        );
    });

    // Record repository-update events, then touch a file inside `dir1/.git`
    // to trigger one.
    let repo_update_events = Arc::new(Mutex::new(vec![]));
    tree.update(cx, |_, cx| {
        let repo_update_events = repo_update_events.clone();
        cx.subscribe(&tree, move |_, _, event, _| {
            if let Event::UpdatedGitRepositories(update) = event {
                repo_update_events.lock().push(update.clone());
            }
        })
        .detach();
    });

    std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
    tree.flush_fs_events(cx).await;

    // The first event should report exactly the `dir1` repository.
    assert_eq!(
        repo_update_events.lock()[0]
            .keys()
            .cloned()
            .collect::<Vec<Arc<Path>>>(),
        vec![Path::new("dir1").into()]
    );

    // Deleting `.git` removes the repository association entirely.
    std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let tree = tree.as_local().unwrap();

        assert!(tree
            .repository_for_path("dir1/src/b.txt".as_ref())
            .is_none());
    });
}
4926
/// End-to-end check that the worktree tracks git file statuses through
/// commits, resets, stashes, gitignore changes, and directory renames.
#[gpui::test]
async fn test_git_status(cx: &mut TestAppContext) {
    const IGNORE_RULE: &'static str = "**/target";

    let root = temp_tree(json!({
        "project": {
            "a.txt": "a",
            "b.txt": "bb",
            "c": {
                "d": {
                    "e.txt": "eee"
                }
            },
            "f.txt": "ffff",
            "target": {
                "build_file": "???"
            },
            ".gitignore": IGNORE_RULE
        },

    }));

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root.path(),
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;

    // Paths relative to the repository work directory (`project`).
    const A_TXT: &'static str = "a.txt";
    const B_TXT: &'static str = "b.txt";
    const E_TXT: &'static str = "c/d/e.txt";
    const F_TXT: &'static str = "f.txt";
    const DOTGITIGNORE: &'static str = ".gitignore";
    const BUILD_FILE: &'static str = "target/build_file";

    // Commit a.txt, e.txt, and the gitignore; then modify a.txt on disk so
    // it shows as Modified while b.txt and f.txt remain untracked (Added).
    let work_dir = root.path().join("project");
    let mut repo = git_init(work_dir.as_path());
    repo.add_ignore_rule(IGNORE_RULE).unwrap();
    git_add(Path::new(A_TXT), &repo);
    git_add(Path::new(E_TXT), &repo);
    git_add(Path::new(DOTGITIGNORE), &repo);
    git_commit("Initial commit", &repo);

    std::fs::write(work_dir.join(A_TXT), "aa").unwrap();

    tree.flush_fs_events(cx).await;

    // Check that the right git state is observed on startup
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        assert_eq!(snapshot.repository_entries.iter().count(), 1);
        let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
        assert_eq!(dir.0.as_ref(), Path::new("project"));

        assert_eq!(repo.statuses.iter().count(), 3);
        assert_eq!(
            repo.statuses.get(&Path::new(A_TXT).into()),
            Some(&GitFileStatus::Modified)
        );
        assert_eq!(
            repo.statuses.get(&Path::new(B_TXT).into()),
            Some(&GitFileStatus::Added)
        );
        assert_eq!(
            repo.statuses.get(&Path::new(F_TXT).into()),
            Some(&GitFileStatus::Added)
        );
    });

    // Committing a.txt and b.txt should clear their statuses, leaving only
    // the still-untracked f.txt.
    git_add(Path::new(A_TXT), &repo);
    git_add(Path::new(B_TXT), &repo);
    git_commit("Committing modified and added", &repo);
    tree.flush_fs_events(cx).await;

    // Check that repo only changes are tracked
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        assert_eq!(repo.statuses.iter().count(), 1);
        assert_eq!(
            repo.statuses.get(&Path::new(F_TXT).into()),
            Some(&GitFileStatus::Added)
        );
    });

    // Reset to the previous commit, drop b.txt from the index, stash, then
    // touch e.txt and an ignored file under `target`.
    git_reset(0, &repo);
    git_remove_index(Path::new(B_TXT), &repo);
    git_stash(&mut repo);
    std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
    std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
    tree.flush_fs_events(cx).await;

    // Check that more complex repo changes are tracked
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        // The ignored BUILD_FILE must not appear in the statuses.
        assert_eq!(repo.statuses.iter().count(), 3);
        assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
        assert_eq!(
            repo.statuses.get(&Path::new(B_TXT).into()),
            Some(&GitFileStatus::Added)
        );
        assert_eq!(
            repo.statuses.get(&Path::new(E_TXT).into()),
            Some(&GitFileStatus::Modified)
        );
        assert_eq!(
            repo.statuses.get(&Path::new(F_TXT).into()),
            Some(&GitFileStatus::Added)
        );
    });

    // Delete b.txt and the c/ subtree, and extend the gitignore to also
    // cover f.txt.
    std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
    std::fs::remove_dir_all(work_dir.join("c")).unwrap();
    std::fs::write(
        work_dir.join(DOTGITIGNORE),
        [IGNORE_RULE, "f.txt"].join("\n"),
    )
    .unwrap();

    git_add(Path::new(DOTGITIGNORE), &repo);
    git_commit("Committing modified git ignore", &repo);

    tree.flush_fs_events(cx).await;

    // Check that non-repo behavior is tracked
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        assert_eq!(repo.statuses.iter().count(), 0);
    });

    // A new untracked file in a fresh nested directory should be Added.
    let mut renamed_dir_name = "first_directory/second_directory";
    const RENAMED_FILE: &'static str = "rf.txt";

    std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
    std::fs::write(
        work_dir.join(renamed_dir_name).join(RENAMED_FILE),
        "new-contents",
    )
    .unwrap();

    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        assert_eq!(repo.statuses.iter().count(), 1);
        assert_eq!(
            repo.statuses
                .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
            Some(&GitFileStatus::Added)
        );
    });

    // Renaming the parent directory should move the status to the new path.
    renamed_dir_name = "new_first_directory/second_directory";

    std::fs::rename(
        work_dir.join("first_directory"),
        work_dir.join("new_first_directory"),
    )
    .unwrap();

    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        assert_eq!(repo.statuses.iter().count(), 1);
        assert_eq!(
            repo.statuses
                .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
            Some(&GitFileStatus::Added)
        );
    });
}
5118
5119 #[track_caller]
5120 fn git_init(path: &Path) -> git2::Repository {
5121 git2::Repository::init(path).expect("Failed to initialize git repository")
5122 }
5123
5124 #[track_caller]
5125 fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5126 let path = path.as_ref();
5127 let mut index = repo.index().expect("Failed to get index");
5128 index.add_path(path).expect("Failed to add a.txt");
5129 index.write().expect("Failed to write index");
5130 }
5131
5132 #[track_caller]
5133 fn git_remove_index(path: &Path, repo: &git2::Repository) {
5134 let mut index = repo.index().expect("Failed to get index");
5135 index.remove_path(path).expect("Failed to add a.txt");
5136 index.write().expect("Failed to write index");
5137 }
5138
5139 #[track_caller]
5140 fn git_commit(msg: &'static str, repo: &git2::Repository) {
5141 use git2::Signature;
5142
5143 let signature = Signature::now("test", "test@zed.dev").unwrap();
5144 let oid = repo.index().unwrap().write_tree().unwrap();
5145 let tree = repo.find_tree(oid).unwrap();
5146 if let Some(head) = repo.head().ok() {
5147 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5148
5149 let parent_commit = parent_obj.as_commit().unwrap();
5150
5151 repo.commit(
5152 Some("HEAD"),
5153 &signature,
5154 &signature,
5155 msg,
5156 &tree,
5157 &[parent_commit],
5158 )
5159 .expect("Failed to commit with parent");
5160 } else {
5161 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5162 .expect("Failed to commit");
5163 }
5164 }
5165
5166 #[track_caller]
5167 fn git_stash(repo: &mut git2::Repository) {
5168 use git2::Signature;
5169
5170 let signature = Signature::now("test", "test@zed.dev").unwrap();
5171 repo.stash_save(&signature, "N/A", None)
5172 .expect("Failed to stash");
5173 }
5174
5175 #[track_caller]
5176 fn git_reset(offset: usize, repo: &git2::Repository) {
5177 let head = repo.head().expect("Couldn't get repo head");
5178 let object = head.peel(git2::ObjectType::Commit).unwrap();
5179 let commit = object.as_commit().unwrap();
5180 let new_head = commit
5181 .parents()
5182 .inspect(|parnet| {
5183 parnet.message();
5184 })
5185 .skip(offset)
5186 .next()
5187 .expect("Not enough history");
5188 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5189 .expect("Could not reset");
5190 }
5191
5192 #[allow(dead_code)]
5193 #[track_caller]
5194 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5195 repo.statuses(None)
5196 .unwrap()
5197 .iter()
5198 .map(|status| (status.path().unwrap().to_string(), status.status()))
5199 .collect()
5200 }
5201 }
5202}