worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TryFutureExt};
  59
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  61pub struct WorktreeId(usize);
  62
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
  68pub struct LocalWorktree {
  69    snapshot: LocalSnapshot,
  70    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
  71    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
  72    _background_scanner_task: Task<()>,
  73    share: Option<ShareState>,
  74    diagnostics: HashMap<
  75        Arc<Path>,
  76        Vec<(
  77            LanguageServerId,
  78            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
  79        )>,
  80    >,
  81    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  82    client: Arc<Client>,
  83    fs: Arc<dyn Fs>,
  84    visible: bool,
  85}
  86
  87pub struct RemoteWorktree {
  88    snapshot: Snapshot,
  89    background_snapshot: Arc<Mutex<Snapshot>>,
  90    project_id: u64,
  91    client: Arc<Client>,
  92    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
  93    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
  94    replica_id: ReplicaId,
  95    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  96    visible: bool,
  97    disconnected: bool,
  98}
  99
 100#[derive(Clone)]
 101pub struct Snapshot {
 102    id: WorktreeId,
 103    abs_path: Arc<Path>,
 104    root_name: String,
 105    root_char_bag: CharBag,
 106    entries_by_path: SumTree<Entry>,
 107    entries_by_id: SumTree<PathEntry>,
 108    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 109
 110    /// A number that increases every time the worktree begins scanning
 111    /// a set of paths from the filesystem. This scanning could be caused
 112    /// by some operation performed on the worktree, such as reading or
 113    /// writing a file, or by an event reported by the filesystem.
 114    scan_id: usize,
 115
 116    /// The latest scan id that has completed, and whose preceding scans
 117    /// have all completed. The current `scan_id` could be more than one
 118    /// greater than the `completed_scan_id` if operations are performed
 119    /// on the worktree while it is processing a file-system event.
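    /// For example (illustrative numbers): if scan 4 has finished but scans 5
    /// and 6 are still in flight, `scan_id` is 6 while `completed_scan_id`
    /// remains 4.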
 120    completed_scan_id: usize,
 121}
 122
 123impl Snapshot {
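    /// Returns the repository entry whose work directory contains `path`,
    /// preferring the innermost (longest) work directory when repositories are
    /// nested; e.g. (illustrative) with work directories "" and "sub", a query
    /// for "sub/main.rs" resolves to the repository rooted at "sub".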
 124    pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
 125        let mut max_len = 0;
 126        let mut current_candidate = None;
 127        for (work_directory, repo) in self.repository_entries.iter() {
 128            if repo.contains(self, path) {
 129                if work_directory.0.as_os_str().len() >= max_len {
 130                    current_candidate = Some(repo);
 131                    max_len = work_directory.0.as_os_str().len();
 132                } else {
 133                    break;
 134                }
 135            }
 136        }
 137
 138        current_candidate.map(|entry| entry.to_owned())
 139    }
 140}
 141
 142#[derive(Clone, Debug, PartialEq, Eq)]
 143pub struct RepositoryEntry {
 144    pub(crate) work_directory: WorkDirectoryEntry,
 145    pub(crate) branch: Option<Arc<str>>,
 146    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
 147}
 148
 149fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
 150    proto::GitStatus::from_i32(git_status).map(|status| match status {
 151        proto::GitStatus::Added => GitFileStatus::Added,
 152        proto::GitStatus::Modified => GitFileStatus::Modified,
 153        proto::GitStatus::Conflict => GitFileStatus::Conflict,
 154    })
 155}
 156
 157impl RepositoryEntry {
 158    pub fn branch(&self) -> Option<Arc<str>> {
 159        self.branch.clone()
 160    }
 161
 162    pub fn work_directory_id(&self) -> ProjectEntryId {
 163        *self.work_directory
 164    }
 165
 166    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 167        snapshot
 168            .entry_for_id(self.work_directory_id())
 169            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 170    }
 171
 172    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
 173        self.work_directory.contains(snapshot, path)
 174    }
 175
 176    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 177        self.work_directory
 178            .relativize(snapshot, path)
 179            .and_then(|repo_path| self.statuses.get(&repo_path))
 180            .cloned()
 181    }
 182
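    /// Returns the status to display for `path` itself: its own recorded
    /// status if present, otherwise `Modified` when any entry beneath it has a
    /// status, so directories reflect changes in their descendants.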
 183    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 184        self.work_directory
 185            .relativize(snapshot, path)
 186            .and_then(|repo_path| {
 187                self.statuses
 188                    .iter_from(&repo_path)
 189                    .take_while(|(key, _)| key.starts_with(&repo_path))
 190                    .map(|(path, status)| {
 191                        if path == &repo_path {
 192                            status
 193                        } else {
 194                            &GitFileStatus::Modified
 195                        }
 196                    })
 197                    .next()
 198                    .copied()
 199            })
 200    }
 201
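    /// Computes the delta between this repository entry and `other` by walking
    /// both sorted status maps in lockstep: statuses present only in `self`,
    /// or whose value changed, become `updated_statuses`, while statuses
    /// present only in `other` are reported in `removed_repo_paths`.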
 202    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
 203        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
 204        let mut removed_statuses: Vec<String> = Vec::new();
 205
 206        let mut self_statuses = self.statuses.iter().peekable();
 207        let mut other_statuses = other.statuses.iter().peekable();
 208        loop {
 209            match (self_statuses.peek(), other_statuses.peek()) {
 210                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
 211                    match Ord::cmp(self_repo_path, other_repo_path) {
 212                        Ordering::Less => {
 213                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
 214                            self_statuses.next();
 215                        }
 216                        Ordering::Equal => {
 217                            if self_status != other_status {
 218                                updated_statuses
 219                                    .push(make_status_entry(self_repo_path, self_status));
 220                            }
 221
 222                            self_statuses.next();
 223                            other_statuses.next();
 224                        }
 225                        Ordering::Greater => {
 226                            removed_statuses.push(make_repo_path(other_repo_path));
 227                            other_statuses.next();
 228                        }
 229                    }
 230                }
 231                (Some((self_repo_path, self_status)), None) => {
 232                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
 233                    self_statuses.next();
 234                }
 235                (None, Some((other_repo_path, _))) => {
 236                    removed_statuses.push(make_repo_path(other_repo_path));
 237                    other_statuses.next();
 238                }
 239                (None, None) => break,
 240            }
 241        }
 242
 243        proto::RepositoryEntry {
 244            work_directory_id: self.work_directory_id().to_proto(),
 245            branch: self.branch.as_ref().map(|str| str.to_string()),
 246            removed_repo_paths: removed_statuses,
 247            updated_statuses,
 248        }
 249    }
 250}
 251
 252fn make_repo_path(path: &RepoPath) -> String {
 253    path.as_os_str().to_string_lossy().to_string()
 254}
 255
 256fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
 257    proto::StatusEntry {
 258        repo_path: make_repo_path(path),
 259        status: match status {
 260            GitFileStatus::Added => proto::GitStatus::Added.into(),
 261            GitFileStatus::Modified => proto::GitStatus::Modified.into(),
 262            GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
 263        },
 264    }
 265}
 266
 267impl From<&RepositoryEntry> for proto::RepositoryEntry {
 268    fn from(value: &RepositoryEntry) -> Self {
 269        proto::RepositoryEntry {
 270            work_directory_id: value.work_directory.to_proto(),
 271            branch: value.branch.as_ref().map(|str| str.to_string()),
 272            updated_statuses: value
 273                .statuses
 274                .iter()
 275                .map(|(repo_path, status)| make_status_entry(repo_path, status))
 276                .collect(),
 277            removed_repo_paths: Default::default(),
 278        }
 279    }
 280}
 281
 282/// The worktree-relative path of a repository's working directory, i.e. the 'content path' (the folder that contains the `.git` directory or file).
 283#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 284pub struct RepositoryWorkDirectory(Arc<Path>);
 285
 286impl Default for RepositoryWorkDirectory {
 287    fn default() -> Self {
 288        RepositoryWorkDirectory(Arc::from(Path::new("")))
 289    }
 290}
 291
 292impl AsRef<Path> for RepositoryWorkDirectory {
 293    fn as_ref(&self) -> &Path {
 294        self.0.as_ref()
 295    }
 296}
 297
 298#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 299pub struct WorkDirectoryEntry(ProjectEntryId);
 300
 301impl WorkDirectoryEntry {
 302    // Note that these paths should be relative to the worktree root.
 303    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
 304        snapshot
 305            .entry_for_id(self.0)
 306            .map(|entry| path.starts_with(&entry.path))
 307            .unwrap_or(false)
 308    }
 309
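    /// Strips this work directory's worktree-relative prefix from `path`,
    /// yielding a path relative to the repository root; e.g. (illustrative)
    /// with the work directory at "sub", "sub/src/main.rs" becomes
    /// "src/main.rs".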
 310    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 311        worktree.entry_for_id(self.0).and_then(|entry| {
 312            path.strip_prefix(&entry.path)
 313                .ok()
 314                .map(move |path| path.into())
 315        })
 316    }
 317}
 318
 319impl Deref for WorkDirectoryEntry {
 320    type Target = ProjectEntryId;
 321
 322    fn deref(&self) -> &Self::Target {
 323        &self.0
 324    }
 325}
 326
 327impl From<ProjectEntryId> for WorkDirectoryEntry {
 328    fn from(value: ProjectEntryId) -> Self {
 329        WorkDirectoryEntry(value)
 330    }
 331}
 332
 333#[derive(Debug, Clone)]
 334pub struct LocalSnapshot {
 335    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
 336    // Keyed by the ProjectEntryId of each repository's work directory entry
 337    // (the same id returned by `RepositoryEntry::work_directory_id`).
 338    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 339    removed_entry_ids: HashMap<u64, ProjectEntryId>,
 340    next_entry_id: Arc<AtomicUsize>,
 341    snapshot: Snapshot,
 342}
 343
 344#[derive(Debug, Clone)]
 345pub struct LocalRepositoryEntry {
 346    pub(crate) scan_id: usize,
 347    pub(crate) full_scan_id: usize,
 348    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
 349    /// Path to the actual .git folder.
 350    /// Note: if .git is a file, this points to the folder indicated by the .git file
 351    pub(crate) git_dir_path: Arc<Path>,
 352}
 353
 354impl LocalRepositoryEntry {
 355    // Note that this path should be relative to the worktree root.
 356    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 357        path.starts_with(self.git_dir_path.as_ref())
 358    }
 359}
 360
 361impl Deref for LocalSnapshot {
 362    type Target = Snapshot;
 363
 364    fn deref(&self) -> &Self::Target {
 365        &self.snapshot
 366    }
 367}
 368
 369impl DerefMut for LocalSnapshot {
 370    fn deref_mut(&mut self) -> &mut Self::Target {
 371        &mut self.snapshot
 372    }
 373}
 374
 375enum ScanState {
 376    Started,
 377    Updated {
 378        snapshot: LocalSnapshot,
 379        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
 380        barrier: Option<barrier::Sender>,
 381        scanning: bool,
 382    },
 383}
 384
 385struct ShareState {
 386    project_id: u64,
 387    snapshots_tx: watch::Sender<LocalSnapshot>,
 388    resume_updates: watch::Sender<()>,
 389    _maintain_remote_snapshot: Task<Option<()>>,
 390}
 391
 392pub enum Event {
 393    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
 394    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
 395}
 396
 397impl Entity for Worktree {
 398    type Event = Event;
 399}
 400
 401impl Worktree {
 402    pub async fn local(
 403        client: Arc<Client>,
 404        path: impl Into<Arc<Path>>,
 405        visible: bool,
 406        fs: Arc<dyn Fs>,
 407        next_entry_id: Arc<AtomicUsize>,
 408        cx: &mut AsyncAppContext,
 409    ) -> Result<ModelHandle<Self>> {
 410        // After determining whether the root entry is a file or a directory, populate the
 411        // snapshot's "root name", which is used for fuzzy matching.
 412        let abs_path = path.into();
 413        let metadata = fs
 414            .metadata(&abs_path)
 415            .await
 416            .context("failed to stat worktree path")?;
 417
 418        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
 419            let root_name = abs_path
 420                .file_name()
 421                .map_or(String::new(), |f| f.to_string_lossy().to_string());
 422
 423            let mut snapshot = LocalSnapshot {
 424                ignores_by_parent_abs_path: Default::default(),
 425                removed_entry_ids: Default::default(),
 426                git_repositories: Default::default(),
 427                next_entry_id,
 428                snapshot: Snapshot {
 429                    id: WorktreeId::from_usize(cx.model_id()),
 430                    abs_path: abs_path.clone(),
 431                    root_name: root_name.clone(),
 432                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
 433                    entries_by_path: Default::default(),
 434                    entries_by_id: Default::default(),
 435                    repository_entries: Default::default(),
 436                    scan_id: 1,
 437                    completed_scan_id: 0,
 438                },
 439            };
 440
 441            if let Some(metadata) = metadata {
 442                snapshot.insert_entry(
 443                    Entry::new(
 444                        Arc::from(Path::new("")),
 445                        &metadata,
 446                        &snapshot.next_entry_id,
 447                        snapshot.root_char_bag,
 448                    ),
 449                    fs.as_ref(),
 450                );
 451            }
 452
 453            let (path_changes_tx, path_changes_rx) = channel::unbounded();
 454            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 455
 456            cx.spawn_weak(|this, mut cx| async move {
 457                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
 458                    this.update(&mut cx, |this, cx| {
 459                        let this = this.as_local_mut().unwrap();
 460                        match state {
 461                            ScanState::Started => {
 462                                *this.is_scanning.0.borrow_mut() = true;
 463                            }
 464                            ScanState::Updated {
 465                                snapshot,
 466                                changes,
 467                                barrier,
 468                                scanning,
 469                            } => {
 470                                *this.is_scanning.0.borrow_mut() = scanning;
 471                                this.set_snapshot(snapshot, cx);
 472                                cx.emit(Event::UpdatedEntries(changes));
 473                                drop(barrier);
 474                            }
 475                        }
 476                        cx.notify();
 477                    });
 478                }
 479            })
 480            .detach();
 481
 482            let background_scanner_task = cx.background().spawn({
 483                let fs = fs.clone();
 484                let snapshot = snapshot.clone();
 485                let background = cx.background().clone();
 486                async move {
 487                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 488                    BackgroundScanner::new(
 489                        snapshot,
 490                        fs,
 491                        scan_states_tx,
 492                        background,
 493                        path_changes_rx,
 494                    )
 495                    .run(events)
 496                    .await;
 497                }
 498            });
 499
 500            Worktree::Local(LocalWorktree {
 501                snapshot,
 502                is_scanning: watch::channel_with(true),
 503                share: None,
 504                path_changes_tx,
 505                _background_scanner_task: background_scanner_task,
 506                diagnostics: Default::default(),
 507                diagnostic_summaries: Default::default(),
 508                client,
 509                fs,
 510                visible,
 511            })
 512        }))
 513    }
 514
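    /// Constructs a worktree that mirrors one hosted by a remote collaborator:
    /// updates received via `update_from_remote` are applied to the shared
    /// `background_snapshot` on a background thread, and the foreground
    /// snapshot is then refreshed from it.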
 515    pub fn remote(
 516        project_remote_id: u64,
 517        replica_id: ReplicaId,
 518        worktree: proto::WorktreeMetadata,
 519        client: Arc<Client>,
 520        cx: &mut AppContext,
 521    ) -> ModelHandle<Self> {
 522        cx.add_model(|cx: &mut ModelContext<Self>| {
 523            let snapshot = Snapshot {
 524                id: WorktreeId(worktree.id as usize),
 525                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
 526                root_name: worktree.root_name.clone(),
 527                root_char_bag: worktree
 528                    .root_name
 529                    .chars()
 530                    .map(|c| c.to_ascii_lowercase())
 531                    .collect(),
 532                entries_by_path: Default::default(),
 533                entries_by_id: Default::default(),
 534                repository_entries: Default::default(),
 535                scan_id: 1,
 536                completed_scan_id: 0,
 537            };
 538
 539            let (updates_tx, mut updates_rx) = mpsc::unbounded();
 540            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
 541            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 542
 543            cx.background()
 544                .spawn({
 545                    let background_snapshot = background_snapshot.clone();
 546                    async move {
 547                        while let Some(update) = updates_rx.next().await {
 548                            if let Err(error) =
 549                                background_snapshot.lock().apply_remote_update(update)
 550                            {
 551                                log::error!("error applying worktree update: {}", error);
 552                            }
 553                            snapshot_updated_tx.send(()).await.ok();
 554                        }
 555                    }
 556                })
 557                .detach();
 558
 559            cx.spawn_weak(|this, mut cx| async move {
 560                while (snapshot_updated_rx.recv().await).is_some() {
 561                    if let Some(this) = this.upgrade(&cx) {
 562                        this.update(&mut cx, |this, cx| {
 563                            let this = this.as_remote_mut().unwrap();
 564                            this.snapshot = this.background_snapshot.lock().clone();
 565                            cx.emit(Event::UpdatedEntries(Default::default()));
 566                            cx.notify();
 567                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 568                                if this.observed_snapshot(*scan_id) {
 569                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 570                                    let _ = tx.send(());
 571                                } else {
 572                                    break;
 573                                }
 574                            }
 575                        });
 576                    } else {
 577                        break;
 578                    }
 579                }
 580            })
 581            .detach();
 582
 583            Worktree::Remote(RemoteWorktree {
 584                project_id: project_remote_id,
 585                replica_id,
 586                snapshot: snapshot.clone(),
 587                background_snapshot,
 588                updates_tx: Some(updates_tx),
 589                snapshot_subscriptions: Default::default(),
 590                client: client.clone(),
 591                diagnostic_summaries: Default::default(),
 592                visible: worktree.visible,
 593                disconnected: false,
 594            })
 595        })
 596    }
 597
 598    pub fn as_local(&self) -> Option<&LocalWorktree> {
 599        if let Worktree::Local(worktree) = self {
 600            Some(worktree)
 601        } else {
 602            None
 603        }
 604    }
 605
 606    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 607        if let Worktree::Remote(worktree) = self {
 608            Some(worktree)
 609        } else {
 610            None
 611        }
 612    }
 613
 614    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 615        if let Worktree::Local(worktree) = self {
 616            Some(worktree)
 617        } else {
 618            None
 619        }
 620    }
 621
 622    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 623        if let Worktree::Remote(worktree) = self {
 624            Some(worktree)
 625        } else {
 626            None
 627        }
 628    }
 629
 630    pub fn is_local(&self) -> bool {
 631        matches!(self, Worktree::Local(_))
 632    }
 633
 634    pub fn is_remote(&self) -> bool {
 635        !self.is_local()
 636    }
 637
 638    pub fn snapshot(&self) -> Snapshot {
 639        match self {
 640            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 641            Worktree::Remote(worktree) => worktree.snapshot(),
 642        }
 643    }
 644
 645    pub fn scan_id(&self) -> usize {
 646        match self {
 647            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 648            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 649        }
 650    }
 651
 652    pub fn completed_scan_id(&self) -> usize {
 653        match self {
 654            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 655            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 656        }
 657    }
 658
 659    pub fn is_visible(&self) -> bool {
 660        match self {
 661            Worktree::Local(worktree) => worktree.visible,
 662            Worktree::Remote(worktree) => worktree.visible,
 663        }
 664    }
 665
 666    pub fn replica_id(&self) -> ReplicaId {
 667        match self {
 668            Worktree::Local(_) => 0,
 669            Worktree::Remote(worktree) => worktree.replica_id,
 670        }
 671    }
 672
 673    pub fn diagnostic_summaries(
 674        &self,
 675    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 676        match self {
 677            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 678            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 679        }
 680        .iter()
 681        .flat_map(|(path, summaries)| {
 682            summaries
 683                .iter()
 684                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 685        })
 686    }
 687
 688    pub fn abs_path(&self) -> Arc<Path> {
 689        match self {
 690            Worktree::Local(worktree) => worktree.abs_path.clone(),
 691            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 692        }
 693    }
 694}
 695
 696impl LocalWorktree {
 697    pub fn contains_abs_path(&self, path: &Path) -> bool {
 698        path.starts_with(&self.abs_path)
 699    }
 700
 701    fn absolutize(&self, path: &Path) -> PathBuf {
 702        if path.file_name().is_some() {
 703            self.abs_path.join(path)
 704        } else {
 705            self.abs_path.to_path_buf()
 706        }
 707    }
 708
 709    pub(crate) fn load_buffer(
 710        &mut self,
 711        id: u64,
 712        path: &Path,
 713        cx: &mut ModelContext<Worktree>,
 714    ) -> Task<Result<ModelHandle<Buffer>>> {
 715        let path = Arc::from(path);
 716        cx.spawn(move |this, mut cx| async move {
 717            let (file, contents, diff_base) = this
 718                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
 719                .await?;
 720            let text_buffer = cx
 721                .background()
 722                .spawn(async move { text::Buffer::new(0, id, contents) })
 723                .await;
 724            Ok(cx.add_model(|cx| {
 725                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
 726                buffer.git_diff_recalc(cx);
 727                buffer
 728            }))
 729        })
 730    }
 731
 732    pub fn diagnostics_for_path(
 733        &self,
 734        path: &Path,
 735    ) -> Vec<(
 736        LanguageServerId,
 737        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 738    )> {
 739        self.diagnostics.get(path).cloned().unwrap_or_default()
 740    }
 741
 742    pub fn update_diagnostics(
 743        &mut self,
 744        server_id: LanguageServerId,
 745        worktree_path: Arc<Path>,
 746        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 747        _: &mut ModelContext<Worktree>,
 748    ) -> Result<bool> {
 749        let summaries_by_server_id = self
 750            .diagnostic_summaries
 751            .entry(worktree_path.clone())
 752            .or_default();
 753
 754        let old_summary = summaries_by_server_id
 755            .remove(&server_id)
 756            .unwrap_or_default();
 757
 758        let new_summary = DiagnosticSummary::new(&diagnostics);
 759        if new_summary.is_empty() {
 760            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
 761                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 762                    diagnostics_by_server_id.remove(ix);
 763                }
 764                if diagnostics_by_server_id.is_empty() {
 765                    self.diagnostics.remove(&worktree_path);
 766                }
 767            }
 768        } else {
 769            summaries_by_server_id.insert(server_id, new_summary);
 770            let diagnostics_by_server_id =
 771                self.diagnostics.entry(worktree_path.clone()).or_default();
 772            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 773                Ok(ix) => {
 774                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
 775                }
 776                Err(ix) => {
 777                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
 778                }
 779            }
 780        }
 781
 782        if !old_summary.is_empty() || !new_summary.is_empty() {
 783            if let Some(share) = self.share.as_ref() {
 784                self.client
 785                    .send(proto::UpdateDiagnosticSummary {
 786                        project_id: share.project_id,
 787                        worktree_id: self.id().to_proto(),
 788                        summary: Some(proto::DiagnosticSummary {
 789                            path: worktree_path.to_string_lossy().to_string(),
 790                            language_server_id: server_id.0 as u64,
 791                            error_count: new_summary.error_count as u32,
 792                            warning_count: new_summary.warning_count as u32,
 793                        }),
 794                    })
 795                    .log_err();
 796            }
 797        }
 798
 799        Ok(!old_summary.is_empty() || !new_summary.is_empty())
 800    }
 801
 802    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
 803        let updated_repos =
 804            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
 805        self.snapshot = new_snapshot;
 806
 807        if let Some(share) = self.share.as_mut() {
 808            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
 809        }
 810
 811        if !updated_repos.is_empty() {
 812            cx.emit(Event::UpdatedGitRepositories(updated_repos));
 813        }
 814    }
 815
 816    fn changed_repos(
 817        &self,
 818        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 819        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 820    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
 821        let mut diff = HashMap::default();
 822        let mut old_repos = old_repos.iter().peekable();
 823        let mut new_repos = new_repos.iter().peekable();
 824        loop {
 825            match (old_repos.peek(), new_repos.peek()) {
 826                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
 827                    match Ord::cmp(old_entry_id, new_entry_id) {
 828                        Ordering::Less => {
 829                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
 830                                diff.insert(entry.path.clone(), (*old_repo).clone());
 831                            }
 832                            old_repos.next();
 833                        }
 834                        Ordering::Equal => {
 835                            if old_repo.scan_id != new_repo.scan_id {
 836                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
 837                                    diff.insert(entry.path.clone(), (*new_repo).clone());
 838                                }
 839                            }
 840
 841                            old_repos.next();
 842                            new_repos.next();
 843                        }
 844                        Ordering::Greater => {
 845                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
 846                                diff.insert(entry.path.clone(), (*new_repo).clone());
 847                            }
 848                            new_repos.next();
 849                        }
 850                    }
 851                }
 852                (Some((old_entry_id, old_repo)), None) => {
 853                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
 854                        diff.insert(entry.path.clone(), (*old_repo).clone());
 855                    }
 856                    old_repos.next();
 857                }
 858                (None, Some((new_entry_id, new_repo))) => {
 859                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
 860                        diff.insert(entry.path.clone(), (*new_repo).clone());
 861                    }
 862                    new_repos.next();
 863                }
 864                (None, None) => break,
 865            }
 866        }
 867        diff
 868    }
 869
 870    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 871        let mut is_scanning_rx = self.is_scanning.1.clone();
 872        async move {
 873            let mut is_scanning = is_scanning_rx.borrow().clone();
 874            while is_scanning {
 875                if let Some(value) = is_scanning_rx.recv().await {
 876                    is_scanning = value;
 877                } else {
 878                    break;
 879                }
 880            }
 881        }
 882    }
 883
 884    pub fn snapshot(&self) -> LocalSnapshot {
 885        self.snapshot.clone()
 886    }
 887
 888    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 889        proto::WorktreeMetadata {
 890            id: self.id().to_proto(),
 891            root_name: self.root_name().to_string(),
 892            visible: self.visible,
 893            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 894        }
 895    }
 896
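    /// Reads the file at `path` from disk and, when the file belongs to a git
    /// repository, loads the corresponding index text to serve as the diff
    /// base; the entry for the path is then refreshed so the snapshot reflects
    /// what was just loaded.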
 897    fn load(
 898        &self,
 899        path: &Path,
 900        cx: &mut ModelContext<Worktree>,
 901    ) -> Task<Result<(File, String, Option<String>)>> {
 902        let handle = cx.handle();
 903        let path = Arc::from(path);
 904        let abs_path = self.absolutize(&path);
 905        let fs = self.fs.clone();
 906        let snapshot = self.snapshot();
 907
 908        let mut index_task = None;
 909
 910        if let Some(repo) = snapshot.repo_for(&path) {
 911            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
 912            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
 913                let repo = repo.repo_ptr.to_owned();
 914                index_task = Some(
 915                    cx.background()
 916                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
 917                );
 918            }
 919        }
 920
 921        cx.spawn(|this, mut cx| async move {
 922            let text = fs.load(&abs_path).await?;
 923
 924            let diff_base = if let Some(index_task) = index_task {
 925                index_task.await
 926            } else {
 927                None
 928            };
 929
 930            // Eagerly populate the snapshot with an updated entry for the loaded file
 931            let entry = this
 932                .update(&mut cx, |this, cx| {
 933                    this.as_local().unwrap().refresh_entry(path, None, cx)
 934                })
 935                .await?;
 936
 937            Ok((
 938                File {
 939                    entry_id: entry.id,
 940                    worktree: handle,
 941                    path: entry.path,
 942                    mtime: entry.mtime,
 943                    is_local: true,
 944                    is_deleted: false,
 945                },
 946                text,
 947                diff_base,
 948            ))
 949        })
 950    }
 951
 952    pub fn save_buffer(
 953        &self,
 954        buffer_handle: ModelHandle<Buffer>,
 955        path: Arc<Path>,
 956        has_changed_file: bool,
 957        cx: &mut ModelContext<Worktree>,
 958    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
 959        let handle = cx.handle();
 960        let buffer = buffer_handle.read(cx);
 961
 962        let rpc = self.client.clone();
 963        let buffer_id = buffer.remote_id();
 964        let project_id = self.share.as_ref().map(|share| share.project_id);
 965
 966        let text = buffer.as_rope().clone();
 967        let fingerprint = text.fingerprint();
 968        let version = buffer.version();
 969        let save = self.write_file(path, text, buffer.line_ending(), cx);
 970
 971        cx.as_mut().spawn(|mut cx| async move {
 972            let entry = save.await?;
 973
 974            if has_changed_file {
 975                let new_file = Arc::new(File {
 976                    entry_id: entry.id,
 977                    worktree: handle,
 978                    path: entry.path,
 979                    mtime: entry.mtime,
 980                    is_local: true,
 981                    is_deleted: false,
 982                });
 983
 984                if let Some(project_id) = project_id {
 985                    rpc.send(proto::UpdateBufferFile {
 986                        project_id,
 987                        buffer_id,
 988                        file: Some(new_file.to_proto()),
 989                    })
 990                    .log_err();
 991                }
 992
 993                buffer_handle.update(&mut cx, |buffer, cx| {
 994                    if has_changed_file {
 995                        buffer.file_updated(new_file, cx).detach();
 996                    }
 997                });
 998            }
 999
1000            if let Some(project_id) = project_id {
1001                rpc.send(proto::BufferSaved {
1002                    project_id,
1003                    buffer_id,
1004                    version: serialize_version(&version),
1005                    mtime: Some(entry.mtime.into()),
1006                    fingerprint: serialize_fingerprint(fingerprint),
1007                })?;
1008            }
1009
1010            buffer_handle.update(&mut cx, |buffer, cx| {
1011                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1012            });
1013
1014            Ok((version, fingerprint, entry.mtime))
1015        })
1016    }
1017
1018    pub fn create_entry(
1019        &self,
1020        path: impl Into<Arc<Path>>,
1021        is_dir: bool,
1022        cx: &mut ModelContext<Worktree>,
1023    ) -> Task<Result<Entry>> {
1024        let path = path.into();
1025        let abs_path = self.absolutize(&path);
1026        let fs = self.fs.clone();
1027        let write = cx.background().spawn(async move {
1028            if is_dir {
1029                fs.create_dir(&abs_path).await
1030            } else {
1031                fs.save(&abs_path, &Default::default(), Default::default())
1032                    .await
1033            }
1034        });
1035
1036        cx.spawn(|this, mut cx| async move {
1037            write.await?;
1038            this.update(&mut cx, |this, cx| {
1039                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1040            })
1041            .await
1042        })
1043    }
1044
1045    pub fn write_file(
1046        &self,
1047        path: impl Into<Arc<Path>>,
1048        text: Rope,
1049        line_ending: LineEnding,
1050        cx: &mut ModelContext<Worktree>,
1051    ) -> Task<Result<Entry>> {
1052        let path = path.into();
1053        let abs_path = self.absolutize(&path);
1054        let fs = self.fs.clone();
1055        let write = cx
1056            .background()
1057            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1058
1059        cx.spawn(|this, mut cx| async move {
1060            write.await?;
1061            this.update(&mut cx, |this, cx| {
1062                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1063            })
1064            .await
1065        })
1066    }
1067
1068    pub fn delete_entry(
1069        &self,
1070        entry_id: ProjectEntryId,
1071        cx: &mut ModelContext<Worktree>,
1072    ) -> Option<Task<Result<()>>> {
1073        let entry = self.entry_for_id(entry_id)?.clone();
1074        let abs_path = self.abs_path.clone();
1075        let fs = self.fs.clone();
1076
1077        let delete = cx.background().spawn(async move {
1078            let mut abs_path = fs.canonicalize(&abs_path).await?;
1079            if entry.path.file_name().is_some() {
1080                abs_path = abs_path.join(&entry.path);
1081            }
1082            if entry.is_file() {
1083                fs.remove_file(&abs_path, Default::default()).await?;
1084            } else {
1085                fs.remove_dir(
1086                    &abs_path,
1087                    RemoveOptions {
1088                        recursive: true,
1089                        ignore_if_not_exists: false,
1090                    },
1091                )
1092                .await?;
1093            }
1094            anyhow::Ok(abs_path)
1095        });
1096
1097        Some(cx.spawn(|this, mut cx| async move {
1098            let abs_path = delete.await?;
1099            let (tx, mut rx) = barrier::channel();
1100            this.update(&mut cx, |this, _| {
1101                this.as_local_mut()
1102                    .unwrap()
1103                    .path_changes_tx
1104                    .try_send((vec![abs_path], tx))
1105            })?;
1106            rx.recv().await;
1107            Ok(())
1108        }))
1109    }
1110
1111    pub fn rename_entry(
1112        &self,
1113        entry_id: ProjectEntryId,
1114        new_path: impl Into<Arc<Path>>,
1115        cx: &mut ModelContext<Worktree>,
1116    ) -> Option<Task<Result<Entry>>> {
1117        let old_path = self.entry_for_id(entry_id)?.path.clone();
1118        let new_path = new_path.into();
1119        let abs_old_path = self.absolutize(&old_path);
1120        let abs_new_path = self.absolutize(&new_path);
1121        let fs = self.fs.clone();
1122        let rename = cx.background().spawn(async move {
1123            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1124                .await
1125        });
1126
1127        Some(cx.spawn(|this, mut cx| async move {
1128            rename.await?;
1129            this.update(&mut cx, |this, cx| {
1130                this.as_local_mut()
1131                    .unwrap()
1132                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1133            })
1134            .await
1135        }))
1136    }
1137
1138    pub fn copy_entry(
1139        &self,
1140        entry_id: ProjectEntryId,
1141        new_path: impl Into<Arc<Path>>,
1142        cx: &mut ModelContext<Worktree>,
1143    ) -> Option<Task<Result<Entry>>> {
1144        let old_path = self.entry_for_id(entry_id)?.path.clone();
1145        let new_path = new_path.into();
1146        let abs_old_path = self.absolutize(&old_path);
1147        let abs_new_path = self.absolutize(&new_path);
1148        let fs = self.fs.clone();
1149        let copy = cx.background().spawn(async move {
1150            copy_recursive(
1151                fs.as_ref(),
1152                &abs_old_path,
1153                &abs_new_path,
1154                Default::default(),
1155            )
1156            .await
1157        });
1158
1159        Some(cx.spawn(|this, mut cx| async move {
1160            copy.await?;
1161            this.update(&mut cx, |this, cx| {
1162                this.as_local_mut()
1163                    .unwrap()
1164                    .refresh_entry(new_path.clone(), None, cx)
1165            })
1166            .await
1167        }))
1168    }
1169
1170    fn refresh_entry(
1171        &self,
1172        path: Arc<Path>,
1173        old_path: Option<Arc<Path>>,
1174        cx: &mut ModelContext<Worktree>,
1175    ) -> Task<Result<Entry>> {
1176        let fs = self.fs.clone();
1177        let abs_root_path = self.abs_path.clone();
1178        let path_changes_tx = self.path_changes_tx.clone();
1179        cx.spawn_weak(move |this, mut cx| async move {
1180            let abs_path = fs.canonicalize(&abs_root_path).await?;
1181            let mut paths = Vec::with_capacity(2);
1182            paths.push(if path.file_name().is_some() {
1183                abs_path.join(&path)
1184            } else {
1185                abs_path.clone()
1186            });
1187            if let Some(old_path) = old_path {
1188                paths.push(if old_path.file_name().is_some() {
1189                    abs_path.join(&old_path)
1190                } else {
1191                    abs_path.clone()
1192                });
1193            }
1194
1195            let (tx, mut rx) = barrier::channel();
1196            path_changes_tx.try_send((paths, tx))?;
1197            rx.recv().await;
1198            this.upgrade(&cx)
1199                .ok_or_else(|| anyhow!("worktree was dropped"))?
1200                .update(&mut cx, |this, _| {
1201                    this.entry_for_path(path)
1202                        .cloned()
1203                        .ok_or_else(|| anyhow!("failed to read path after update"))
1204                })
1205        })
1206    }
1207
1208    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1209        let (share_tx, share_rx) = oneshot::channel();
1210
1211        if let Some(share) = self.share.as_mut() {
1212            let _ = share_tx.send(());
1213            *share.resume_updates.borrow_mut() = ();
1214        } else {
1215            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1216            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1217            let worktree_id = cx.model_id() as u64;
1218
1219            for (path, summaries) in &self.diagnostic_summaries {
1220                for (&server_id, summary) in summaries {
1221                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1222                        project_id,
1223                        worktree_id,
1224                        summary: Some(summary.to_proto(server_id, &path)),
1225                    }) {
1226                        return Task::ready(Err(e));
1227                    }
1228                }
1229            }
1230
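            // Stream each successive snapshot to the remote peer as a diff
            // against the previous one, split into chunks of at most
            // MAX_CHUNK_SIZE entries; if a request fails, wait for
            // `resume_updates` to be signaled before retrying.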
1231            let _maintain_remote_snapshot = cx.background().spawn({
1232                let client = self.client.clone();
1233                async move {
1234                    let mut share_tx = Some(share_tx);
1235                    let mut prev_snapshot = LocalSnapshot {
1236                        ignores_by_parent_abs_path: Default::default(),
1237                        removed_entry_ids: Default::default(),
1238                        next_entry_id: Default::default(),
1239                        git_repositories: Default::default(),
1240                        snapshot: Snapshot {
1241                            id: WorktreeId(worktree_id as usize),
1242                            abs_path: Path::new("").into(),
1243                            root_name: Default::default(),
1244                            root_char_bag: Default::default(),
1245                            entries_by_path: Default::default(),
1246                            entries_by_id: Default::default(),
1247                            repository_entries: Default::default(),
1248                            scan_id: 0,
1249                            completed_scan_id: 0,
1250                        },
1251                    };
1252                    while let Some(snapshot) = snapshots_rx.recv().await {
1253                        #[cfg(any(test, feature = "test-support"))]
1254                        const MAX_CHUNK_SIZE: usize = 2;
1255                        #[cfg(not(any(test, feature = "test-support")))]
1256                        const MAX_CHUNK_SIZE: usize = 256;
1257
1258                        let update =
1259                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1260                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1261                            let _ = resume_updates_rx.try_recv();
1262                            while let Err(error) = client.request(update.clone()).await {
1263                                log::error!("failed to send worktree update: {}", error);
1264                                log::info!("waiting to resume updates");
1265                                if resume_updates_rx.next().await.is_none() {
1266                                    return Ok(());
1267                                }
1268                            }
1269                        }
1270
1271                        if let Some(share_tx) = share_tx.take() {
1272                            let _ = share_tx.send(());
1273                        }
1274
1275                        prev_snapshot = snapshot;
1276                    }
1277
1278                    Ok::<_, anyhow::Error>(())
1279                }
1280                .log_err()
1281            });
1282
1283            self.share = Some(ShareState {
1284                project_id,
1285                snapshots_tx,
1286                resume_updates: resume_updates_tx,
1287                _maintain_remote_snapshot,
1288            });
1289        }
1290
1291        cx.foreground()
1292            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1293    }
1294
1295    pub fn unshare(&mut self) {
1296        self.share.take();
1297    }
1298
1299    pub fn is_shared(&self) -> bool {
1300        self.share.is_some()
1301    }
1302}
1303
1304impl RemoteWorktree {
1305    fn snapshot(&self) -> Snapshot {
1306        self.snapshot.clone()
1307    }
1308
1309    pub fn disconnected_from_host(&mut self) {
1310        self.updates_tx.take();
1311        self.snapshot_subscriptions.clear();
1312        self.disconnected = true;
1313    }
1314
1315    pub fn save_buffer(
1316        &self,
1317        buffer_handle: ModelHandle<Buffer>,
1318        cx: &mut ModelContext<Worktree>,
1319    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1320        let buffer = buffer_handle.read(cx);
1321        let buffer_id = buffer.remote_id();
1322        let version = buffer.version();
1323        let rpc = self.client.clone();
1324        let project_id = self.project_id;
1325        cx.as_mut().spawn(|mut cx| async move {
1326            let response = rpc
1327                .request(proto::SaveBuffer {
1328                    project_id,
1329                    buffer_id,
1330                    version: serialize_version(&version),
1331                })
1332                .await?;
1333            let version = deserialize_version(&response.version);
1334            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1335            let mtime = response
1336                .mtime
1337                .ok_or_else(|| anyhow!("missing mtime"))?
1338                .into();
1339
1340            buffer_handle.update(&mut cx, |buffer, cx| {
1341                buffer.did_save(version.clone(), fingerprint, mtime, cx);
1342            });
1343
1344            Ok((version, fingerprint, mtime))
1345        })
1346    }
1347
1348    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1349        if let Some(updates_tx) = &self.updates_tx {
1350            updates_tx
1351                .unbounded_send(update)
1352                .expect("consumer runs to completion");
1353        }
1354    }
1355
1356    fn observed_snapshot(&self, scan_id: usize) -> bool {
1357        self.completed_scan_id >= scan_id
1358    }
1359
1360    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1361        let (tx, rx) = oneshot::channel();
1362        if self.observed_snapshot(scan_id) {
1363            let _ = tx.send(());
1364        } else if self.disconnected {
1365            drop(tx);
1366        } else {
1367            match self
1368                .snapshot_subscriptions
1369                .binary_search_by_key(&scan_id, |probe| probe.0)
1370            {
1371                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1372            }
1373        }
1374
1375        async move {
1376            rx.await?;
1377            Ok(())
1378        }
1379    }
1380
1381    pub fn update_diagnostic_summary(
1382        &mut self,
1383        path: Arc<Path>,
1384        summary: &proto::DiagnosticSummary,
1385    ) {
1386        let server_id = LanguageServerId(summary.language_server_id as usize);
1387        let summary = DiagnosticSummary {
1388            error_count: summary.error_count as usize,
1389            warning_count: summary.warning_count as usize,
1390        };
1391
1392        if summary.is_empty() {
1393            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1394                summaries.remove(&server_id);
1395                if summaries.is_empty() {
1396                    self.diagnostic_summaries.remove(&path);
1397                }
1398            }
1399        } else {
1400            self.diagnostic_summaries
1401                .entry(path)
1402                .or_default()
1403                .insert(server_id, summary);
1404        }
1405    }
1406
1407    pub fn insert_entry(
1408        &mut self,
1409        entry: proto::Entry,
1410        scan_id: usize,
1411        cx: &mut ModelContext<Worktree>,
1412    ) -> Task<Result<Entry>> {
1413        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1414        cx.spawn(|this, mut cx| async move {
1415            wait_for_snapshot.await?;
1416            this.update(&mut cx, |worktree, _| {
1417                let worktree = worktree.as_remote_mut().unwrap();
1418                let mut snapshot = worktree.background_snapshot.lock();
1419                let entry = snapshot.insert_entry(entry);
1420                worktree.snapshot = snapshot.clone();
1421                entry
1422            })
1423        })
1424    }
1425
1426    pub(crate) fn delete_entry(
1427        &mut self,
1428        id: ProjectEntryId,
1429        scan_id: usize,
1430        cx: &mut ModelContext<Worktree>,
1431    ) -> Task<Result<()>> {
1432        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1433        cx.spawn(|this, mut cx| async move {
1434            wait_for_snapshot.await?;
1435            this.update(&mut cx, |worktree, _| {
1436                let worktree = worktree.as_remote_mut().unwrap();
1437                let mut snapshot = worktree.background_snapshot.lock();
1438                snapshot.delete_entry(id);
1439                worktree.snapshot = snapshot.clone();
1440            });
1441            Ok(())
1442        })
1443    }
1444}
1445
1446impl Snapshot {
1447    pub fn id(&self) -> WorktreeId {
1448        self.id
1449    }
1450
1451    pub fn abs_path(&self) -> &Arc<Path> {
1452        &self.abs_path
1453    }
1454
1455    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1456        self.entries_by_id.get(&entry_id, &()).is_some()
1457    }
1458
1459    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1460        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1461        let old_entry = self.entries_by_id.insert_or_replace(
1462            PathEntry {
1463                id: entry.id,
1464                path: entry.path.clone(),
1465                is_ignored: entry.is_ignored,
1466                scan_id: 0,
1467            },
1468            &(),
1469        );
1470        if let Some(old_entry) = old_entry {
1471            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1472        }
1473        self.entries_by_path.insert_or_replace(entry.clone(), &());
1474        Ok(entry)
1475    }
1476
1477    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1478        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1479        self.entries_by_path = {
1480            let mut cursor = self.entries_by_path.cursor();
1481            let mut new_entries_by_path =
1482                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1483            while let Some(entry) = cursor.item() {
1484                if entry.path.starts_with(&removed_entry.path) {
1485                    self.entries_by_id.remove(&entry.id, &());
1486                    cursor.next(&());
1487                } else {
1488                    break;
1489                }
1490            }
1491            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1492            new_entries_by_path
1493        };
1494
1495        Some(removed_entry.path)
1496    }
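    // `delete_entry` drops the whole subtree, not just the entry itself: the cursor
    // slices the path-ordered tree up to `removed_entry.path`, skips every following
    // entry whose path starts with it (removing each from `entries_by_id`), and then
    // appends the remaining suffix. Sketch of the component-wise prefix check this
    // relies on (illustrative, not project code):
    //
    //     assert!(Path::new("a/b/c.rs").starts_with("a/b"));  // descendant: removed
    //     assert!(!Path::new("a/bc").starts_with("a/b"));     // sibling: kept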
1497
1498    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1499        let mut entries_by_path_edits = Vec::new();
1500        let mut entries_by_id_edits = Vec::new();
1501        for entry_id in update.removed_entries {
1502            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1503                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1504                entries_by_id_edits.push(Edit::Remove(entry.id));
1505            }
1506        }
1507
1508        for entry in update.updated_entries {
1509            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1510            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1511                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1512            }
1513            entries_by_id_edits.push(Edit::Insert(PathEntry {
1514                id: entry.id,
1515                path: entry.path.clone(),
1516                is_ignored: entry.is_ignored,
1517                scan_id: 0,
1518            }));
1519            entries_by_path_edits.push(Edit::Insert(entry));
1520        }
1521
1522        self.entries_by_path.edit(entries_by_path_edits, &());
1523        self.entries_by_id.edit(entries_by_id_edits, &());
1524
1525        update.removed_repositories.sort_unstable();
1526        self.repository_entries.retain(|_, entry| {
1527            update
1528                .removed_repositories
1529                .binary_search(&entry.work_directory.to_proto())
1530                .is_err()
1531        });
1536
1537        for repository in update.updated_repositories {
1538            let work_directory_entry: WorkDirectoryEntry =
1539                ProjectEntryId::from_proto(repository.work_directory_id).into();
1540
1541            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1542                let mut statuses = TreeMap::default();
1543                for status_entry in repository.updated_statuses {
1544                    let Some(git_file_status) = read_git_status(status_entry.status) else {
1545                        continue;
1546                    };
1547
1548                    let repo_path = RepoPath::new(status_entry.repo_path.into());
1549                    statuses.insert(repo_path, git_file_status);
1550                }
1551
1552                let work_directory = RepositoryWorkDirectory(entry.path.clone());
1553                if self.repository_entries.get(&work_directory).is_some() {
1554                    self.repository_entries.update(&work_directory, |repo| {
1555                        repo.branch = repository.branch.map(Into::into);
1556                        repo.statuses.insert_tree(statuses);
1557
1558                        for repo_path in repository.removed_repo_paths {
1559                            let repo_path = RepoPath::new(repo_path.into());
1560                            repo.statuses.remove(&repo_path);
1561                        }
1562                    });
1563                } else {
1564                    self.repository_entries.insert(
1565                        work_directory,
1566                        RepositoryEntry {
1567                            work_directory: work_directory_entry,
1568                            branch: repository.branch.map(Into::into),
1569                            statuses,
1570                        },
1571                    )
1572                }
1573            } else {
1574                log::error!("no work directory entry for repository {:?}", repository)
1575            }
1576        }
1577
1578        self.scan_id = update.scan_id as usize;
1579        if update.is_last_update {
1580            self.completed_scan_id = update.scan_id as usize;
1581        }
1582
1583        Ok(())
1584    }
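    // The update above is applied as batched `SumTree::edit` calls, one per tree,
    // rather than as one tree operation per entry. A minimal sketch of the same
    // pattern, assuming some `tree: SumTree<Entry>`, an `old_path: Arc<Path>`, and
    // a replacement `entry: Entry`:
    //
    //     let edits = vec![
    //         Edit::Remove(PathKey(old_path)),
    //         Edit::Insert(entry),
    //     ];
    //     tree.edit(edits, &());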
1585
1586    pub fn file_count(&self) -> usize {
1587        self.entries_by_path.summary().file_count
1588    }
1589
1590    pub fn visible_file_count(&self) -> usize {
1591        self.entries_by_path.summary().visible_file_count
1592    }
1593
1594    fn traverse_from_offset(
1595        &self,
1596        include_dirs: bool,
1597        include_ignored: bool,
1598        start_offset: usize,
1599    ) -> Traversal {
1600        let mut cursor = self.entries_by_path.cursor();
1601        cursor.seek(
1602            &TraversalTarget::Count {
1603                count: start_offset,
1604                include_dirs,
1605                include_ignored,
1606            },
1607            Bias::Right,
1608            &(),
1609        );
1610        Traversal {
1611            cursor,
1612            include_dirs,
1613            include_ignored,
1614        }
1615    }
1616
1617    fn traverse_from_path(
1618        &self,
1619        include_dirs: bool,
1620        include_ignored: bool,
1621        path: &Path,
1622    ) -> Traversal {
1623        let mut cursor = self.entries_by_path.cursor();
1624        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1625        Traversal {
1626            cursor,
1627            include_dirs,
1628            include_ignored,
1629        }
1630    }
1631
1632    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1633        self.traverse_from_offset(false, include_ignored, start)
1634    }
1635
1636    pub fn entries(&self, include_ignored: bool) -> Traversal {
1637        self.traverse_from_offset(true, include_ignored, 0)
1638    }
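    // Usage sketch (hypothetical caller): both helpers return a `Traversal`, which
    // is assumed to implement `Iterator<Item = &Entry>` further down in this file.
    // Listing every non-ignored file path in the snapshot might look like:
    //
    //     for entry in snapshot.files(false, 0) {
    //         println!("{}", entry.path.display());
    //     }
    //
    // Passing `true` for `include_ignored`, or calling `entries` instead, widens the
    // walk to ignored entries and to directories respectively.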
1639
1640    pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1641        self.repository_entries.values()
1642    }
1643
1644    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1645        let empty_path = Path::new("");
1646        self.entries_by_path
1647            .cursor::<()>()
1648            .filter(move |entry| entry.path.as_ref() != empty_path)
1649            .map(|entry| &entry.path)
1650    }
1651
1652    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1653        let mut cursor = self.entries_by_path.cursor();
1654        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1655        let traversal = Traversal {
1656            cursor,
1657            include_dirs: true,
1658            include_ignored: true,
1659        };
1660        ChildEntriesIter {
1661            traversal,
1662            parent_path,
1663        }
1664    }
1665
1666    fn descendent_entries<'a>(
1667        &'a self,
1668        include_dirs: bool,
1669        include_ignored: bool,
1670        parent_path: &'a Path,
1671    ) -> DescendentEntriesIter<'a> {
1672        let mut cursor = self.entries_by_path.cursor();
1673        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1674        let mut traversal = Traversal {
1675            cursor,
1676            include_dirs,
1677            include_ignored,
1678        };
1679
1680        if traversal.end_offset() == traversal.start_offset() {
1681            traversal.advance();
1682        }
1683
1684        DescendentEntriesIter {
1685            traversal,
1686            parent_path,
1687        }
1688    }
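    // The two iterators above differ in scope: `child_entries` yields only the
    // immediate children of `parent_path`, while `descendent_entries` walks the
    // entire subtree beneath it. For a worktree containing "a/b", "a/b/c.rs",
    // "a/b/d", and "a/b/d/e.rs", a sketch from inside this module (assuming the
    // `Iterator` impls for these types defined later in the file):
    //
    //     let children: Vec<_> = snapshot
    //         .child_entries(Path::new("a/b"))
    //         .map(|e| e.path.clone())
    //         .collect(); // ["a/b/c.rs", "a/b/d"]
    //
    //     let files: Vec<_> = snapshot
    //         .descendent_entries(false, true, Path::new("a/b"))
    //         .map(|e| e.path.clone())
    //         .collect(); // ["a/b/c.rs", "a/b/d/e.rs"]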
1689
1690    pub fn root_entry(&self) -> Option<&Entry> {
1691        self.entry_for_path("")
1692    }
1693
1694    pub fn root_name(&self) -> &str {
1695        &self.root_name
1696    }
1697
1698    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1699        self.repository_entries
1700            .get(&RepositoryWorkDirectory(Path::new("").into()))
1701            .map(|entry| entry.to_owned())
1702    }
1703
1704    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1705        self.repository_entries.values()
1706    }
1707
1708    pub fn scan_id(&self) -> usize {
1709        self.scan_id
1710    }
1711
1712    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1713        let path = path.as_ref();
1714        self.traverse_from_path(true, true, path)
1715            .entry()
1716            .and_then(|entry| {
1717                if entry.path.as_ref() == path {
1718                    Some(entry)
1719                } else {
1720                    None
1721                }
1722            })
1723    }
1724
1725    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1726        let entry = self.entries_by_id.get(&id, &())?;
1727        self.entry_for_path(&entry.path)
1728    }
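    // Lookup sketch for the two accessors above (hypothetical caller): a path lookup
    // only succeeds on an exact match, and an id lookup first resolves the id to its
    // path through `entries_by_id`, so the two stay consistent:
    //
    //     if let Some(entry) = snapshot.entry_for_path("src/main.rs") {
    //         assert_eq!(
    //             snapshot.entry_for_id(entry.id).map(|e| &e.path),
    //             Some(&entry.path),
    //         );
    //     }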
1729
1730    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1731        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1732    }
1733}
1734
1735impl LocalSnapshot {
1736    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1737        self.git_repositories.get(&repo.work_directory.0)
1738    }
1739
1740    pub(crate) fn repo_for_metadata(
1741        &self,
1742        path: &Path,
1743    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1744        self.git_repositories
1745            .iter()
1746            .find(|(_, repo)| repo.in_dot_git(path))
1747    }
1748
1749    #[cfg(test)]
1750    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1751        let root_name = self.root_name.clone();
1752        proto::UpdateWorktree {
1753            project_id,
1754            worktree_id: self.id().to_proto(),
1755            abs_path: self.abs_path().to_string_lossy().into(),
1756            root_name,
1757            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1758            removed_entries: Default::default(),
1759            scan_id: self.scan_id as u64,
1760            is_last_update: true,
1761            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1762            removed_repositories: Default::default(),
1763        }
1764    }
1765
1766    pub(crate) fn build_update(
1767        &self,
1768        other: &Self,
1769        project_id: u64,
1770        worktree_id: u64,
1771        include_ignored: bool,
1772    ) -> proto::UpdateWorktree {
1773        let mut updated_entries = Vec::new();
1774        let mut removed_entries = Vec::new();
1775        let mut self_entries = self
1776            .entries_by_id
1777            .cursor::<()>()
1778            .filter(|e| include_ignored || !e.is_ignored)
1779            .peekable();
1780        let mut other_entries = other
1781            .entries_by_id
1782            .cursor::<()>()
1783            .filter(|e| include_ignored || !e.is_ignored)
1784            .peekable();
1785        loop {
1786            match (self_entries.peek(), other_entries.peek()) {
1787                (Some(self_entry), Some(other_entry)) => {
1788                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1789                        Ordering::Less => {
1790                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1791                            updated_entries.push(entry);
1792                            self_entries.next();
1793                        }
1794                        Ordering::Equal => {
1795                            if self_entry.scan_id != other_entry.scan_id {
1796                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1797                                updated_entries.push(entry);
1798                            }
1799
1800                            self_entries.next();
1801                            other_entries.next();
1802                        }
1803                        Ordering::Greater => {
1804                            removed_entries.push(other_entry.id.to_proto());
1805                            other_entries.next();
1806                        }
1807                    }
1808                }
1809                (Some(self_entry), None) => {
1810                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1811                    updated_entries.push(entry);
1812                    self_entries.next();
1813                }
1814                (None, Some(other_entry)) => {
1815                    removed_entries.push(other_entry.id.to_proto());
1816                    other_entries.next();
1817                }
1818                (None, None) => break,
1819            }
1820        }
1821
1822        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1823        let mut removed_repositories = Vec::new();
1824        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1825        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1826        loop {
1827            match (self_repos.peek(), other_repos.peek()) {
1828                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1829                    match Ord::cmp(self_work_dir, other_work_dir) {
1830                        Ordering::Less => {
1831                            updated_repositories.push((*self_repo).into());
1832                            self_repos.next();
1833                        }
1834                        Ordering::Equal => {
1835                            if self_repo != other_repo {
1836                                updated_repositories.push(self_repo.build_update(other_repo));
1837                            }
1838
1839                            self_repos.next();
1840                            other_repos.next();
1841                        }
1842                        Ordering::Greater => {
1843                            removed_repositories.push(other_repo.work_directory.to_proto());
1844                            other_repos.next();
1845                        }
1846                    }
1847                }
1848                (Some((_, self_repo)), None) => {
1849                    updated_repositories.push((*self_repo).into());
1850                    self_repos.next();
1851                }
1852                (None, Some((_, other_repo))) => {
1853                    removed_repositories.push(other_repo.work_directory.to_proto());
1854                    other_repos.next();
1855                }
1856                (None, None) => break,
1857            }
1858        }
1859
1860        proto::UpdateWorktree {
1861            project_id,
1862            worktree_id,
1863            abs_path: self.abs_path().to_string_lossy().into(),
1864            root_name: self.root_name().to_string(),
1865            updated_entries,
1866            removed_entries,
1867            scan_id: self.scan_id as u64,
1868            is_last_update: self.completed_scan_id == self.scan_id,
1869            updated_repositories,
1870            removed_repositories,
1871        }
1872    }
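    // `build_update` produces a delta between two snapshots by walking both
    // id-ordered entry lists (and both repository maps) with a sorted merge: ids
    // present only in `self` become `updated_entries`, ids present only in `other`
    // become `removed_entries`, and shared ids are re-sent only when their `scan_id`
    // changed. A sketch of the same merge shape on plain sorted vectors
    // (illustrative only, not project code):
    //
    //     fn diff(new: &[u64], old: &[u64]) -> (Vec<u64>, Vec<u64>) {
    //         let (mut added, mut removed) = (Vec::new(), Vec::new());
    //         let (mut i, mut j) = (0, 0);
    //         while i < new.len() || j < old.len() {
    //             match (new.get(i), old.get(j)) {
    //                 (Some(a), Some(b)) if a == b => { i += 1; j += 1; }
    //                 (Some(a), Some(b)) if a < b => { added.push(*a); i += 1; }
    //                 (Some(_), Some(b)) => { removed.push(*b); j += 1; }
    //                 (Some(a), None) => { added.push(*a); i += 1; }
    //                 (None, Some(b)) => { removed.push(*b); j += 1; }
    //                 (None, None) => break,
    //             }
    //         }
    //         (added, removed)
    //     }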
1873
1874    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1875        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1876            let abs_path = self.abs_path.join(&entry.path);
1877            match smol::block_on(build_gitignore(&abs_path, fs)) {
1878                Ok(ignore) => {
1879                    self.ignores_by_parent_abs_path.insert(
1880                        abs_path.parent().unwrap().into(),
1881                        (Arc::new(ignore), self.scan_id),
1882                    );
1883                }
1884                Err(error) => {
1885                    log::error!(
1886                        "error loading .gitignore file {:?} - {:?}",
1887                        &entry.path,
1888                        error
1889                    );
1890                }
1891            }
1892        }
1893
1894        self.reuse_entry_id(&mut entry);
1895
1896        if entry.kind == EntryKind::PendingDir {
1897            if let Some(existing_entry) =
1898                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1899            {
1900                entry.kind = existing_entry.kind;
1901            }
1902        }
1903
1904        let scan_id = self.scan_id;
1905        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1906        if let Some(removed) = removed {
1907            if removed.id != entry.id {
1908                self.entries_by_id.remove(&removed.id, &());
1909            }
1910        }
1911        self.entries_by_id.insert_or_replace(
1912            PathEntry {
1913                id: entry.id,
1914                path: entry.path.clone(),
1915                is_ignored: entry.is_ignored,
1916                scan_id,
1917            },
1918            &(),
1919        );
1920
1921        entry
1922    }
1923
1924    fn populate_dir(
1925        &mut self,
1926        parent_path: Arc<Path>,
1927        entries: impl IntoIterator<Item = Entry>,
1928        ignore: Option<Arc<Gitignore>>,
1929        fs: &dyn Fs,
1930    ) {
1931        let mut parent_entry = if let Some(parent_entry) =
1932            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1933        {
1934            parent_entry.clone()
1935        } else {
1936            log::warn!(
1937                "populating a directory {:?} that has been removed",
1938                parent_path
1939            );
1940            return;
1941        };
1942
1943        match parent_entry.kind {
1944            EntryKind::PendingDir => {
1945                parent_entry.kind = EntryKind::Dir;
1946            }
1947            EntryKind::Dir => {}
1948            _ => return,
1949        }
1950
1951        if let Some(ignore) = ignore {
1952            self.ignores_by_parent_abs_path.insert(
1953                self.abs_path.join(&parent_path).into(),
1954                (ignore, self.scan_id),
1955            );
1956        }
1957
1958        if parent_path.file_name() == Some(&DOT_GIT) {
1959            self.build_repo(parent_path, fs);
1960        }
1961
1962        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1963        let mut entries_by_id_edits = Vec::new();
1964
1965        for mut entry in entries {
1966            self.reuse_entry_id(&mut entry);
1967            entries_by_id_edits.push(Edit::Insert(PathEntry {
1968                id: entry.id,
1969                path: entry.path.clone(),
1970                is_ignored: entry.is_ignored,
1971                scan_id: self.scan_id,
1972            }));
1973            entries_by_path_edits.push(Edit::Insert(entry));
1974        }
1975
1976        self.entries_by_path.edit(entries_by_path_edits, &());
1977        self.entries_by_id.edit(entries_by_id_edits, &());
1978    }
1979
1980    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1981        let abs_path = self.abs_path.join(&parent_path);
1982        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1983
1984        // Guard against repositories nested inside a .git metadata directory
1985        if work_dir
1986            .components()
1987            .find(|component| component.as_os_str() == *DOT_GIT)
1988            .is_some()
1989        {
1990            return None;
1991        };
1992
1993        let work_dir_id = self
1994            .entry_for_path(work_dir.clone())
1995            .map(|entry| entry.id)?;
1996
1997        if self.git_repositories.get(&work_dir_id).is_none() {
1998            let repo = fs.open_repo(abs_path.as_path())?;
1999            let work_directory = RepositoryWorkDirectory(work_dir.clone());
2000            let scan_id = self.scan_id;
2001
2002            let repo_lock = repo.lock();
2003
2004            self.repository_entries.insert(
2005                work_directory,
2006                RepositoryEntry {
2007                    work_directory: work_dir_id.into(),
2008                    branch: repo_lock.branch_name().map(Into::into),
2009                    statuses: repo_lock.statuses().unwrap_or_default(),
2010                },
2011            );
2012            drop(repo_lock);
2013
2014            self.git_repositories.insert(
2015                work_dir_id,
2016                LocalRepositoryEntry {
2017                    scan_id,
2018                    full_scan_id: scan_id,
2019                    repo_ptr: repo,
2020                    git_dir_path: parent_path.clone(),
2021                },
2022            )
2023        }
2024
2025        Some(())
2026    }
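
    // Bookkeeping note: a repository is tracked in two places. `repository_entries`
    // (keyed by work directory path) lives in the shared `Snapshot` and is what gets
    // replicated to collaborators, while `git_repositories` (keyed by the work
    // directory's entry id) is local-only and owns the repo handle plus scan state.
    // Roughly, for a worktree containing "crate/.git" (schematic, not real code):
    //
    //     repository_entries["crate"]         -> RepositoryEntry { branch, statuses, .. }
    //     git_repositories[id_of("crate")]    -> LocalRepositoryEntry { repo_ptr, git_dir_path: "crate/.git", .. }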
2027    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2028        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2029            entry.id = removed_entry_id;
2030        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2031            entry.id = existing_entry.id;
2032        }
2033    }
2034
2035    fn remove_path(&mut self, path: &Path) {
2036        let mut new_entries;
2037        let removed_entries;
2038        {
2039            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2040            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2041            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2042            new_entries.push_tree(cursor.suffix(&()), &());
2043        }
2044        self.entries_by_path = new_entries;
2045
2046        let mut entries_by_id_edits = Vec::new();
2047        for entry in removed_entries.cursor::<()>() {
2048            let removed_entry_id = self
2049                .removed_entry_ids
2050                .entry(entry.inode)
2051                .or_insert(entry.id);
2052            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2053            entries_by_id_edits.push(Edit::Remove(entry.id));
2054        }
2055        self.entries_by_id.edit(entries_by_id_edits, &());
2056
2057        if path.file_name() == Some(&GITIGNORE) {
2058            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2059            if let Some((_, scan_id)) = self
2060                .ignores_by_parent_abs_path
2061                .get_mut(abs_parent_path.as_path())
2062            {
2063                *scan_id = self.snapshot.scan_id;
2064            }
2065        }
2066    }
2067
2068    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2069        let mut inodes = TreeSet::default();
2070        for ancestor in path.ancestors().skip(1) {
2071            if let Some(entry) = self.entry_for_path(ancestor) {
2072                inodes.insert(entry.inode);
2073            }
2074        }
2075        inodes
2076    }
2077
2078    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2079        let mut new_ignores = Vec::new();
2080        for ancestor in abs_path.ancestors().skip(1) {
2081            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2082                new_ignores.push((ancestor, Some(ignore.clone())));
2083            } else {
2084                new_ignores.push((ancestor, None));
2085            }
2086        }
2087
2088        let mut ignore_stack = IgnoreStack::none();
2089        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2090            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2091                ignore_stack = IgnoreStack::all();
2092                break;
2093            } else if let Some(ignore) = ignore {
2094                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2095            }
2096        }
2097
2098        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2099            ignore_stack = IgnoreStack::all();
2100        }
2101
2102        ignore_stack
2103    }
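    // The stack above is assembled outermost-first: each ancestor of `abs_path`
    // contributes its `.gitignore` (if one has been loaded into
    // `ignores_by_parent_abs_path`), and as soon as an ancestor is itself ignored
    // the result collapses to `IgnoreStack::all()`. Rough shape of the outcome for
    // "/repo/target/debug" when "/repo/.gitignore" contains `target/` (hypothetical
    // layout, schematic rather than real calls):
    //
    //     IgnoreStack::none()
    //         .append("/repo", repo_gitignore)   // "/repo/target" matches `target/`
    //     => IgnoreStack::all()                  // everything beneath is ignored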
2104}
2105
2106async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2107    let contents = fs.load(abs_path).await?;
2108    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2109    let mut builder = GitignoreBuilder::new(parent);
2110    for line in contents.lines() {
2111        builder.add_line(Some(abs_path.into()), line)?;
2112    }
2113    Ok(builder.build()?)
2114}
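// Usage sketch for the helper above (hypothetical async caller with an `fs` handle,
// assuming the file contains a line such as `target/`):
//
//     let ignore = build_gitignore(Path::new("/repo/.gitignore"), fs.as_ref()).await?;
//     assert!(ignore.matched("/repo/target", /* is_dir */ true).is_ignore());
//
// `Gitignore::matched` takes the candidate path plus whether it is a directory, and
// the compiled rules are anchored at the `.gitignore` file's parent directory.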
2115
2116impl WorktreeId {
2117    pub fn from_usize(handle_id: usize) -> Self {
2118        Self(handle_id)
2119    }
2120
2121    pub(crate) fn from_proto(id: u64) -> Self {
2122        Self(id as usize)
2123    }
2124
2125    pub fn to_proto(&self) -> u64 {
2126        self.0 as u64
2127    }
2128
2129    pub fn to_usize(&self) -> usize {
2130        self.0
2131    }
2132}
2133
2134impl fmt::Display for WorktreeId {
2135    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2136        self.0.fmt(f)
2137    }
2138}
2139
2140impl Deref for Worktree {
2141    type Target = Snapshot;
2142
2143    fn deref(&self) -> &Self::Target {
2144        match self {
2145            Worktree::Local(worktree) => &worktree.snapshot,
2146            Worktree::Remote(worktree) => &worktree.snapshot,
2147        }
2148    }
2149}
2150
2151impl Deref for LocalWorktree {
2152    type Target = LocalSnapshot;
2153
2154    fn deref(&self) -> &Self::Target {
2155        &self.snapshot
2156    }
2157}
2158
2159impl Deref for RemoteWorktree {
2160    type Target = Snapshot;
2161
2162    fn deref(&self) -> &Self::Target {
2163        &self.snapshot
2164    }
2165}
2166
2167impl fmt::Debug for LocalWorktree {
2168    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2169        self.snapshot.fmt(f)
2170    }
2171}
2172
2173impl fmt::Debug for Snapshot {
2174    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2175        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2176        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2177
2178        impl<'a> fmt::Debug for EntriesByPath<'a> {
2179            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2180                f.debug_map()
2181                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2182                    .finish()
2183            }
2184        }
2185
2186        impl<'a> fmt::Debug for EntriesById<'a> {
2187            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2188                f.debug_list().entries(self.0.iter()).finish()
2189            }
2190        }
2191
2192        f.debug_struct("Snapshot")
2193            .field("id", &self.id)
2194            .field("root_name", &self.root_name)
2195            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2196            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2197            .finish()
2198    }
2199}
2200
2201#[derive(Clone, PartialEq)]
2202pub struct File {
2203    pub worktree: ModelHandle<Worktree>,
2204    pub path: Arc<Path>,
2205    pub mtime: SystemTime,
2206    pub(crate) entry_id: ProjectEntryId,
2207    pub(crate) is_local: bool,
2208    pub(crate) is_deleted: bool,
2209}
2210
2211impl language::File for File {
2212    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2213        if self.is_local {
2214            Some(self)
2215        } else {
2216            None
2217        }
2218    }
2219
2220    fn mtime(&self) -> SystemTime {
2221        self.mtime
2222    }
2223
2224    fn path(&self) -> &Arc<Path> {
2225        &self.path
2226    }
2227
2228    fn full_path(&self, cx: &AppContext) -> PathBuf {
2229        let mut full_path = PathBuf::new();
2230        let worktree = self.worktree.read(cx);
2231
2232        if worktree.is_visible() {
2233            full_path.push(worktree.root_name());
2234        } else {
2235            let path = worktree.abs_path();
2236
2237            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2238                full_path.push("~");
2239                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2240            } else {
2241                full_path.push(path)
2242            }
2243        }
2244
2245        if self.path.components().next().is_some() {
2246            full_path.push(&self.path);
2247        }
2248
2249        full_path
2250    }
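    // Worked example of the formatting above (hypothetical paths): for a visible
    // worktree named "zed" with `self.path == "src/main.rs"`, this yields
    // "zed/src/main.rs". For a non-visible local worktree rooted at
    // "/home/user/projects/zed" (with `HOME` = "/home/user"), the root is
    // abbreviated through `HOME` instead:
    //
    //     assert_eq!(file.full_path(cx), PathBuf::from("~/projects/zed/src/main.rs"));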
2251
2252    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2253    /// of its worktree, then this method will return the name of the worktree itself.
2254    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2255        self.path
2256            .file_name()
2257            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2258    }
2259
2260    fn is_deleted(&self) -> bool {
2261        self.is_deleted
2262    }
2263
2264    fn as_any(&self) -> &dyn Any {
2265        self
2266    }
2267
2268    fn to_proto(&self) -> rpc::proto::File {
2269        rpc::proto::File {
2270            worktree_id: self.worktree.id() as u64,
2271            entry_id: self.entry_id.to_proto(),
2272            path: self.path.to_string_lossy().into(),
2273            mtime: Some(self.mtime.into()),
2274            is_deleted: self.is_deleted,
2275        }
2276    }
2277}
2278
2279impl language::LocalFile for File {
2280    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2281        self.worktree
2282            .read(cx)
2283            .as_local()
2284            .unwrap()
2285            .abs_path
2286            .join(&self.path)
2287    }
2288
2289    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2290        let worktree = self.worktree.read(cx).as_local().unwrap();
2291        let abs_path = worktree.absolutize(&self.path);
2292        let fs = worktree.fs.clone();
2293        cx.background()
2294            .spawn(async move { fs.load(&abs_path).await })
2295    }
2296
2297    fn buffer_reloaded(
2298        &self,
2299        buffer_id: u64,
2300        version: &clock::Global,
2301        fingerprint: RopeFingerprint,
2302        line_ending: LineEnding,
2303        mtime: SystemTime,
2304        cx: &mut AppContext,
2305    ) {
2306        let worktree = self.worktree.read(cx).as_local().unwrap();
2307        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2308            worktree
2309                .client
2310                .send(proto::BufferReloaded {
2311                    project_id,
2312                    buffer_id,
2313                    version: serialize_version(version),
2314                    mtime: Some(mtime.into()),
2315                    fingerprint: serialize_fingerprint(fingerprint),
2316                    line_ending: serialize_line_ending(line_ending) as i32,
2317                })
2318                .log_err();
2319        }
2320    }
2321}
2322
2323impl File {
2324    pub fn from_proto(
2325        proto: rpc::proto::File,
2326        worktree: ModelHandle<Worktree>,
2327        cx: &AppContext,
2328    ) -> Result<Self> {
2329        let worktree_id = worktree
2330            .read(cx)
2331            .as_remote()
2332            .ok_or_else(|| anyhow!("not remote"))?
2333            .id();
2334
2335        if worktree_id.to_proto() != proto.worktree_id {
2336            return Err(anyhow!("worktree id does not match file"));
2337        }
2338
2339        Ok(Self {
2340            worktree,
2341            path: Path::new(&proto.path).into(),
2342            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2343            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2344            is_local: false,
2345            is_deleted: proto.is_deleted,
2346        })
2347    }
2348
2349    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2350        file.and_then(|f| f.as_any().downcast_ref())
2351    }
2352
2353    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2354        self.worktree.read(cx).id()
2355    }
2356
2357    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2358        if self.is_deleted {
2359            None
2360        } else {
2361            Some(self.entry_id)
2362        }
2363    }
2364}
2365
2366#[derive(Clone, Debug, PartialEq, Eq)]
2367pub struct Entry {
2368    pub id: ProjectEntryId,
2369    pub kind: EntryKind,
2370    pub path: Arc<Path>,
2371    pub inode: u64,
2372    pub mtime: SystemTime,
2373    pub is_symlink: bool,
2374    pub is_ignored: bool,
2375}
2376
2377#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2378pub enum EntryKind {
2379    PendingDir,
2380    Dir,
2381    File(CharBag),
2382}
2383
2384#[derive(Clone, Copy, Debug)]
2385pub enum PathChange {
2386    Added,
2387    Removed,
2388    Updated,
2389    AddedOrUpdated,
2390}
2391
2392impl Entry {
2393    fn new(
2394        path: Arc<Path>,
2395        metadata: &fs::Metadata,
2396        next_entry_id: &AtomicUsize,
2397        root_char_bag: CharBag,
2398    ) -> Self {
2399        Self {
2400            id: ProjectEntryId::new(next_entry_id),
2401            kind: if metadata.is_dir {
2402                EntryKind::PendingDir
2403            } else {
2404                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2405            },
2406            path,
2407            inode: metadata.inode,
2408            mtime: metadata.mtime,
2409            is_symlink: metadata.is_symlink,
2410            is_ignored: false,
2411        }
2412    }
2413
2414    pub fn is_dir(&self) -> bool {
2415        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2416    }
2417
2418    pub fn is_file(&self) -> bool {
2419        matches!(self.kind, EntryKind::File(_))
2420    }
2421}
2422
2423impl sum_tree::Item for Entry {
2424    type Summary = EntrySummary;
2425
2426    fn summary(&self) -> Self::Summary {
2427        let visible_count = if self.is_ignored { 0 } else { 1 };
2428        let file_count;
2429        let visible_file_count;
2430        if self.is_file() {
2431            file_count = 1;
2432            visible_file_count = visible_count;
2433        } else {
2434            file_count = 0;
2435            visible_file_count = 0;
2436        }
2437
2438        EntrySummary {
2439            max_path: self.path.clone(),
2440            count: 1,
2441            visible_count,
2442            file_count,
2443            visible_file_count,
2444        }
2445    }
2446}
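// How the counters above aggregate: `EntrySummary::add_summary` (below) sums them
// across a subtree, so `Snapshot::file_count` and `visible_file_count` simply read
// the root summary. For a hypothetical tree holding one directory, one tracked
// file, and one ignored file:
//
//     count              = 3
//     visible_count      = 2   // the ignored file contributes 0
//     file_count         = 2
//     visible_file_count = 1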
2447
2448impl sum_tree::KeyedItem for Entry {
2449    type Key = PathKey;
2450
2451    fn key(&self) -> Self::Key {
2452        PathKey(self.path.clone())
2453    }
2454}
2455
2456#[derive(Clone, Debug)]
2457pub struct EntrySummary {
2458    max_path: Arc<Path>,
2459    count: usize,
2460    visible_count: usize,
2461    file_count: usize,
2462    visible_file_count: usize,
2463}
2464
2465impl Default for EntrySummary {
2466    fn default() -> Self {
2467        Self {
2468            max_path: Arc::from(Path::new("")),
2469            count: 0,
2470            visible_count: 0,
2471            file_count: 0,
2472            visible_file_count: 0,
2473        }
2474    }
2475}
2476
2477impl sum_tree::Summary for EntrySummary {
2478    type Context = ();
2479
2480    fn add_summary(&mut self, rhs: &Self, _: &()) {
2481        self.max_path = rhs.max_path.clone();
2482        self.count += rhs.count;
2483        self.visible_count += rhs.visible_count;
2484        self.file_count += rhs.file_count;
2485        self.visible_file_count += rhs.visible_file_count;
2486    }
2487}
2488
2489#[derive(Clone, Debug)]
2490struct PathEntry {
2491    id: ProjectEntryId,
2492    path: Arc<Path>,
2493    is_ignored: bool,
2494    scan_id: usize,
2495}
2496
2497impl sum_tree::Item for PathEntry {
2498    type Summary = PathEntrySummary;
2499
2500    fn summary(&self) -> Self::Summary {
2501        PathEntrySummary { max_id: self.id }
2502    }
2503}
2504
2505impl sum_tree::KeyedItem for PathEntry {
2506    type Key = ProjectEntryId;
2507
2508    fn key(&self) -> Self::Key {
2509        self.id
2510    }
2511}
2512
2513#[derive(Clone, Debug, Default)]
2514struct PathEntrySummary {
2515    max_id: ProjectEntryId,
2516}
2517
2518impl sum_tree::Summary for PathEntrySummary {
2519    type Context = ();
2520
2521    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2522        self.max_id = summary.max_id;
2523    }
2524}
2525
2526impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2527    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2528        *self = summary.max_id;
2529    }
2530}
2531
2532#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2533pub struct PathKey(Arc<Path>);
2534
2535impl Default for PathKey {
2536    fn default() -> Self {
2537        Self(Path::new("").into())
2538    }
2539}
2540
2541impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2542    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2543        self.0 = summary.max_path.clone();
2544    }
2545}
2546
2547struct BackgroundScanner {
2548    snapshot: Mutex<LocalSnapshot>,
2549    fs: Arc<dyn Fs>,
2550    status_updates_tx: UnboundedSender<ScanState>,
2551    executor: Arc<executor::Background>,
2552    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2553    prev_state: Mutex<BackgroundScannerState>,
2554    finished_initial_scan: bool,
2555}
2556
2557struct BackgroundScannerState {
2558    snapshot: Snapshot,
2559    event_paths: Vec<Arc<Path>>,
2560}
2561
2562impl BackgroundScanner {
2563    fn new(
2564        snapshot: LocalSnapshot,
2565        fs: Arc<dyn Fs>,
2566        status_updates_tx: UnboundedSender<ScanState>,
2567        executor: Arc<executor::Background>,
2568        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2569    ) -> Self {
2570        Self {
2571            fs,
2572            status_updates_tx,
2573            executor,
2574            refresh_requests_rx,
2575            prev_state: Mutex::new(BackgroundScannerState {
2576                snapshot: snapshot.snapshot.clone(),
2577                event_paths: Default::default(),
2578            }),
2579            snapshot: Mutex::new(snapshot),
2580            finished_initial_scan: false,
2581        }
2582    }
2583
2584    async fn run(
2585        &mut self,
2586        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2587    ) {
2588        use futures::FutureExt as _;
2589
2590        let (root_abs_path, root_inode) = {
2591            let snapshot = self.snapshot.lock();
2592            (
2593                snapshot.abs_path.clone(),
2594                snapshot.root_entry().map(|e| e.inode),
2595            )
2596        };
2597
2598        // Populate ignores above the root.
2599        let ignore_stack;
2600        for ancestor in root_abs_path.ancestors().skip(1) {
2601            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2602            {
2603                self.snapshot
2604                    .lock()
2605                    .ignores_by_parent_abs_path
2606                    .insert(ancestor.into(), (ignore.into(), 0));
2607            }
2608        }
2609        {
2610            let mut snapshot = self.snapshot.lock();
2611            snapshot.scan_id += 1;
2612            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2613            if ignore_stack.is_all() {
2614                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2615                    root_entry.is_ignored = true;
2616                    snapshot.insert_entry(root_entry, self.fs.as_ref());
2617                }
2618            }
2619        };
2620
2621        // Perform an initial scan of the directory.
2622        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2623        smol::block_on(scan_job_tx.send(ScanJob {
2624            abs_path: root_abs_path,
2625            path: Arc::from(Path::new("")),
2626            ignore_stack,
2627            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2628            scan_queue: scan_job_tx.clone(),
2629        }))
2630        .unwrap();
2631        drop(scan_job_tx);
2632        self.scan_dirs(true, scan_job_rx).await;
2633        {
2634            let mut snapshot = self.snapshot.lock();
2635            snapshot.completed_scan_id = snapshot.scan_id;
2636        }
2637        self.send_status_update(false, None);
2638
2639        // Process any FS events that occurred while performing the initial scan.
2640        // For these events, the reported changes cannot be as precise, because we
2641        // didn't have the previous state loaded yet.
2642        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2643            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2644            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2645                paths.extend(more_events.into_iter().map(|e| e.path));
2646            }
2647            self.process_events(paths).await;
2648        }
2649
2650        self.finished_initial_scan = true;
2651
2652        // Continue processing events until the worktree is dropped.
2653        loop {
2654            select_biased! {
2655                // Process any path refresh requests from the worktree. Prioritize
2656                // these before handling changes reported by the filesystem.
2657                request = self.refresh_requests_rx.recv().fuse() => {
2658                    let Ok((paths, barrier)) = request else { break };
2659                    if !self.process_refresh_request(paths, barrier).await {
2660                        return;
2661                    }
2662                }
2663
2664                events = events_rx.next().fuse() => {
2665                    let Some(events) = events else { break };
2666                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2667                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2668                        paths.extend(more_events.into_iter().map(|e| e.path));
2669                    }
2670                    self.process_events(paths).await;
2671                }
2672            }
2673        }
2674    }
2675
2676    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2677        if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
2678            paths.sort_unstable();
2679            util::extend_sorted(
2680                &mut self.prev_state.lock().event_paths,
2681                paths,
2682                usize::MAX,
2683                Ord::cmp,
2684            );
2685        }
2686        self.send_status_update(false, Some(barrier))
2687    }
2688
2689    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2690        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2691        let paths = self
2692            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2693            .await;
2694        if let Some(paths) = &paths {
2695            util::extend_sorted(
2696                &mut self.prev_state.lock().event_paths,
2697                paths.iter().cloned(),
2698                usize::MAX,
2699                Ord::cmp,
2700            );
2701        }
2702        drop(scan_job_tx);
2703        self.scan_dirs(false, scan_job_rx).await;
2704
2705        self.update_ignore_statuses().await;
2706
2707        let mut snapshot = self.snapshot.lock();
2708
2709        if let Some(paths) = paths {
2710            for path in paths {
2711                self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
2712            }
2713        }
2714
2715        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2716        git_repositories.retain(|work_directory_id, _| {
2717            snapshot
2718                .entry_for_id(*work_directory_id)
2719                .map_or(false, |entry| {
2720                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2721                })
2722        });
2723        snapshot.git_repositories = git_repositories;
2724
2725        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2726        git_repository_entries.retain(|_, entry| {
2727            snapshot
2728                .git_repositories
2729                .get(&entry.work_directory.0)
2730                .is_some()
2731        });
2732        snapshot.snapshot.repository_entries = git_repository_entries;
2733
2734        snapshot.removed_entry_ids.clear();
2735        snapshot.completed_scan_id = snapshot.scan_id;
2736
2737        drop(snapshot);
2738
2739        self.send_status_update(false, None);
2740        self.prev_state.lock().event_paths.clear();
2741    }
2742
2743    async fn scan_dirs(
2744        &self,
2745        enable_progress_updates: bool,
2746        scan_jobs_rx: channel::Receiver<ScanJob>,
2747    ) {
2748        use futures::FutureExt as _;
2749
2750        if self
2751            .status_updates_tx
2752            .unbounded_send(ScanState::Started)
2753            .is_err()
2754        {
2755            return;
2756        }
2757
2758        let progress_update_count = AtomicUsize::new(0);
2759        self.executor
2760            .scoped(|scope| {
2761                for _ in 0..self.executor.num_cpus() {
2762                    scope.spawn(async {
2763                        let mut last_progress_update_count = 0;
2764                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2765                        futures::pin_mut!(progress_update_timer);
2766
2767                        loop {
2768                            select_biased! {
2769                                // Process any path refresh requests before moving on to process
2770                                // the scan queue, so that user operations are prioritized.
2771                                request = self.refresh_requests_rx.recv().fuse() => {
2772                                    let Ok((paths, barrier)) = request else { break };
2773                                    if !self.process_refresh_request(paths, barrier).await {
2774                                        return;
2775                                    }
2776                                }
2777
2778                                // Send periodic progress updates to the worktree. Use an atomic counter
2779                                // to ensure that only one of the workers sends a progress update after
2780                                // the update interval elapses.
2781                                _ = progress_update_timer => {
2782                                    match progress_update_count.compare_exchange(
2783                                        last_progress_update_count,
2784                                        last_progress_update_count + 1,
2785                                        SeqCst,
2786                                        SeqCst
2787                                    ) {
2788                                        Ok(_) => {
2789                                            last_progress_update_count += 1;
2790                                            self.send_status_update(true, None);
2791                                        }
2792                                        Err(count) => {
2793                                            last_progress_update_count = count;
2794                                        }
2795                                    }
2796                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2797                                }
2798
2799                                // Recursively load directories from the file system.
2800                                job = scan_jobs_rx.recv().fuse() => {
2801                                    let Ok(job) = job else { break };
2802                                    if let Err(err) = self.scan_dir(&job).await {
2803                                        if job.path.as_ref() != Path::new("") {
2804                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2805                                        }
2806                                    }
2807                                }
2808                            }
2809                        }
2810                    })
2811                }
2812            })
2813            .await;
2814    }
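    // The `compare_exchange` in the loop above is what de-duplicates progress
    // updates across workers: each worker remembers the counter value it last
    // observed, only the worker that wins the increment sends an update, and the
    // losers adopt the winner's value and re-arm their timers. A stripped-down
    // sketch of the same pattern (illustrative, not project code):
    //
    //     let sent = AtomicUsize::new(0);
    //     let mut last_seen = 0;
    //     match sent.compare_exchange(last_seen, last_seen + 1, SeqCst, SeqCst) {
    //         Ok(_) => { last_seen += 1; /* this worker reports progress */ }
    //         Err(current) => last_seen = current, // another worker already did
    //     }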
2815
2816    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2817        let mut prev_state = self.prev_state.lock();
2818        let new_snapshot = self.snapshot.lock().clone();
2819        let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());
2820
2821        let changes = self.build_change_set(
2822            &old_snapshot,
2823            &new_snapshot.snapshot,
2824            &prev_state.event_paths,
2825        );
2826
2827        self.status_updates_tx
2828            .unbounded_send(ScanState::Updated {
2829                snapshot: new_snapshot,
2830                changes,
2831                scanning,
2832                barrier,
2833            })
2834            .is_ok()
2835    }
2836
2837    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2838        let mut new_entries: Vec<Entry> = Vec::new();
2839        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2840        let mut ignore_stack = job.ignore_stack.clone();
2841        let mut new_ignore = None;
2842        let (root_abs_path, root_char_bag, next_entry_id) = {
2843            let snapshot = self.snapshot.lock();
2844            (
2845                snapshot.abs_path().clone(),
2846                snapshot.root_char_bag,
2847                snapshot.next_entry_id.clone(),
2848            )
2849        };
2850        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2851        while let Some(child_abs_path) = child_paths.next().await {
2852            let child_abs_path: Arc<Path> = match child_abs_path {
2853                Ok(child_abs_path) => child_abs_path.into(),
2854                Err(error) => {
2855                    log::error!("error processing entry {:?}", error);
2856                    continue;
2857                }
2858            };
2859
2860            let child_name = child_abs_path.file_name().unwrap();
2861            let child_path: Arc<Path> = job.path.join(child_name).into();
2862            let child_metadata = match self.fs.metadata(&child_abs_path).await {
2863                Ok(Some(metadata)) => metadata,
2864                Ok(None) => continue,
2865                Err(err) => {
2866                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
2867                    continue;
2868                }
2869            };
2870
2871            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2872            if child_name == *GITIGNORE {
2873                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2874                    Ok(ignore) => {
2875                        let ignore = Arc::new(ignore);
2876                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2877                        new_ignore = Some(ignore);
2878                    }
2879                    Err(error) => {
2880                        log::error!(
2881                            "error loading .gitignore file {:?} - {:?}",
2882                            child_name,
2883                            error
2884                        );
2885                    }
2886                }
2887
2888                // Update the ignore status of any child entries we've already processed to
2889                // reflect the ignore file in the current directory. Because `.gitignore` starts
2890                // with a `.`, it tends to appear early, so these entries should rarely be
2891                // numerous. Update the ignore stack associated with any new jobs as well.
2892                let mut new_jobs = new_jobs.iter_mut();
2893                for entry in &mut new_entries {
2894                    let entry_abs_path = root_abs_path.join(&entry.path);
2895                    entry.is_ignored =
2896                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2897
2898                    if entry.is_dir() {
2899                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2900                            job.ignore_stack = if entry.is_ignored {
2901                                IgnoreStack::all()
2902                            } else {
2903                                ignore_stack.clone()
2904                            };
2905                        }
2906                    }
2907                }
2908            }
2909
2910            let mut child_entry = Entry::new(
2911                child_path.clone(),
2912                &child_metadata,
2913                &next_entry_id,
2914                root_char_bag,
2915            );
2916
2917            if child_entry.is_dir() {
2918                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2919                child_entry.is_ignored = is_ignored;
2920
2921                // Avoid recursing forever (and eventually crashing) when a recursive symlink is encountered
2922                if !job.ancestor_inodes.contains(&child_entry.inode) {
2923                    let mut ancestor_inodes = job.ancestor_inodes.clone();
2924                    ancestor_inodes.insert(child_entry.inode);
2925
2926                    new_jobs.push(Some(ScanJob {
2927                        abs_path: child_abs_path,
2928                        path: child_path,
2929                        ignore_stack: if is_ignored {
2930                            IgnoreStack::all()
2931                        } else {
2932                            ignore_stack.clone()
2933                        },
2934                        ancestor_inodes,
2935                        scan_queue: job.scan_queue.clone(),
2936                    }));
2937                } else {
2938                    new_jobs.push(None);
2939                }
2940            } else {
2941                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2942            }
2943
2944            new_entries.push(child_entry);
2945        }
2946
2947        self.snapshot.lock().populate_dir(
2948            job.path.clone(),
2949            new_entries,
2950            new_ignore,
2951            self.fs.as_ref(),
2952        );
2953
2954        for new_job in new_jobs {
2955            if let Some(new_job) = new_job {
2956                job.scan_queue.send(new_job).await.unwrap();
2957            }
2958        }
2959
2960        Ok(())
2961    }
2962
2963    async fn reload_entries_for_paths(
2964        &self,
2965        mut abs_paths: Vec<PathBuf>,
2966        scan_queue_tx: Option<Sender<ScanJob>>,
2967    ) -> Option<Vec<Arc<Path>>> {
2968        let doing_recursive_update = scan_queue_tx.is_some();
2969
2970        abs_paths.sort_unstable();
2971        abs_paths.dedup_by(|a, b| a.starts_with(&b));
2972
2973        let root_abs_path = self.snapshot.lock().abs_path.clone();
2974        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
2975        let metadata = futures::future::join_all(
2976            abs_paths
2977                .iter()
2978                .map(|abs_path| self.fs.metadata(&abs_path))
2979                .collect::<Vec<_>>(),
2980        )
2981        .await;
2982
2983        let mut snapshot = self.snapshot.lock();
2984        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
2985        snapshot.scan_id += 1;
2986        if is_idle && !doing_recursive_update {
2987            snapshot.completed_scan_id = snapshot.scan_id;
2988        }
2989
2990        // Remove any entries for paths that no longer exist or are being recursively
2991        // refreshed. Do this before adding any new entries, so that renames can be
2992        // detected regardless of the order of the paths.
2993        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
2994        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
2995            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
2996                if matches!(metadata, Ok(None)) || doing_recursive_update {
2997                    snapshot.remove_path(path);
2998                }
2999                event_paths.push(path.into());
3000            } else {
3001                log::error!(
3002                    "unexpected event {:?} for root path {:?}",
3003                    abs_path,
3004                    root_canonical_path
3005                );
3006            }
3007        }
3008
3009        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
3010            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
3011
3012            match metadata {
3013                Ok(Some(metadata)) => {
3014                    let ignore_stack =
3015                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
3016                    let mut fs_entry = Entry::new(
3017                        path.clone(),
3018                        &metadata,
3019                        snapshot.next_entry_id.as_ref(),
3020                        snapshot.root_char_bag,
3021                    );
3022                    fs_entry.is_ignored = ignore_stack.is_all();
3023                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
3024
3025                    if let Some(scan_queue_tx) = &scan_queue_tx {
3026                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
3027                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
3028                            ancestor_inodes.insert(metadata.inode);
3029                            smol::block_on(scan_queue_tx.send(ScanJob {
3030                                abs_path,
3031                                path,
3032                                ignore_stack,
3033                                ancestor_inodes,
3034                                scan_queue: scan_queue_tx.clone(),
3035                            }))
3036                            .unwrap();
3037                        }
3038                    }
3039                }
3040                Ok(None) => {
3041                    self.remove_repo_path(&path, &mut snapshot);
3042                }
3043                Err(err) => {
3044                    // TODO - create a special 'error' entry in the entries tree to mark this
3045                    log::error!("error reading file on event {:?}", err);
3046                }
3047            }
3048        }
3049
3050        Some(event_paths)
3051    }
3052
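    /// Clears git status information for a removed path. If the path belongs to
    /// a known repository (and is not itself inside a `.git` directory), the
    /// repository's scan id is bumped and statuses for the path and its
    /// descendants are dropped.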
3053    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3054        if !path
3055            .components()
3056            .any(|component| component.as_os_str() == *DOT_GIT)
3057        {
3058            let scan_id = snapshot.scan_id;
3059            let repo = snapshot.repo_for(&path)?;
3060
3061            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
3062
3063            let work_dir = repo.work_directory(snapshot)?;
3064            let work_dir_id = repo.work_directory;
3065
3066            snapshot
3067                .git_repositories
3068                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3069
3070            snapshot.repository_entries.update(&work_dir, |entry| {
3071                entry
3072                    .statuses
3073                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
3074            });
3075        }
3076
3077        Some(())
3078    }
3079
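    /// Updates git repository state for a changed path. A change inside a
    /// `.git` directory reloads that repository's index, branch, and statuses;
    /// a change to an ordinary file refreshes the statuses of just the affected
    /// entries in the containing repository.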
3080    fn reload_repo_for_file_path(
3081        &self,
3082        path: &Path,
3083        snapshot: &mut LocalSnapshot,
3084        fs: &dyn Fs,
3085    ) -> Option<()> {
3086        let scan_id = snapshot.scan_id;
3087
3088        if path
3089            .components()
3090            .any(|component| component.as_os_str() == *DOT_GIT)
3091        {
3092            let (entry_id, repo_ptr) = {
3093                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
3094                    let dot_git_dir = path.ancestors()
 3095                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
 3096                        .next()?;
3097
3098                    snapshot.build_repo(dot_git_dir.into(), fs);
3099                    return None;
3100                };
3101                if repo.full_scan_id == scan_id {
3102                    return None;
3103                }
3104                (*entry_id, repo.repo_ptr.to_owned())
3105            };
3106
3107            let work_dir = snapshot
3108                .entry_for_id(entry_id)
3109                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
3110
3111            let repo = repo_ptr.lock();
3112            repo.reload_index();
3113            let branch = repo.branch_name();
3114            let statuses = repo.statuses().unwrap_or_default();
3115
3116            snapshot.git_repositories.update(&entry_id, |entry| {
3117                entry.scan_id = scan_id;
3118                entry.full_scan_id = scan_id;
3119            });
3120
3121            snapshot.repository_entries.update(&work_dir, |entry| {
3122                entry.branch = branch.map(Into::into);
3123                entry.statuses = statuses;
3124            });
3125        } else {
3126            if snapshot
3127                .entry_for_path(&path)
3128                .map(|entry| entry.is_ignored)
3129                .unwrap_or(false)
3130            {
3131                self.remove_repo_path(&path, snapshot);
3132                return None;
3133            }
3134
3135            let repo = snapshot.repo_for(&path)?;
3136
3137            let work_dir = repo.work_directory(snapshot)?;
3138            let work_dir_id = repo.work_directory.clone();
3139
3140            snapshot
3141                .git_repositories
3142                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3143
3144            let local_repo = snapshot.get_local_repo(&repo)?.to_owned();
3145
3146            // Short circuit if we've already scanned everything
3147            if local_repo.full_scan_id == scan_id {
3148                return None;
3149            }
3150
3151            let mut repository = snapshot.repository_entries.remove(&work_dir)?;
3152
3153            for entry in snapshot.descendent_entries(false, false, path) {
3154                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
3155                    continue;
3156                };
3157
3158                let status = local_repo.repo_ptr.lock().status(&repo_path);
3159                if let Some(status) = status {
3160                    repository.statuses.insert(repo_path.clone(), status);
3161                } else {
3162                    repository.statuses.remove(&repo_path);
3163                }
3164            }
3165
3166            snapshot.repository_entries.insert(work_dir, repository)
3167        }
3168
3169        Some(())
3170    }
3171
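    /// Recomputes ignore state after `.gitignore` changes: stale ignore entries
    /// are removed, and a queue of `UpdateIgnoreStatusJob`s is processed across
    /// multiple workers, interleaved with any pending path refresh requests.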
3172    async fn update_ignore_statuses(&self) {
3173        use futures::FutureExt as _;
3174
3175        let mut snapshot = self.snapshot.lock().clone();
3176        let mut ignores_to_update = Vec::new();
3177        let mut ignores_to_delete = Vec::new();
3178        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
3179            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
3180                if *scan_id > snapshot.completed_scan_id
3181                    && snapshot.entry_for_path(parent_path).is_some()
3182                {
3183                    ignores_to_update.push(parent_abs_path.clone());
3184                }
3185
3186                let ignore_path = parent_path.join(&*GITIGNORE);
3187                if snapshot.entry_for_path(ignore_path).is_none() {
3188                    ignores_to_delete.push(parent_abs_path.clone());
3189                }
3190            }
3191        }
3192
3193        for parent_abs_path in ignores_to_delete {
3194            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
3195            self.snapshot
3196                .lock()
3197                .ignores_by_parent_abs_path
3198                .remove(&parent_abs_path);
3199        }
3200
3201        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
3202        ignores_to_update.sort_unstable();
3203        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
3204        while let Some(parent_abs_path) = ignores_to_update.next() {
3205            while ignores_to_update
3206                .peek()
3207                .map_or(false, |p| p.starts_with(&parent_abs_path))
3208            {
3209                ignores_to_update.next().unwrap();
3210            }
3211
3212            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
3213            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
3214                abs_path: parent_abs_path,
3215                ignore_stack,
3216                ignore_queue: ignore_queue_tx.clone(),
3217            }))
3218            .unwrap();
3219        }
3220        drop(ignore_queue_tx);
3221
3222        self.executor
3223            .scoped(|scope| {
3224                for _ in 0..self.executor.num_cpus() {
3225                    scope.spawn(async {
3226                        loop {
3227                            select_biased! {
3228                                // Process any path refresh requests before moving on to process
3229                                // the queue of ignore statuses.
3230                                request = self.refresh_requests_rx.recv().fuse() => {
3231                                    let Ok((paths, barrier)) = request else { break };
3232                                    if !self.process_refresh_request(paths, barrier).await {
3233                                        return;
3234                                    }
3235                                }
3236
3237                                // Recursively process directories whose ignores have changed.
3238                                job = ignore_queue_rx.recv().fuse() => {
3239                                    let Ok(job) = job else { break };
3240                                    self.update_ignore_status(job, &snapshot).await;
3241                                }
3242                            }
3243                        }
3244                    });
3245                }
3246            })
3247            .await;
3248    }
3249
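    /// Applies an updated ignore stack to the children of one directory,
    /// enqueueing follow-up jobs for child directories and recording entries
    /// whose ignored state changed.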
3250    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
3251        let mut ignore_stack = job.ignore_stack;
3252        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
3253            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3254        }
3255
3256        let mut entries_by_id_edits = Vec::new();
3257        let mut entries_by_path_edits = Vec::new();
3258        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
3259        for mut entry in snapshot.child_entries(path).cloned() {
3260            let was_ignored = entry.is_ignored;
3261            let abs_path = snapshot.abs_path().join(&entry.path);
3262            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
3263            if entry.is_dir() {
3264                let child_ignore_stack = if entry.is_ignored {
3265                    IgnoreStack::all()
3266                } else {
3267                    ignore_stack.clone()
3268                };
3269                job.ignore_queue
3270                    .send(UpdateIgnoreStatusJob {
3271                        abs_path: abs_path.into(),
3272                        ignore_stack: child_ignore_stack,
3273                        ignore_queue: job.ignore_queue.clone(),
3274                    })
3275                    .await
3276                    .unwrap();
3277            }
3278
3279            if entry.is_ignored != was_ignored {
3280                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
3281                path_entry.scan_id = snapshot.scan_id;
3282                path_entry.is_ignored = entry.is_ignored;
3283                entries_by_id_edits.push(Edit::Insert(path_entry));
3284                entries_by_path_edits.push(Edit::Insert(entry));
3285            }
3286        }
3287
3288        let mut snapshot = self.snapshot.lock();
3289        snapshot.entries_by_path.edit(entries_by_path_edits, &());
3290        snapshot.entries_by_id.edit(entries_by_id_edits, &());
3291    }
3292
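    /// Diffs the old and new snapshots at the given event paths, classifying
    /// each affected entry as added, removed, or updated. Events received
    /// before the initial scan finished are reported as `AddedOrUpdated`, since
    /// it is unknown whether the entry existed beforehand.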
3293    fn build_change_set(
3294        &self,
3295        old_snapshot: &Snapshot,
3296        new_snapshot: &Snapshot,
3297        event_paths: &[Arc<Path>],
3298    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
3299        use PathChange::{Added, AddedOrUpdated, Removed, Updated};
3300
3301        let mut changes = HashMap::default();
3302        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3303        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3304        let received_before_initialized = !self.finished_initial_scan;
3305
3306        for path in event_paths {
3307            let path = PathKey(path.clone());
3308            old_paths.seek(&path, Bias::Left, &());
3309            new_paths.seek(&path, Bias::Left, &());
3310
3311            loop {
3312                match (old_paths.item(), new_paths.item()) {
3313                    (Some(old_entry), Some(new_entry)) => {
3314                        if old_entry.path > path.0
3315                            && new_entry.path > path.0
3316                            && !old_entry.path.starts_with(&path.0)
3317                            && !new_entry.path.starts_with(&path.0)
3318                        {
3319                            break;
3320                        }
3321
3322                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3323                            Ordering::Less => {
3324                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3325                                old_paths.next(&());
3326                            }
3327                            Ordering::Equal => {
3328                                if received_before_initialized {
3329                                    // If the worktree was not fully initialized when this event was generated,
3330                                    // we can't know whether this entry was added during the scan or whether
3331                                    // it was merely updated.
3332                                    changes.insert(
3333                                        (new_entry.path.clone(), new_entry.id),
3334                                        AddedOrUpdated,
3335                                    );
3336                                } else if old_entry.mtime != new_entry.mtime {
3337                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
3338                                }
3339                                old_paths.next(&());
3340                                new_paths.next(&());
3341                            }
3342                            Ordering::Greater => {
3343                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
3344                                new_paths.next(&());
3345                            }
3346                        }
3347                    }
3348                    (Some(old_entry), None) => {
3349                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3350                        old_paths.next(&());
3351                    }
3352                    (None, Some(new_entry)) => {
3353                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
3354                        new_paths.next(&());
3355                    }
3356                    (None, None) => break,
3357                }
3358            }
3359        }
3360
3361        changes
3362    }
3363
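    /// Resolves after a short delay while `running` is true, and never resolves
    /// otherwise. With the fake filesystem in tests, a random delay is
    /// simulated instead.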
3364    async fn progress_timer(&self, running: bool) {
3365        if !running {
3366            return futures::future::pending().await;
3367        }
3368
3369        #[cfg(any(test, feature = "test-support"))]
3370        if self.fs.is_fake() {
3371            return self.executor.simulate_random_delay().await;
3372        }
3373
3374        smol::Timer::after(Duration::from_millis(100)).await;
3375    }
3376}
3377
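/// Extends the worktree root's character bag with the lowercased characters of
/// `path`, producing the character bag stored on the entry for fuzzy matching.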
3378fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3379    let mut result = root_char_bag;
3380    result.extend(
3381        path.to_string_lossy()
3382            .chars()
3383            .map(|c| c.to_ascii_lowercase()),
3384    );
3385    result
3386}
3387
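/// A unit of work for the background scanner: a directory to scan, the ignore
/// stack in effect there, and the inodes of its ancestors (used to detect
/// cyclic symlinks).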
3388struct ScanJob {
3389    abs_path: Arc<Path>,
3390    path: Arc<Path>,
3391    ignore_stack: Arc<IgnoreStack>,
3392    scan_queue: Sender<ScanJob>,
3393    ancestor_inodes: TreeSet<u64>,
3394}
3395
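/// A unit of work for re-evaluating ignore status beneath a directory after a
/// `.gitignore` change.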
3396struct UpdateIgnoreStatusJob {
3397    abs_path: Arc<Path>,
3398    ignore_stack: Arc<IgnoreStack>,
3399    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3400}
3401
3402pub trait WorktreeHandle {
3403    #[cfg(any(test, feature = "test-support"))]
3404    fn flush_fs_events<'a>(
3405        &self,
3406        cx: &'a gpui::TestAppContext,
3407    ) -> futures::future::LocalBoxFuture<'a, ()>;
3408}
3409
3410impl WorktreeHandle for ModelHandle<Worktree> {
 3411    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
 3412    // occurred before the worktree was constructed. These events can cause the worktree to perform
3413    // extra directory scans, and emit extra scan-state notifications.
3414    //
3415    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3416    // to ensure that all redundant FS events have already been processed.
3417    #[cfg(any(test, feature = "test-support"))]
3418    fn flush_fs_events<'a>(
3419        &self,
3420        cx: &'a gpui::TestAppContext,
3421    ) -> futures::future::LocalBoxFuture<'a, ()> {
3422        use smol::future::FutureExt;
3423
3424        let filename = "fs-event-sentinel";
3425        let tree = self.clone();
3426        let (fs, root_path) = self.read_with(cx, |tree, _| {
3427            let tree = tree.as_local().unwrap();
3428            (tree.fs.clone(), tree.abs_path().clone())
3429        });
3430
3431        async move {
3432            fs.create_file(&root_path.join(filename), Default::default())
3433                .await
3434                .unwrap();
3435            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3436                .await;
3437
3438            fs.remove_file(&root_path.join(filename), Default::default())
3439                .await
3440                .unwrap();
3441            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3442                .await;
3443
3444            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3445                .await;
3446        }
3447        .boxed_local()
3448    }
3449}
3450
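/// The maximum path and running entry counts accumulated while seeking through
/// the entry tree, used to compute offsets that respect a traversal's directory
/// and ignore filters.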
3451#[derive(Clone, Debug)]
3452struct TraversalProgress<'a> {
3453    max_path: &'a Path,
3454    count: usize,
3455    visible_count: usize,
3456    file_count: usize,
3457    visible_file_count: usize,
3458}
3459
3460impl<'a> TraversalProgress<'a> {
3461    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3462        match (include_ignored, include_dirs) {
3463            (true, true) => self.count,
3464            (true, false) => self.file_count,
3465            (false, true) => self.visible_count,
3466            (false, false) => self.visible_file_count,
3467        }
3468    }
3469}
3470
3471impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3472    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3473        self.max_path = summary.max_path.as_ref();
3474        self.count += summary.count;
3475        self.visible_count += summary.visible_count;
3476        self.file_count += summary.file_count;
3477        self.visible_file_count += summary.visible_file_count;
3478    }
3479}
3480
3481impl<'a> Default for TraversalProgress<'a> {
3482    fn default() -> Self {
3483        Self {
3484            max_path: Path::new(""),
3485            count: 0,
3486            visible_count: 0,
3487            file_count: 0,
3488            visible_file_count: 0,
3489        }
3490    }
3491}
3492
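/// A cursor over worktree entries in path order, optionally skipping
/// directories and/or ignored entries.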
3493pub struct Traversal<'a> {
3494    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3495    include_ignored: bool,
3496    include_dirs: bool,
3497}
3498
3499impl<'a> Traversal<'a> {
3500    pub fn advance(&mut self) -> bool {
3501        self.cursor.seek_forward(
3502            &TraversalTarget::Count {
3503                count: self.end_offset() + 1,
3504                include_dirs: self.include_dirs,
3505                include_ignored: self.include_ignored,
3506            },
3507            Bias::Left,
3508            &(),
3509        )
3510    }
3511
3512    pub fn advance_to_sibling(&mut self) -> bool {
3513        while let Some(entry) = self.cursor.item() {
3514            self.cursor.seek_forward(
3515                &TraversalTarget::PathSuccessor(&entry.path),
3516                Bias::Left,
3517                &(),
3518            );
3519            if let Some(entry) = self.cursor.item() {
3520                if (self.include_dirs || !entry.is_dir())
3521                    && (self.include_ignored || !entry.is_ignored)
3522                {
3523                    return true;
3524                }
3525            }
3526        }
3527        false
3528    }
3529
3530    pub fn entry(&self) -> Option<&'a Entry> {
3531        self.cursor.item()
3532    }
3533
3534    pub fn start_offset(&self) -> usize {
3535        self.cursor
3536            .start()
3537            .count(self.include_dirs, self.include_ignored)
3538    }
3539
3540    pub fn end_offset(&self) -> usize {
3541        self.cursor
3542            .end(&())
3543            .count(self.include_dirs, self.include_ignored)
3544    }
3545}
3546
3547impl<'a> Iterator for Traversal<'a> {
3548    type Item = &'a Entry;
3549
3550    fn next(&mut self) -> Option<Self::Item> {
3551        if let Some(item) = self.entry() {
3552            self.advance();
3553            Some(item)
3554        } else {
3555            None
3556        }
3557    }
3558}
3559
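/// A seek target for a `Traversal` cursor: an exact path, the first entry past
/// all descendants of a path, or an absolute entry count.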
3560#[derive(Debug)]
3561enum TraversalTarget<'a> {
3562    Path(&'a Path),
3563    PathSuccessor(&'a Path),
3564    Count {
3565        count: usize,
3566        include_ignored: bool,
3567        include_dirs: bool,
3568    },
3569}
3570
3571impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3572    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3573        match self {
3574            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3575            TraversalTarget::PathSuccessor(path) => {
3576                if !cursor_location.max_path.starts_with(path) {
3577                    Ordering::Equal
3578                } else {
3579                    Ordering::Greater
3580                }
3581            }
3582            TraversalTarget::Count {
3583                count,
3584                include_dirs,
3585                include_ignored,
3586            } => Ord::cmp(
3587                count,
3588                &cursor_location.count(*include_dirs, *include_ignored),
3589            ),
3590        }
3591    }
3592}
3593
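/// Iterates over the immediate children of `parent_path`, skipping each child's
/// descendants.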
3594struct ChildEntriesIter<'a> {
3595    parent_path: &'a Path,
3596    traversal: Traversal<'a>,
3597}
3598
3599impl<'a> Iterator for ChildEntriesIter<'a> {
3600    type Item = &'a Entry;
3601
3602    fn next(&mut self) -> Option<Self::Item> {
3603        if let Some(item) = self.traversal.entry() {
3604            if item.path.starts_with(&self.parent_path) {
3605                self.traversal.advance_to_sibling();
3606                return Some(item);
3607            }
3608        }
3609        None
3610    }
3611}
3612
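/// Iterates over all entries beneath `parent_path` in depth-first path order.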
3613struct DescendentEntriesIter<'a> {
3614    parent_path: &'a Path,
3615    traversal: Traversal<'a>,
3616}
3617
3618impl<'a> Iterator for DescendentEntriesIter<'a> {
3619    type Item = &'a Entry;
3620
3621    fn next(&mut self) -> Option<Self::Item> {
3622        if let Some(item) = self.traversal.entry() {
3623            if item.path.starts_with(&self.parent_path) {
3624                self.traversal.advance();
3625                return Some(item);
3626            }
3627        }
3628        None
3629    }
3630}
3631
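/// Serializes an entry into its protobuf representation for transmission to
/// remote peers.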
3632impl<'a> From<&'a Entry> for proto::Entry {
3633    fn from(entry: &'a Entry) -> Self {
3634        Self {
3635            id: entry.id.to_proto(),
3636            is_dir: entry.is_dir(),
3637            path: entry.path.to_string_lossy().into(),
3638            inode: entry.inode,
3639            mtime: Some(entry.mtime.into()),
3640            is_symlink: entry.is_symlink,
3641            is_ignored: entry.is_ignored,
3642        }
3643    }
3644}
3645
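/// Deserializes an entry received from a remote peer, rebuilding its character
/// bag from the worktree's root character bag. Fails if the entry has no mtime.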
3646impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3647    type Error = anyhow::Error;
3648
3649    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3650        if let Some(mtime) = entry.mtime {
3651            let kind = if entry.is_dir {
3652                EntryKind::Dir
3653            } else {
3654                let mut char_bag = *root_char_bag;
3655                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3656                EntryKind::File(char_bag)
3657            };
3658            let path: Arc<Path> = PathBuf::from(entry.path).into();
3659            Ok(Entry {
3660                id: ProjectEntryId::from_proto(entry.id),
3661                kind,
3662                path,
3663                inode: entry.inode,
3664                mtime: mtime.into(),
3665                is_symlink: entry.is_symlink,
3666                is_ignored: entry.is_ignored,
3667            })
3668        } else {
3669            Err(anyhow!(
3670                "missing mtime in remote worktree entry {:?}",
3671                entry.path
3672            ))
3673        }
3674    }
3675}
3676
3677#[cfg(test)]
3678mod tests {
3679    use super::*;
3680    use fs::{FakeFs, RealFs};
3681    use gpui::{executor::Deterministic, TestAppContext};
3682    use pretty_assertions::assert_eq;
3683    use rand::prelude::*;
3684    use serde_json::json;
3685    use std::{env, fmt::Write};
3686    use util::{http::FakeHttpClient, test::temp_tree};
3687
3688    #[gpui::test]
3689    async fn test_traversal(cx: &mut TestAppContext) {
3690        let fs = FakeFs::new(cx.background());
3691        fs.insert_tree(
3692            "/root",
3693            json!({
3694               ".gitignore": "a/b\n",
3695               "a": {
3696                   "b": "",
3697                   "c": "",
3698               }
3699            }),
3700        )
3701        .await;
3702
3703        let http_client = FakeHttpClient::with_404_response();
3704        let client = cx.read(|cx| Client::new(http_client, cx));
3705
3706        let tree = Worktree::local(
3707            client,
3708            Path::new("/root"),
3709            true,
3710            fs,
3711            Default::default(),
3712            &mut cx.to_async(),
3713        )
3714        .await
3715        .unwrap();
3716        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3717            .await;
3718
3719        tree.read_with(cx, |tree, _| {
3720            assert_eq!(
3721                tree.entries(false)
3722                    .map(|entry| entry.path.as_ref())
3723                    .collect::<Vec<_>>(),
3724                vec![
3725                    Path::new(""),
3726                    Path::new(".gitignore"),
3727                    Path::new("a"),
3728                    Path::new("a/c"),
3729                ]
3730            );
3731            assert_eq!(
3732                tree.entries(true)
3733                    .map(|entry| entry.path.as_ref())
3734                    .collect::<Vec<_>>(),
3735                vec![
3736                    Path::new(""),
3737                    Path::new(".gitignore"),
3738                    Path::new("a"),
3739                    Path::new("a/b"),
3740                    Path::new("a/c"),
3741                ]
3742            );
3743        })
3744    }
3745
3746    #[gpui::test]
3747    async fn test_descendent_entries(cx: &mut TestAppContext) {
3748        let fs = FakeFs::new(cx.background());
3749        fs.insert_tree(
3750            "/root",
3751            json!({
3752                "a": "",
3753                "b": {
3754                   "c": {
3755                       "d": ""
3756                   },
3757                   "e": {}
3758                },
3759                "f": "",
3760                "g": {
3761                    "h": {}
3762                },
3763                "i": {
3764                    "j": {
3765                        "k": ""
3766                    },
3767                    "l": {
3768
3769                    }
3770                },
3771                ".gitignore": "i/j\n",
3772            }),
3773        )
3774        .await;
3775
3776        let http_client = FakeHttpClient::with_404_response();
3777        let client = cx.read(|cx| Client::new(http_client, cx));
3778
3779        let tree = Worktree::local(
3780            client,
3781            Path::new("/root"),
3782            true,
3783            fs,
3784            Default::default(),
3785            &mut cx.to_async(),
3786        )
3787        .await
3788        .unwrap();
3789        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3790            .await;
3791
3792        tree.read_with(cx, |tree, _| {
3793            assert_eq!(
3794                tree.descendent_entries(false, false, Path::new("b"))
3795                    .map(|entry| entry.path.as_ref())
3796                    .collect::<Vec<_>>(),
3797                vec![Path::new("b/c/d"),]
3798            );
3799            assert_eq!(
3800                tree.descendent_entries(true, false, Path::new("b"))
3801                    .map(|entry| entry.path.as_ref())
3802                    .collect::<Vec<_>>(),
3803                vec![
3804                    Path::new("b"),
3805                    Path::new("b/c"),
3806                    Path::new("b/c/d"),
3807                    Path::new("b/e"),
3808                ]
3809            );
3810
3811            assert_eq!(
3812                tree.descendent_entries(false, false, Path::new("g"))
3813                    .map(|entry| entry.path.as_ref())
3814                    .collect::<Vec<_>>(),
3815                Vec::<PathBuf>::new()
3816            );
3817            assert_eq!(
3818                tree.descendent_entries(true, false, Path::new("g"))
3819                    .map(|entry| entry.path.as_ref())
3820                    .collect::<Vec<_>>(),
3821                vec![Path::new("g"), Path::new("g/h"),]
3822            );
3823
3824            assert_eq!(
3825                tree.descendent_entries(false, false, Path::new("i"))
3826                    .map(|entry| entry.path.as_ref())
3827                    .collect::<Vec<_>>(),
3828                Vec::<PathBuf>::new()
3829            );
3830            assert_eq!(
3831                tree.descendent_entries(false, true, Path::new("i"))
3832                    .map(|entry| entry.path.as_ref())
3833                    .collect::<Vec<_>>(),
3834                vec![Path::new("i/j/k")]
3835            );
3836            assert_eq!(
3837                tree.descendent_entries(true, false, Path::new("i"))
3838                    .map(|entry| entry.path.as_ref())
3839                    .collect::<Vec<_>>(),
3840                vec![Path::new("i"), Path::new("i/l"),]
3841            );
3842        })
3843    }
3844
3845    #[gpui::test(iterations = 10)]
3846    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3847        let fs = FakeFs::new(cx.background());
3848        fs.insert_tree(
3849            "/root",
3850            json!({
3851                "lib": {
3852                    "a": {
3853                        "a.txt": ""
3854                    },
3855                    "b": {
3856                        "b.txt": ""
3857                    }
3858                }
3859            }),
3860        )
3861        .await;
3862        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3863        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3864
3865        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3866        let tree = Worktree::local(
3867            client,
3868            Path::new("/root"),
3869            true,
3870            fs.clone(),
3871            Default::default(),
3872            &mut cx.to_async(),
3873        )
3874        .await
3875        .unwrap();
3876
3877        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3878            .await;
3879
3880        tree.read_with(cx, |tree, _| {
3881            assert_eq!(
3882                tree.entries(false)
3883                    .map(|entry| entry.path.as_ref())
3884                    .collect::<Vec<_>>(),
3885                vec![
3886                    Path::new(""),
3887                    Path::new("lib"),
3888                    Path::new("lib/a"),
3889                    Path::new("lib/a/a.txt"),
3890                    Path::new("lib/a/lib"),
3891                    Path::new("lib/b"),
3892                    Path::new("lib/b/b.txt"),
3893                    Path::new("lib/b/lib"),
3894                ]
3895            );
3896        });
3897
3898        fs.rename(
3899            Path::new("/root/lib/a/lib"),
3900            Path::new("/root/lib/a/lib-2"),
3901            Default::default(),
3902        )
3903        .await
3904        .unwrap();
3905        executor.run_until_parked();
3906        tree.read_with(cx, |tree, _| {
3907            assert_eq!(
3908                tree.entries(false)
3909                    .map(|entry| entry.path.as_ref())
3910                    .collect::<Vec<_>>(),
3911                vec![
3912                    Path::new(""),
3913                    Path::new("lib"),
3914                    Path::new("lib/a"),
3915                    Path::new("lib/a/a.txt"),
3916                    Path::new("lib/a/lib-2"),
3917                    Path::new("lib/b"),
3918                    Path::new("lib/b/b.txt"),
3919                    Path::new("lib/b/lib"),
3920                ]
3921            );
3922        });
3923    }
3924
3925    #[gpui::test]
3926    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3927        let parent_dir = temp_tree(json!({
3928            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3929            "tree": {
3930                ".git": {},
3931                ".gitignore": "ignored-dir\n",
3932                "tracked-dir": {
3933                    "tracked-file1": "",
3934                    "ancestor-ignored-file1": "",
3935                },
3936                "ignored-dir": {
3937                    "ignored-file1": ""
3938                }
3939            }
3940        }));
3941        let dir = parent_dir.path().join("tree");
3942
3943        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3944
3945        let tree = Worktree::local(
3946            client,
3947            dir.as_path(),
3948            true,
3949            Arc::new(RealFs),
3950            Default::default(),
3951            &mut cx.to_async(),
3952        )
3953        .await
3954        .unwrap();
3955        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3956            .await;
3957        tree.flush_fs_events(cx).await;
3958        cx.read(|cx| {
3959            let tree = tree.read(cx);
3960            assert!(
3961                !tree
3962                    .entry_for_path("tracked-dir/tracked-file1")
3963                    .unwrap()
3964                    .is_ignored
3965            );
3966            assert!(
3967                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3968                    .unwrap()
3969                    .is_ignored
3970            );
3971            assert!(
3972                tree.entry_for_path("ignored-dir/ignored-file1")
3973                    .unwrap()
3974                    .is_ignored
3975            );
3976        });
3977
3978        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3979        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3980        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3981        tree.flush_fs_events(cx).await;
3982        cx.read(|cx| {
3983            let tree = tree.read(cx);
3984            assert!(
3985                !tree
3986                    .entry_for_path("tracked-dir/tracked-file2")
3987                    .unwrap()
3988                    .is_ignored
3989            );
3990            assert!(
3991                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3992                    .unwrap()
3993                    .is_ignored
3994            );
3995            assert!(
3996                tree.entry_for_path("ignored-dir/ignored-file2")
3997                    .unwrap()
3998                    .is_ignored
3999            );
4000            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
4001        });
4002    }
4003
4004    #[gpui::test]
4005    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
4006        let root = temp_tree(json!({
4007            "dir1": {
4008                ".git": {},
4009                "deps": {
4010                    "dep1": {
4011                        ".git": {},
4012                        "src": {
4013                            "a.txt": ""
4014                        }
4015                    }
4016                },
4017                "src": {
4018                    "b.txt": ""
4019                }
4020            },
4021            "c.txt": "",
4022        }));
4023
4024        let http_client = FakeHttpClient::with_404_response();
4025        let client = cx.read(|cx| Client::new(http_client, cx));
4026        let tree = Worktree::local(
4027            client,
4028            root.path(),
4029            true,
4030            Arc::new(RealFs),
4031            Default::default(),
4032            &mut cx.to_async(),
4033        )
4034        .await
4035        .unwrap();
4036
4037        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4038            .await;
4039        tree.flush_fs_events(cx).await;
4040
4041        tree.read_with(cx, |tree, _cx| {
4042            let tree = tree.as_local().unwrap();
4043
4044            assert!(tree.repo_for("c.txt".as_ref()).is_none());
4045
4046            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
4047            assert_eq!(
4048                entry
4049                    .work_directory(tree)
4050                    .map(|directory| directory.as_ref().to_owned()),
4051                Some(Path::new("dir1").to_owned())
4052            );
4053
4054            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
4055            assert_eq!(
4056                entry
4057                    .work_directory(tree)
4058                    .map(|directory| directory.as_ref().to_owned()),
4059                Some(Path::new("dir1/deps/dep1").to_owned())
4060            );
4061        });
4062
4063        let repo_update_events = Arc::new(Mutex::new(vec![]));
4064        tree.update(cx, |_, cx| {
4065            let repo_update_events = repo_update_events.clone();
4066            cx.subscribe(&tree, move |_, _, event, _| {
4067                if let Event::UpdatedGitRepositories(update) = event {
4068                    repo_update_events.lock().push(update.clone());
4069                }
4070            })
4071            .detach();
4072        });
4073
4074        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
4075        tree.flush_fs_events(cx).await;
4076
4077        assert_eq!(
4078            repo_update_events.lock()[0]
4079                .keys()
4080                .cloned()
4081                .collect::<Vec<Arc<Path>>>(),
4082            vec![Path::new("dir1").into()]
4083        );
4084
4085        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
4086        tree.flush_fs_events(cx).await;
4087
4088        tree.read_with(cx, |tree, _cx| {
4089            let tree = tree.as_local().unwrap();
4090
4091            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
4092        });
4093    }
4094
4095    #[gpui::test]
4096    async fn test_git_status(cx: &mut TestAppContext) {
4097        #[track_caller]
4098        fn git_init(path: &Path) -> git2::Repository {
4099            git2::Repository::init(path).expect("Failed to initialize git repository")
4100        }
4101
4102        #[track_caller]
4103        fn git_add(path: &Path, repo: &git2::Repository) {
4104            let mut index = repo.index().expect("Failed to get index");
 4105            index.add_path(path).expect("Failed to add path to index");
4106            index.write().expect("Failed to write index");
4107        }
4108
4109        #[track_caller]
4110        fn git_remove_index(path: &Path, repo: &git2::Repository) {
4111            let mut index = repo.index().expect("Failed to get index");
 4112            index.remove_path(path).expect("Failed to remove path from index");
4113            index.write().expect("Failed to write index");
4114        }
4115
4116        #[track_caller]
4117        fn git_commit(msg: &'static str, repo: &git2::Repository) {
4118            use git2::Signature;
4119
4120            let signature = Signature::now("test", "test@zed.dev").unwrap();
4121            let oid = repo.index().unwrap().write_tree().unwrap();
4122            let tree = repo.find_tree(oid).unwrap();
4123            if let Some(head) = repo.head().ok() {
4124                let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
4125
4126                let parent_commit = parent_obj.as_commit().unwrap();
4127
4128                repo.commit(
4129                    Some("HEAD"),
4130                    &signature,
4131                    &signature,
4132                    msg,
4133                    &tree,
4134                    &[parent_commit],
4135                )
4136                .expect("Failed to commit with parent");
4137            } else {
4138                repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
4139                    .expect("Failed to commit");
4140            }
4141        }
4142
4143        #[track_caller]
4144        fn git_stash(repo: &mut git2::Repository) {
4145            use git2::Signature;
4146
4147            let signature = Signature::now("test", "test@zed.dev").unwrap();
4148            repo.stash_save(&signature, "N/A", None)
4149                .expect("Failed to stash");
4150        }
4151
4152        #[track_caller]
4153        fn git_reset(offset: usize, repo: &git2::Repository) {
4154            let head = repo.head().expect("Couldn't get repo head");
4155            let object = head.peel(git2::ObjectType::Commit).unwrap();
4156            let commit = object.as_commit().unwrap();
4157            let new_head = commit
4158                .parents()
 4159                .inspect(|parent| {
 4160                    parent.message();
4161                })
4162                .skip(offset)
4163                .next()
4164                .expect("Not enough history");
4165            repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
4166                .expect("Could not reset");
4167        }
4168
4169        #[allow(dead_code)]
4170        #[track_caller]
4171        fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
4172            repo.statuses(None)
4173                .unwrap()
4174                .iter()
4175                .map(|status| (status.path().unwrap().to_string(), status.status()))
4176                .collect()
4177        }
4178
4179        const IGNORE_RULE: &'static str = "**/target";
4180
4181        let root = temp_tree(json!({
4182            "project": {
4183                "a.txt": "a",
4184                "b.txt": "bb",
4185                "c": {
4186                    "d": {
4187                        "e.txt": "eee"
4188                    }
4189                },
4190                "f.txt": "ffff",
4191                "target": {
4192                    "build_file": "???"
4193                },
4194                ".gitignore": IGNORE_RULE
4195            },
4196
4197        }));
4198
4199        let http_client = FakeHttpClient::with_404_response();
4200        let client = cx.read(|cx| Client::new(http_client, cx));
4201        let tree = Worktree::local(
4202            client,
4203            root.path(),
4204            true,
4205            Arc::new(RealFs),
4206            Default::default(),
4207            &mut cx.to_async(),
4208        )
4209        .await
4210        .unwrap();
4211
4212        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4213            .await;
4214
4215        const A_TXT: &'static str = "a.txt";
4216        const B_TXT: &'static str = "b.txt";
4217        const E_TXT: &'static str = "c/d/e.txt";
4218        const F_TXT: &'static str = "f.txt";
4219        const DOTGITIGNORE: &'static str = ".gitignore";
4220        const BUILD_FILE: &'static str = "target/build_file";
4221
4222        let work_dir = root.path().join("project");
4223        let mut repo = git_init(work_dir.as_path());
4224        repo.add_ignore_rule(IGNORE_RULE).unwrap();
4225        git_add(Path::new(A_TXT), &repo);
4226        git_add(Path::new(E_TXT), &repo);
4227        git_add(Path::new(DOTGITIGNORE), &repo);
4228        git_commit("Initial commit", &repo);
4229
4230        std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
4231
4232        tree.flush_fs_events(cx).await;
4233
4234        // Check that the right git state is observed on startup
4235        tree.read_with(cx, |tree, _cx| {
4236            let snapshot = tree.snapshot();
4237            assert_eq!(snapshot.repository_entries.iter().count(), 1);
4238            let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4239            assert_eq!(dir.0.as_ref(), Path::new("project"));
4240
4241            assert_eq!(repo.statuses.iter().count(), 3);
4242            assert_eq!(
4243                repo.statuses.get(&Path::new(A_TXT).into()),
4244                Some(&GitFileStatus::Modified)
4245            );
4246            assert_eq!(
4247                repo.statuses.get(&Path::new(B_TXT).into()),
4248                Some(&GitFileStatus::Added)
4249            );
4250            assert_eq!(
4251                repo.statuses.get(&Path::new(F_TXT).into()),
4252                Some(&GitFileStatus::Added)
4253            );
4254        });
4255
4256        git_add(Path::new(A_TXT), &repo);
4257        git_add(Path::new(B_TXT), &repo);
4258        git_commit("Committing modified and added", &repo);
4259        tree.flush_fs_events(cx).await;
4260
4261        // Check that repo only changes are tracked
 4262        // Check that changes made only within the git repository (staging and committing) are tracked
4263            let snapshot = tree.snapshot();
4264            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4265
4266            assert_eq!(repo.statuses.iter().count(), 1);
4267            assert_eq!(
4268                repo.statuses.get(&Path::new(F_TXT).into()),
4269                Some(&GitFileStatus::Added)
4270            );
4271        });
4272
4273        git_reset(0, &repo);
4274        git_remove_index(Path::new(B_TXT), &repo);
4275        git_stash(&mut repo);
4276        std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
4277        std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
4278        tree.flush_fs_events(cx).await;
4279
4280        // Check that more complex repo changes are tracked
4281        tree.read_with(cx, |tree, _cx| {
4282            let snapshot = tree.snapshot();
4283            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4284
4285            assert_eq!(repo.statuses.iter().count(), 3);
4286            assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
4287            assert_eq!(
4288                repo.statuses.get(&Path::new(B_TXT).into()),
4289                Some(&GitFileStatus::Added)
4290            );
4291            assert_eq!(
4292                repo.statuses.get(&Path::new(E_TXT).into()),
4293                Some(&GitFileStatus::Modified)
4294            );
4295            assert_eq!(
4296                repo.statuses.get(&Path::new(F_TXT).into()),
4297                Some(&GitFileStatus::Added)
4298            );
4299        });
4300
4301        std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
4302        std::fs::remove_dir_all(work_dir.join("c")).unwrap();
4303        std::fs::write(
4304            work_dir.join(DOTGITIGNORE),
4305            [IGNORE_RULE, "f.txt"].join("\n"),
4306        )
4307        .unwrap();
4308
4309        git_add(Path::new(DOTGITIGNORE), &repo);
4310        git_commit("Committing modified git ignore", &repo);
4311
4312        tree.flush_fs_events(cx).await;
4313
 4314        // Check that changes made directly to the working tree (deletions, new ignore rules) are tracked
4315        tree.read_with(cx, |tree, _cx| {
4316            let snapshot = tree.snapshot();
4317            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4318
4319            assert_eq!(repo.statuses.iter().count(), 0);
4320        });
4321
4322        let mut renamed_dir_name = "first_directory/second_directory";
4323        const RENAMED_FILE: &'static str = "rf.txt";
4324
4325        std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
4326        std::fs::write(
4327            work_dir.join(renamed_dir_name).join(RENAMED_FILE),
4328            "new-contents",
4329        )
4330        .unwrap();
4331
4332        tree.flush_fs_events(cx).await;
4333
4334        tree.read_with(cx, |tree, _cx| {
4335            let snapshot = tree.snapshot();
4336            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4337
4338            assert_eq!(repo.statuses.iter().count(), 1);
4339            assert_eq!(
4340                repo.statuses
4341                    .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
4342                Some(&GitFileStatus::Added)
4343            );
4344        });
4345
4346        renamed_dir_name = "new_first_directory/second_directory";
4347
4348        std::fs::rename(
4349            work_dir.join("first_directory"),
4350            work_dir.join("new_first_directory"),
4351        )
4352        .unwrap();
4353
4354        tree.flush_fs_events(cx).await;
4355
4356        tree.read_with(cx, |tree, _cx| {
4357            let snapshot = tree.snapshot();
4358            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4359
4360            assert_eq!(repo.statuses.iter().count(), 1);
4361            assert_eq!(
4362                repo.statuses
4363                    .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
4364                Some(&GitFileStatus::Added)
4365            );
4366        });
4367    }
4368
4369    #[gpui::test]
4370    async fn test_write_file(cx: &mut TestAppContext) {
4371        let dir = temp_tree(json!({
4372            ".git": {},
4373            ".gitignore": "ignored-dir\n",
4374            "tracked-dir": {},
4375            "ignored-dir": {}
4376        }));
4377
4378        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4379
4380        let tree = Worktree::local(
4381            client,
4382            dir.path(),
4383            true,
4384            Arc::new(RealFs),
4385            Default::default(),
4386            &mut cx.to_async(),
4387        )
4388        .await
4389        .unwrap();
4390        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4391            .await;
4392        tree.flush_fs_events(cx).await;
4393
4394        tree.update(cx, |tree, cx| {
4395            tree.as_local().unwrap().write_file(
4396                Path::new("tracked-dir/file.txt"),
4397                "hello".into(),
4398                Default::default(),
4399                cx,
4400            )
4401        })
4402        .await
4403        .unwrap();
4404        tree.update(cx, |tree, cx| {
4405            tree.as_local().unwrap().write_file(
4406                Path::new("ignored-dir/file.txt"),
4407                "world".into(),
4408                Default::default(),
4409                cx,
4410            )
4411        })
4412        .await
4413        .unwrap();
4414
4415        tree.read_with(cx, |tree, _| {
4416            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
4417            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
4418            assert!(!tracked.is_ignored);
4419            assert!(ignored.is_ignored);
4420        });
4421    }
4422
4423    #[gpui::test(iterations = 30)]
4424    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
4425        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4426
4427        let fs = FakeFs::new(cx.background());
4428        fs.insert_tree(
4429            "/root",
4430            json!({
4431                "b": {},
4432                "c": {},
4433                "d": {},
4434            }),
4435        )
4436        .await;
4437
4438        let tree = Worktree::local(
4439            client,
4440            "/root".as_ref(),
4441            true,
4442            fs,
4443            Default::default(),
4444            &mut cx.to_async(),
4445        )
4446        .await
4447        .unwrap();
4448
4449        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4450
4451        let entry = tree
4452            .update(cx, |tree, cx| {
4453                tree.as_local_mut()
4454                    .unwrap()
4455                    .create_entry("a/e".as_ref(), true, cx)
4456            })
4457            .await
4458            .unwrap();
4459        assert!(entry.is_dir());
4460
4461        cx.foreground().run_until_parked();
4462        tree.read_with(cx, |tree, _| {
4463            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
4464        });
4465
4466        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4467        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
4468        snapshot1.apply_remote_update(update).unwrap();
 4469        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true));
4470    }
4471
4472    #[gpui::test(iterations = 100)]
4473    async fn test_random_worktree_operations_during_initial_scan(
4474        cx: &mut TestAppContext,
4475        mut rng: StdRng,
4476    ) {
4477        let operations = env::var("OPERATIONS")
4478            .map(|o| o.parse().unwrap())
4479            .unwrap_or(5);
4480        let initial_entries = env::var("INITIAL_ENTRIES")
4481            .map(|o| o.parse().unwrap())
4482            .unwrap_or(20);
4483
4484        let root_dir = Path::new("/test");
4485        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4486        fs.as_fake().insert_tree(root_dir, json!({})).await;
4487        for _ in 0..initial_entries {
4488            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4489        }
4490        log::info!("generated initial tree");
4491
4492        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4493        let worktree = Worktree::local(
4494            client.clone(),
4495            root_dir,
4496            true,
4497            fs.clone(),
4498            Default::default(),
4499            &mut cx.to_async(),
4500        )
4501        .await
4502        .unwrap();
4503
4504        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4505
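        // Mutate the worktree while its initial scan is still running, checking
        // invariants after each operation and periodically catching a copied
        // snapshot up via remote updates.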
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

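        // Let the initial scan finish, then re-check invariants and apply one
        // final update to the copied snapshot.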
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }

    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for ((path, _), change_type) in changes.iter() {
                        let path = path.clone();
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }

                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }

                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }

                            PathChange::AddedOrUpdated => {
                                if paths.get(ix) != Some(&path) {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }

                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

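        // Interleave worktree-level and raw file-system mutations, flushing
        // buffered fs events in random batches and occasionally storing
        // snapshots to verify incremental updates against later.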
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if rng.gen_bool(0.2) {
                worktree
                    .update(cx, |worktree, cx| {
                        randomly_mutate_worktree(worktree, &mut rng, cx)
                    })
                    .await
                    .unwrap();
            } else {
                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            }

            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

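        // Scanning the same file system from scratch should produce an identical snapshot.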
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

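        // Each stored snapshot should catch up to the final snapshot after a
        // single update. When ignored entries are excluded from the update,
        // they are first pruned from the stored snapshot.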
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }

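    /// Performs one random mutation through the worktree API: deleting or
    /// renaming a random entry, creating a new child entry, or overwriting an
    /// existing file's contents.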
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }

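    /// Performs one random mutation directly on the fake file system: creating
    /// a file or directory, writing a `.gitignore`, or renaming/deleting an
    /// existing path.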
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }

    fn gen_name(rng: &mut impl Rng) -> String {
        (0..6)
            .map(|_| rng.sample(rand::distributions::Alphanumeric))
            .map(char::from)
            .collect()
    }

    impl LocalSnapshot {
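        /// Asserts that the snapshot's internal data structures agree with one
        /// another: entries_by_path vs. entries_by_id, the file iterators,
        /// traversal order, and the tracked `.gitignore` files.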
        fn check_invariants(&self) {
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

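            // Walk the tree depth-first using an explicit stack; the visit
            // order should match the order of the entries_by_path cursor.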
            let mut dfs_paths_via_stack = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                dfs_paths_via_stack.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_stack, dfs_paths_via_iter);

            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

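            // Every directory that contributes a loaded .gitignore must still
            // be present in the snapshot, along with the .gitignore entry itself.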
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
}