worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TakeUntilExt, TryFutureExt};
  59
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  61pub struct WorktreeId(usize);
  62
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
  68pub struct LocalWorktree {
  69    snapshot: LocalSnapshot,
  70    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
  71    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
  72    _background_scanner_task: Task<()>,
  73    share: Option<ShareState>,
  74    diagnostics: HashMap<
  75        Arc<Path>,
  76        Vec<(
  77            LanguageServerId,
  78            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
  79        )>,
  80    >,
  81    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  82    client: Arc<Client>,
  83    fs: Arc<dyn Fs>,
  84    visible: bool,
  85}
  86
  87pub struct RemoteWorktree {
  88    snapshot: Snapshot,
  89    background_snapshot: Arc<Mutex<Snapshot>>,
  90    project_id: u64,
  91    client: Arc<Client>,
  92    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
  93    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
  94    replica_id: ReplicaId,
  95    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  96    visible: bool,
  97    disconnected: bool,
  98}
  99
 100#[derive(Clone)]
 101pub struct Snapshot {
 102    id: WorktreeId,
 103    abs_path: Arc<Path>,
 104    root_name: String,
 105    root_char_bag: CharBag,
 106    entries_by_path: SumTree<Entry>,
 107    entries_by_id: SumTree<PathEntry>,
 108    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 109
 110    /// A number that increases every time the worktree begins scanning
 111    /// a set of paths from the filesystem. This scanning could be caused
 112    /// by some operation performed on the worktree, such as reading or
 113    /// writing a file, or by an event reported by the filesystem.
 114    scan_id: usize,
 115
 116    /// The latest scan id that has completed, and whose preceding scans
 117    /// have all completed. The current `scan_id` could be more than one
 118    /// greater than the `completed_scan_id` if operations are performed
 119    /// on the worktree while it is processing a file-system event.
 120    completed_scan_id: usize,
 121}
 122
 123impl Snapshot {
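        /// Returns the repository entry whose work directory contains `path`,
        /// choosing the most deeply nested work directory when several contain it.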
 124    pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
 125        let mut max_len = 0;
 126        let mut current_candidate = None;
 127        for (work_directory, repo) in self.repository_entries.iter() {
 128            if path.starts_with(&work_directory.0) {
 129                if work_directory.0.as_os_str().len() >= max_len {
 130                    current_candidate = Some(repo);
 131                    max_len = work_directory.0.as_os_str().len();
 132                } else {
 133                    break;
 134                }
 135            }
 136        }
 137
 138        current_candidate.map(|entry| entry.to_owned())
 139    }
 140}
 141
 142#[derive(Clone, Debug, PartialEq, Eq)]
 143pub struct RepositoryEntry {
 144    pub(crate) work_directory: WorkDirectoryEntry,
 145    pub(crate) branch: Option<Arc<str>>,
 146    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
 147}
 148
 149fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
 150    proto::GitStatus::from_i32(git_status).map(|status| match status {
 151        proto::GitStatus::Added => GitFileStatus::Added,
 152        proto::GitStatus::Modified => GitFileStatus::Modified,
 153        proto::GitStatus::Conflict => GitFileStatus::Conflict,
 154    })
 155}
 156
 157impl RepositoryEntry {
 158    pub fn branch(&self) -> Option<Arc<str>> {
 159        self.branch.clone()
 160    }
 161
 162    pub fn work_directory_id(&self) -> ProjectEntryId {
 163        *self.work_directory
 164    }
 165
 166    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 167        snapshot
 168            .entry_for_id(self.work_directory_id())
 169            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 170    }
 171
 172    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 173        self.work_directory
 174            .relativize(snapshot, path)
 175            .and_then(|repo_path| self.statuses.get(&repo_path))
 176            .cloned()
 177    }
 178
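        /// Returns the combined git status of all files under `path`, where a
        /// conflict takes precedence over a modification and a modification over
        /// an addition. Returns `None` if no statuses are recorded under `path`.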
 179    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 180        self.work_directory
 181            .relativize(snapshot, path)
 182            .and_then(|repo_path| {
 183                self.statuses
 184                    .iter_from(&repo_path)
 185                    .take_while(|(key, _)| key.starts_with(&repo_path))
 186                    // Short-circuit once we've found the highest-priority status (a conflict)
 187                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
 188                    .map(|(_, status)| status)
 189                    .reduce(
 190                        |status_first, status_second| match (status_first, status_second) {
 191                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
 192                                &GitFileStatus::Conflict
 193                            }
 194                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
 195                                &GitFileStatus::Modified
 196                            }
 197                            _ => &GitFileStatus::Added,
 198                        },
 199                    )
 200                    .copied()
 201            })
 202    }
 203
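        /// Builds a protobuf update describing how this entry's statuses differ
        /// from `other`: statuses that are new or changed relative to `other` are
        /// reported as updated, and statuses present only in `other` as removed.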
 204    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
 205        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
 206        let mut removed_statuses: Vec<String> = Vec::new();
 207
 208        let mut self_statuses = self.statuses.iter().peekable();
 209        let mut other_statuses = other.statuses.iter().peekable();
 210        loop {
 211            match (self_statuses.peek(), other_statuses.peek()) {
 212                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
 213                    match Ord::cmp(self_repo_path, other_repo_path) {
 214                        Ordering::Less => {
 215                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
 216                            self_statuses.next();
 217                        }
 218                        Ordering::Equal => {
 219                            if self_status != other_status {
 220                                updated_statuses
 221                                    .push(make_status_entry(self_repo_path, self_status));
 222                            }
 223
 224                            self_statuses.next();
 225                            other_statuses.next();
 226                        }
 227                        Ordering::Greater => {
 228                            removed_statuses.push(make_repo_path(other_repo_path));
 229                            other_statuses.next();
 230                        }
 231                    }
 232                }
 233                (Some((self_repo_path, self_status)), None) => {
 234                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
 235                    self_statuses.next();
 236                }
 237                (None, Some((other_repo_path, _))) => {
 238                    removed_statuses.push(make_repo_path(other_repo_path));
 239                    other_statuses.next();
 240                }
 241                (None, None) => break,
 242            }
 243        }
 244
 245        proto::RepositoryEntry {
 246            work_directory_id: self.work_directory_id().to_proto(),
 247            branch: self.branch.as_ref().map(|str| str.to_string()),
 248            removed_repo_paths: removed_statuses,
 249            updated_statuses,
 250        }
 251    }
 252}
 253
 254fn make_repo_path(path: &RepoPath) -> String {
 255    path.as_os_str().to_string_lossy().to_string()
 256}
 257
 258fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
 259    proto::StatusEntry {
 260        repo_path: make_repo_path(path),
 261        status: match status {
 262            GitFileStatus::Added => proto::GitStatus::Added.into(),
 263            GitFileStatus::Modified => proto::GitStatus::Modified.into(),
 264            GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
 265        },
 266    }
 267}
 268
 269impl From<&RepositoryEntry> for proto::RepositoryEntry {
 270    fn from(value: &RepositoryEntry) -> Self {
 271        proto::RepositoryEntry {
 272            work_directory_id: value.work_directory.to_proto(),
 273            branch: value.branch.as_ref().map(|str| str.to_string()),
 274            updated_statuses: value
 275                .statuses
 276                .iter()
 277                .map(|(repo_path, status)| make_status_entry(repo_path, status))
 278                .collect(),
 279            removed_repo_paths: Default::default(),
 280        }
 281    }
 282}
 283
 284/// This path corresponds to the 'content path' (the folder that contains the .git)
 285#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 286pub struct RepositoryWorkDirectory(Arc<Path>);
 287
 288impl Default for RepositoryWorkDirectory {
 289    fn default() -> Self {
 290        RepositoryWorkDirectory(Arc::from(Path::new("")))
 291    }
 292}
 293
 294impl AsRef<Path> for RepositoryWorkDirectory {
 295    fn as_ref(&self) -> &Path {
 296        self.0.as_ref()
 297    }
 298}
 299
 300#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 301pub struct WorkDirectoryEntry(ProjectEntryId);
 302
 303impl WorkDirectoryEntry {
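        /// Returns `path` relative to this work directory, or `None` if the work
        /// directory entry no longer exists or `path` lies outside of it.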
 304    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 305        worktree.entry_for_id(self.0).and_then(|entry| {
 306            path.strip_prefix(&entry.path)
 307                .ok()
 308                .map(move |path| path.into())
 309        })
 310    }
 311}
 312
 313impl Deref for WorkDirectoryEntry {
 314    type Target = ProjectEntryId;
 315
 316    fn deref(&self) -> &Self::Target {
 317        &self.0
 318    }
 319}
 320
 321impl From<ProjectEntryId> for WorkDirectoryEntry {
 322    fn from(value: ProjectEntryId) -> Self {
 323        WorkDirectoryEntry(value)
 324    }
 325}
 326
 327#[derive(Debug, Clone)]
 328pub struct LocalSnapshot {
 329    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>, // (gitignore, needs_update)
 330    // Keyed by the ProjectEntryId of each repository's work directory
 331    // (that is, the entry for the folder containing the .git dir).
 332    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 333    removed_entry_ids: HashMap<u64, ProjectEntryId>,
 334    next_entry_id: Arc<AtomicUsize>,
 335    snapshot: Snapshot,
 336}
 337
 338#[derive(Debug, Clone)]
 339pub struct LocalRepositoryEntry {
 340    pub(crate) scan_id: usize,
 341    pub(crate) full_scan_id: usize,
 342    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
 343    /// Path to the actual .git folder.
 344    /// Note: if .git is a file, this points to the folder indicated by the .git file
 345    pub(crate) git_dir_path: Arc<Path>,
 346}
 347
 348impl LocalRepositoryEntry {
 349    // Note that this path should be relative to the worktree root.
 350    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 351        path.starts_with(self.git_dir_path.as_ref())
 352    }
 353}
 354
 355impl Deref for LocalSnapshot {
 356    type Target = Snapshot;
 357
 358    fn deref(&self) -> &Self::Target {
 359        &self.snapshot
 360    }
 361}
 362
 363impl DerefMut for LocalSnapshot {
 364    fn deref_mut(&mut self) -> &mut Self::Target {
 365        &mut self.snapshot
 366    }
 367}
 368
 369enum ScanState {
 370    Started,
 371    Updated {
 372        snapshot: LocalSnapshot,
 373        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
 374        barrier: Option<barrier::Sender>,
 375        scanning: bool,
 376    },
 377}
 378
 379struct ShareState {
 380    project_id: u64,
 381    snapshots_tx: watch::Sender<LocalSnapshot>,
 382    resume_updates: watch::Sender<()>,
 383    _maintain_remote_snapshot: Task<Option<()>>,
 384}
 385
 386pub enum Event {
 387    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
 388    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
 389}
 390
 391impl Entity for Worktree {
 392    type Event = Event;
 393}
 394
 395impl Worktree {
 396    pub async fn local(
 397        client: Arc<Client>,
 398        path: impl Into<Arc<Path>>,
 399        visible: bool,
 400        fs: Arc<dyn Fs>,
 401        next_entry_id: Arc<AtomicUsize>,
 402        cx: &mut AsyncAppContext,
 403    ) -> Result<ModelHandle<Self>> {
 404        // After determining whether the root entry is a file or a directory, populate the
 405        // snapshot's "root name", which is used for fuzzy matching.
 406        let abs_path = path.into();
 407        let metadata = fs
 408            .metadata(&abs_path)
 409            .await
 410            .context("failed to stat worktree path")?;
 411
 412        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
 413            let root_name = abs_path
 414                .file_name()
 415                .map_or(String::new(), |f| f.to_string_lossy().to_string());
 416
 417            let mut snapshot = LocalSnapshot {
 418                ignores_by_parent_abs_path: Default::default(),
 419                removed_entry_ids: Default::default(),
 420                git_repositories: Default::default(),
 421                next_entry_id,
 422                snapshot: Snapshot {
 423                    id: WorktreeId::from_usize(cx.model_id()),
 424                    abs_path: abs_path.clone(),
 425                    root_name: root_name.clone(),
 426                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
 427                    entries_by_path: Default::default(),
 428                    entries_by_id: Default::default(),
 429                    repository_entries: Default::default(),
 430                    scan_id: 1,
 431                    completed_scan_id: 0,
 432                },
 433            };
 434
 435            if let Some(metadata) = metadata {
 436                snapshot.insert_entry(
 437                    Entry::new(
 438                        Arc::from(Path::new("")),
 439                        &metadata,
 440                        &snapshot.next_entry_id,
 441                        snapshot.root_char_bag,
 442                    ),
 443                    fs.as_ref(),
 444                );
 445            }
 446
 447            let (path_changes_tx, path_changes_rx) = channel::unbounded();
 448            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 449
 450            cx.spawn_weak(|this, mut cx| async move {
 451                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
 452                    this.update(&mut cx, |this, cx| {
 453                        let this = this.as_local_mut().unwrap();
 454                        match state {
 455                            ScanState::Started => {
 456                                *this.is_scanning.0.borrow_mut() = true;
 457                            }
 458                            ScanState::Updated {
 459                                snapshot,
 460                                changes,
 461                                barrier,
 462                                scanning,
 463                            } => {
 464                                *this.is_scanning.0.borrow_mut() = scanning;
 465                                this.set_snapshot(snapshot, cx);
 466                                cx.emit(Event::UpdatedEntries(changes));
 467                                drop(barrier);
 468                            }
 469                        }
 470                        cx.notify();
 471                    });
 472                }
 473            })
 474            .detach();
 475
 476            let background_scanner_task = cx.background().spawn({
 477                let fs = fs.clone();
 478                let snapshot = snapshot.clone();
 479                let background = cx.background().clone();
 480                async move {
 481                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 482                    BackgroundScanner::new(
 483                        snapshot,
 484                        fs,
 485                        scan_states_tx,
 486                        background,
 487                        path_changes_rx,
 488                    )
 489                    .run(events)
 490                    .await;
 491                }
 492            });
 493
 494            Worktree::Local(LocalWorktree {
 495                snapshot,
 496                is_scanning: watch::channel_with(true),
 497                share: None,
 498                path_changes_tx,
 499                _background_scanner_task: background_scanner_task,
 500                diagnostics: Default::default(),
 501                diagnostic_summaries: Default::default(),
 502                client,
 503                fs,
 504                visible,
 505            })
 506        }))
 507    }
 508
 509    pub fn remote(
 510        project_remote_id: u64,
 511        replica_id: ReplicaId,
 512        worktree: proto::WorktreeMetadata,
 513        client: Arc<Client>,
 514        cx: &mut AppContext,
 515    ) -> ModelHandle<Self> {
 516        cx.add_model(|cx: &mut ModelContext<Self>| {
 517            let snapshot = Snapshot {
 518                id: WorktreeId(worktree.id as usize),
 519                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
 520                root_name: worktree.root_name.clone(),
 521                root_char_bag: worktree
 522                    .root_name
 523                    .chars()
 524                    .map(|c| c.to_ascii_lowercase())
 525                    .collect(),
 526                entries_by_path: Default::default(),
 527                entries_by_id: Default::default(),
 528                repository_entries: Default::default(),
 529                scan_id: 1,
 530                completed_scan_id: 0,
 531            };
 532
 533            let (updates_tx, mut updates_rx) = mpsc::unbounded();
 534            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
 535            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 536
 537            cx.background()
 538                .spawn({
 539                    let background_snapshot = background_snapshot.clone();
 540                    async move {
 541                        while let Some(update) = updates_rx.next().await {
 542                            if let Err(error) =
 543                                background_snapshot.lock().apply_remote_update(update)
 544                            {
 545                                log::error!("error applying worktree update: {}", error);
 546                            }
 547                            snapshot_updated_tx.send(()).await.ok();
 548                        }
 549                    }
 550                })
 551                .detach();
 552
 553            cx.spawn_weak(|this, mut cx| async move {
 554                while (snapshot_updated_rx.recv().await).is_some() {
 555                    if let Some(this) = this.upgrade(&cx) {
 556                        this.update(&mut cx, |this, cx| {
 557                            let this = this.as_remote_mut().unwrap();
 558                            this.snapshot = this.background_snapshot.lock().clone();
 559                            cx.emit(Event::UpdatedEntries(Default::default()));
 560                            cx.notify();
 561                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 562                                if this.observed_snapshot(*scan_id) {
 563                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 564                                    let _ = tx.send(());
 565                                } else {
 566                                    break;
 567                                }
 568                            }
 569                        });
 570                    } else {
 571                        break;
 572                    }
 573                }
 574            })
 575            .detach();
 576
 577            Worktree::Remote(RemoteWorktree {
 578                project_id: project_remote_id,
 579                replica_id,
 580                snapshot: snapshot.clone(),
 581                background_snapshot,
 582                updates_tx: Some(updates_tx),
 583                snapshot_subscriptions: Default::default(),
 584                client: client.clone(),
 585                diagnostic_summaries: Default::default(),
 586                visible: worktree.visible,
 587                disconnected: false,
 588            })
 589        })
 590    }
 591
 592    pub fn as_local(&self) -> Option<&LocalWorktree> {
 593        if let Worktree::Local(worktree) = self {
 594            Some(worktree)
 595        } else {
 596            None
 597        }
 598    }
 599
 600    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 601        if let Worktree::Remote(worktree) = self {
 602            Some(worktree)
 603        } else {
 604            None
 605        }
 606    }
 607
 608    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 609        if let Worktree::Local(worktree) = self {
 610            Some(worktree)
 611        } else {
 612            None
 613        }
 614    }
 615
 616    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 617        if let Worktree::Remote(worktree) = self {
 618            Some(worktree)
 619        } else {
 620            None
 621        }
 622    }
 623
 624    pub fn is_local(&self) -> bool {
 625        matches!(self, Worktree::Local(_))
 626    }
 627
 628    pub fn is_remote(&self) -> bool {
 629        !self.is_local()
 630    }
 631
 632    pub fn snapshot(&self) -> Snapshot {
 633        match self {
 634            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 635            Worktree::Remote(worktree) => worktree.snapshot(),
 636        }
 637    }
 638
 639    pub fn scan_id(&self) -> usize {
 640        match self {
 641            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 642            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 643        }
 644    }
 645
 646    pub fn completed_scan_id(&self) -> usize {
 647        match self {
 648            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 649            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 650        }
 651    }
 652
 653    pub fn is_visible(&self) -> bool {
 654        match self {
 655            Worktree::Local(worktree) => worktree.visible,
 656            Worktree::Remote(worktree) => worktree.visible,
 657        }
 658    }
 659
 660    pub fn replica_id(&self) -> ReplicaId {
 661        match self {
 662            Worktree::Local(_) => 0,
 663            Worktree::Remote(worktree) => worktree.replica_id,
 664        }
 665    }
 666
 667    pub fn diagnostic_summaries(
 668        &self,
 669    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 670        match self {
 671            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 672            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 673        }
 674        .iter()
 675        .flat_map(|(path, summaries)| {
 676            summaries
 677                .iter()
 678                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 679        })
 680    }
 681
 682    pub fn abs_path(&self) -> Arc<Path> {
 683        match self {
 684            Worktree::Local(worktree) => worktree.abs_path.clone(),
 685            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 686        }
 687    }
 688}
 689
 690impl LocalWorktree {
 691    pub fn contains_abs_path(&self, path: &Path) -> bool {
 692        path.starts_with(&self.abs_path)
 693    }
 694
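        /// Converts a worktree-relative path into an absolute path. A path with no
        /// final component (such as the empty root path) resolves to the worktree's
        /// absolute path itself.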
 695    fn absolutize(&self, path: &Path) -> PathBuf {
 696        if path.file_name().is_some() {
 697            self.abs_path.join(path)
 698        } else {
 699            self.abs_path.to_path_buf()
 700        }
 701    }
 702
 703    pub(crate) fn load_buffer(
 704        &mut self,
 705        id: u64,
 706        path: &Path,
 707        cx: &mut ModelContext<Worktree>,
 708    ) -> Task<Result<ModelHandle<Buffer>>> {
 709        let path = Arc::from(path);
 710        cx.spawn(move |this, mut cx| async move {
 711            let (file, contents, diff_base) = this
 712                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
 713                .await?;
 714            let text_buffer = cx
 715                .background()
 716                .spawn(async move { text::Buffer::new(0, id, contents) })
 717                .await;
 718            Ok(cx.add_model(|cx| {
 719                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
 720                buffer.git_diff_recalc(cx);
 721                buffer
 722            }))
 723        })
 724    }
 725
 726    pub fn diagnostics_for_path(
 727        &self,
 728        path: &Path,
 729    ) -> Vec<(
 730        LanguageServerId,
 731        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 732    )> {
 733        self.diagnostics.get(path).cloned().unwrap_or_default()
 734    }
 735
 736    pub fn update_diagnostics(
 737        &mut self,
 738        server_id: LanguageServerId,
 739        worktree_path: Arc<Path>,
 740        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 741        _: &mut ModelContext<Worktree>,
 742    ) -> Result<bool> {
 743        let summaries_by_server_id = self
 744            .diagnostic_summaries
 745            .entry(worktree_path.clone())
 746            .or_default();
 747
 748        let old_summary = summaries_by_server_id
 749            .remove(&server_id)
 750            .unwrap_or_default();
 751
 752        let new_summary = DiagnosticSummary::new(&diagnostics);
 753        if new_summary.is_empty() {
 754            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
 755                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 756                    diagnostics_by_server_id.remove(ix);
 757                }
 758                if diagnostics_by_server_id.is_empty() {
 759                    self.diagnostics.remove(&worktree_path);
 760                }
 761            }
 762        } else {
 763            summaries_by_server_id.insert(server_id, new_summary);
 764            let diagnostics_by_server_id =
 765                self.diagnostics.entry(worktree_path.clone()).or_default();
 766            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 767                Ok(ix) => {
 768                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
 769                }
 770                Err(ix) => {
 771                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
 772                }
 773            }
 774        }
 775
 776        if !old_summary.is_empty() || !new_summary.is_empty() {
 777            if let Some(share) = self.share.as_ref() {
 778                self.client
 779                    .send(proto::UpdateDiagnosticSummary {
 780                        project_id: share.project_id,
 781                        worktree_id: self.id().to_proto(),
 782                        summary: Some(proto::DiagnosticSummary {
 783                            path: worktree_path.to_string_lossy().to_string(),
 784                            language_server_id: server_id.0 as u64,
 785                            error_count: new_summary.error_count as u32,
 786                            warning_count: new_summary.warning_count as u32,
 787                        }),
 788                    })
 789                    .log_err();
 790            }
 791        }
 792
 793        Ok(!old_summary.is_empty() || !new_summary.is_empty())
 794    }
 795
 796    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
 797        let updated_repos =
 798            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
 799        self.snapshot = new_snapshot;
 800
 801        if let Some(share) = self.share.as_mut() {
 802            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
 803        }
 804
 805        if !updated_repos.is_empty() {
 806            cx.emit(Event::UpdatedGitRepositories(updated_repos));
 807        }
 808    }
 809
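    /// Compares the old and new sets of git repositories and returns the ones
    /// that were added, removed, or rescanned (their `scan_id` changed), keyed
    /// by the path of each repository's work directory.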
 810    fn changed_repos(
 811        &self,
 812        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 813        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 814    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
 815        let mut diff = HashMap::default();
 816        let mut old_repos = old_repos.iter().peekable();
 817        let mut new_repos = new_repos.iter().peekable();
 818        loop {
 819            match (old_repos.peek(), new_repos.peek()) {
 820                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
 821                    match Ord::cmp(old_entry_id, new_entry_id) {
 822                        Ordering::Less => {
 823                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
 824                                diff.insert(entry.path.clone(), (*old_repo).clone());
 825                            }
 826                            old_repos.next();
 827                        }
 828                        Ordering::Equal => {
 829                            if old_repo.scan_id != new_repo.scan_id {
 830                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
 831                                    diff.insert(entry.path.clone(), (*new_repo).clone());
 832                                }
 833                            }
 834
 835                            old_repos.next();
 836                            new_repos.next();
 837                        }
 838                        Ordering::Greater => {
 839                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
 840                                diff.insert(entry.path.clone(), (*new_repo).clone());
 841                            }
 842                            new_repos.next();
 843                        }
 844                    }
 845                }
 846                (Some((old_entry_id, old_repo)), None) => {
 847                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
 848                        diff.insert(entry.path.clone(), (*old_repo).clone());
 849                    }
 850                    old_repos.next();
 851                }
 852                (None, Some((new_entry_id, new_repo))) => {
 853                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
 854                        diff.insert(entry.path.clone(), (*new_repo).clone());
 855                    }
 856                    new_repos.next();
 857                }
 858                (None, None) => break,
 859            }
 860        }
 861        diff
 862    }
 863
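    /// Returns a future that resolves once the background scanner reports that
    /// it is no longer scanning.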
 864    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 865        let mut is_scanning_rx = self.is_scanning.1.clone();
 866        async move {
 867            let mut is_scanning = is_scanning_rx.borrow().clone();
 868            while is_scanning {
 869                if let Some(value) = is_scanning_rx.recv().await {
 870                    is_scanning = value;
 871                } else {
 872                    break;
 873                }
 874            }
 875        }
 876    }
 877
 878    pub fn snapshot(&self) -> LocalSnapshot {
 879        self.snapshot.clone()
 880    }
 881
 882    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 883        proto::WorktreeMetadata {
 884            id: self.id().to_proto(),
 885            root_name: self.root_name().to_string(),
 886            visible: self.visible,
 887            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 888        }
 889    }
 890
 891    fn load(
 892        &self,
 893        path: &Path,
 894        cx: &mut ModelContext<Worktree>,
 895    ) -> Task<Result<(File, String, Option<String>)>> {
 896        let handle = cx.handle();
 897        let path = Arc::from(path);
 898        let abs_path = self.absolutize(&path);
 899        let fs = self.fs.clone();
 900        let snapshot = self.snapshot();
 901
 902        let mut index_task = None;
 903
 904        if let Some(repo) = snapshot.repo_for(&path) {
 905            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
 906            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
 907                let repo = repo.repo_ptr.to_owned();
 908                index_task = Some(
 909                    cx.background()
 910                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
 911                );
 912            }
 913        }
 914
 915        cx.spawn(|this, mut cx| async move {
 916            let text = fs.load(&abs_path).await?;
 917
 918            let diff_base = if let Some(index_task) = index_task {
 919                index_task.await
 920            } else {
 921                None
 922            };
 923
 924            // Eagerly populate the snapshot with an updated entry for the loaded file
 925            let entry = this
 926                .update(&mut cx, |this, cx| {
 927                    this.as_local().unwrap().refresh_entry(path, None, cx)
 928                })
 929                .await?;
 930
 931            Ok((
 932                File {
 933                    entry_id: entry.id,
 934                    worktree: handle,
 935                    path: entry.path,
 936                    mtime: entry.mtime,
 937                    is_local: true,
 938                    is_deleted: false,
 939                },
 940                text,
 941                diff_base,
 942            ))
 943        })
 944    }
 945
 946    pub fn save_buffer(
 947        &self,
 948        buffer_handle: ModelHandle<Buffer>,
 949        path: Arc<Path>,
 950        has_changed_file: bool,
 951        cx: &mut ModelContext<Worktree>,
 952    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
 953        let handle = cx.handle();
 954        let buffer = buffer_handle.read(cx);
 955
 956        let rpc = self.client.clone();
 957        let buffer_id = buffer.remote_id();
 958        let project_id = self.share.as_ref().map(|share| share.project_id);
 959
 960        let text = buffer.as_rope().clone();
 961        let fingerprint = text.fingerprint();
 962        let version = buffer.version();
 963        let save = self.write_file(path, text, buffer.line_ending(), cx);
 964
 965        cx.as_mut().spawn(|mut cx| async move {
 966            let entry = save.await?;
 967
 968            if has_changed_file {
 969                let new_file = Arc::new(File {
 970                    entry_id: entry.id,
 971                    worktree: handle,
 972                    path: entry.path,
 973                    mtime: entry.mtime,
 974                    is_local: true,
 975                    is_deleted: false,
 976                });
 977
 978                if let Some(project_id) = project_id {
 979                    rpc.send(proto::UpdateBufferFile {
 980                        project_id,
 981                        buffer_id,
 982                        file: Some(new_file.to_proto()),
 983                    })
 984                    .log_err();
 985                }
 986
 987                buffer_handle.update(&mut cx, |buffer, cx| {
 988                    if has_changed_file {
 989                        buffer.file_updated(new_file, cx).detach();
 990                    }
 991                });
 992            }
 993
 994            if let Some(project_id) = project_id {
 995                rpc.send(proto::BufferSaved {
 996                    project_id,
 997                    buffer_id,
 998                    version: serialize_version(&version),
 999                    mtime: Some(entry.mtime.into()),
1000                    fingerprint: serialize_fingerprint(fingerprint),
1001                })?;
1002            }
1003
1004            buffer_handle.update(&mut cx, |buffer, cx| {
1005                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1006            });
1007
1008            Ok((version, fingerprint, entry.mtime))
1009        })
1010    }
1011
1012    pub fn create_entry(
1013        &self,
1014        path: impl Into<Arc<Path>>,
1015        is_dir: bool,
1016        cx: &mut ModelContext<Worktree>,
1017    ) -> Task<Result<Entry>> {
1018        let path = path.into();
1019        let abs_path = self.absolutize(&path);
1020        let fs = self.fs.clone();
1021        let write = cx.background().spawn(async move {
1022            if is_dir {
1023                fs.create_dir(&abs_path).await
1024            } else {
1025                fs.save(&abs_path, &Default::default(), Default::default())
1026                    .await
1027            }
1028        });
1029
1030        cx.spawn(|this, mut cx| async move {
1031            write.await?;
1032            this.update(&mut cx, |this, cx| {
1033                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1034            })
1035            .await
1036        })
1037    }
1038
1039    pub fn write_file(
1040        &self,
1041        path: impl Into<Arc<Path>>,
1042        text: Rope,
1043        line_ending: LineEnding,
1044        cx: &mut ModelContext<Worktree>,
1045    ) -> Task<Result<Entry>> {
1046        let path = path.into();
1047        let abs_path = self.absolutize(&path);
1048        let fs = self.fs.clone();
1049        let write = cx
1050            .background()
1051            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1052
1053        cx.spawn(|this, mut cx| async move {
1054            write.await?;
1055            this.update(&mut cx, |this, cx| {
1056                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1057            })
1058            .await
1059        })
1060    }
1061
1062    pub fn delete_entry(
1063        &self,
1064        entry_id: ProjectEntryId,
1065        cx: &mut ModelContext<Worktree>,
1066    ) -> Option<Task<Result<()>>> {
1067        let entry = self.entry_for_id(entry_id)?.clone();
1068        let abs_path = self.abs_path.clone();
1069        let fs = self.fs.clone();
1070
1071        let delete = cx.background().spawn(async move {
1072            let mut abs_path = fs.canonicalize(&abs_path).await?;
1073            if entry.path.file_name().is_some() {
1074                abs_path = abs_path.join(&entry.path);
1075            }
1076            if entry.is_file() {
1077                fs.remove_file(&abs_path, Default::default()).await?;
1078            } else {
1079                fs.remove_dir(
1080                    &abs_path,
1081                    RemoveOptions {
1082                        recursive: true,
1083                        ignore_if_not_exists: false,
1084                    },
1085                )
1086                .await?;
1087            }
1088            anyhow::Ok(abs_path)
1089        });
1090
1091        Some(cx.spawn(|this, mut cx| async move {
1092            let abs_path = delete.await?;
1093            let (tx, mut rx) = barrier::channel();
1094            this.update(&mut cx, |this, _| {
1095                this.as_local_mut()
1096                    .unwrap()
1097                    .path_changes_tx
1098                    .try_send((vec![abs_path], tx))
1099            })?;
1100            rx.recv().await;
1101            Ok(())
1102        }))
1103    }
1104
1105    pub fn rename_entry(
1106        &self,
1107        entry_id: ProjectEntryId,
1108        new_path: impl Into<Arc<Path>>,
1109        cx: &mut ModelContext<Worktree>,
1110    ) -> Option<Task<Result<Entry>>> {
1111        let old_path = self.entry_for_id(entry_id)?.path.clone();
1112        let new_path = new_path.into();
1113        let abs_old_path = self.absolutize(&old_path);
1114        let abs_new_path = self.absolutize(&new_path);
1115        let fs = self.fs.clone();
1116        let rename = cx.background().spawn(async move {
1117            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1118                .await
1119        });
1120
1121        Some(cx.spawn(|this, mut cx| async move {
1122            rename.await?;
1123            this.update(&mut cx, |this, cx| {
1124                this.as_local_mut()
1125                    .unwrap()
1126                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1127            })
1128            .await
1129        }))
1130    }
1131
1132    pub fn copy_entry(
1133        &self,
1134        entry_id: ProjectEntryId,
1135        new_path: impl Into<Arc<Path>>,
1136        cx: &mut ModelContext<Worktree>,
1137    ) -> Option<Task<Result<Entry>>> {
1138        let old_path = self.entry_for_id(entry_id)?.path.clone();
1139        let new_path = new_path.into();
1140        let abs_old_path = self.absolutize(&old_path);
1141        let abs_new_path = self.absolutize(&new_path);
1142        let fs = self.fs.clone();
1143        let copy = cx.background().spawn(async move {
1144            copy_recursive(
1145                fs.as_ref(),
1146                &abs_old_path,
1147                &abs_new_path,
1148                Default::default(),
1149            )
1150            .await
1151        });
1152
1153        Some(cx.spawn(|this, mut cx| async move {
1154            copy.await?;
1155            this.update(&mut cx, |this, cx| {
1156                this.as_local_mut()
1157                    .unwrap()
1158                    .refresh_entry(new_path.clone(), None, cx)
1159            })
1160            .await
1161        }))
1162    }
1163
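    /// Asks the background scanner to rescan `path` (and `old_path`, if given),
    /// waits for that scan to be processed, and then returns the refreshed
    /// entry for `path`.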
1164    fn refresh_entry(
1165        &self,
1166        path: Arc<Path>,
1167        old_path: Option<Arc<Path>>,
1168        cx: &mut ModelContext<Worktree>,
1169    ) -> Task<Result<Entry>> {
1170        let fs = self.fs.clone();
1171        let abs_root_path = self.abs_path.clone();
1172        let path_changes_tx = self.path_changes_tx.clone();
1173        cx.spawn_weak(move |this, mut cx| async move {
1174            let abs_path = fs.canonicalize(&abs_root_path).await?;
1175            let mut paths = Vec::with_capacity(2);
1176            paths.push(if path.file_name().is_some() {
1177                abs_path.join(&path)
1178            } else {
1179                abs_path.clone()
1180            });
1181            if let Some(old_path) = old_path {
1182                paths.push(if old_path.file_name().is_some() {
1183                    abs_path.join(&old_path)
1184                } else {
1185                    abs_path.clone()
1186                });
1187            }
1188
1189            let (tx, mut rx) = barrier::channel();
1190            path_changes_tx.try_send((paths, tx))?;
1191            rx.recv().await;
1192            this.upgrade(&cx)
1193                .ok_or_else(|| anyhow!("worktree was dropped"))?
1194                .update(&mut cx, |this, _| {
1195                    this.entry_for_path(path)
1196                        .cloned()
1197                        .ok_or_else(|| anyhow!("failed to read path after update"))
1198                })
1199        })
1200    }
1201
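    /// Starts streaming this worktree's snapshots to the remote project with the
    /// given `project_id`. When sharing begins, the current diagnostic summaries
    /// are sent first. The returned task resolves once the initial snapshot has
    /// been sent, or immediately if the worktree is already being shared.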
1202    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1203        let (share_tx, share_rx) = oneshot::channel();
1204
1205        if let Some(share) = self.share.as_mut() {
1206            let _ = share_tx.send(());
1207            *share.resume_updates.borrow_mut() = ();
1208        } else {
1209            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1210            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1211            let worktree_id = cx.model_id() as u64;
1212
1213            for (path, summaries) in &self.diagnostic_summaries {
1214                for (&server_id, summary) in summaries {
1215                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1216                        project_id,
1217                        worktree_id,
1218                        summary: Some(summary.to_proto(server_id, &path)),
1219                    }) {
1220                        return Task::ready(Err(e));
1221                    }
1222                }
1223            }
1224
1225            let _maintain_remote_snapshot = cx.background().spawn({
1226                let client = self.client.clone();
1227                async move {
1228                    let mut share_tx = Some(share_tx);
1229                    let mut prev_snapshot = LocalSnapshot {
1230                        ignores_by_parent_abs_path: Default::default(),
1231                        removed_entry_ids: Default::default(),
1232                        next_entry_id: Default::default(),
1233                        git_repositories: Default::default(),
1234                        snapshot: Snapshot {
1235                            id: WorktreeId(worktree_id as usize),
1236                            abs_path: Path::new("").into(),
1237                            root_name: Default::default(),
1238                            root_char_bag: Default::default(),
1239                            entries_by_path: Default::default(),
1240                            entries_by_id: Default::default(),
1241                            repository_entries: Default::default(),
1242                            scan_id: 0,
1243                            completed_scan_id: 0,
1244                        },
1245                    };
1246                    while let Some(snapshot) = snapshots_rx.recv().await {
1247                        #[cfg(any(test, feature = "test-support"))]
1248                        const MAX_CHUNK_SIZE: usize = 2;
1249                        #[cfg(not(any(test, feature = "test-support")))]
1250                        const MAX_CHUNK_SIZE: usize = 256;
1251
1252                        let update =
1253                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1254                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1255                            let _ = resume_updates_rx.try_recv();
1256                            while let Err(error) = client.request(update.clone()).await {
1257                                log::error!("failed to send worktree update: {}", error);
1258                                log::info!("waiting to resume updates");
1259                                if resume_updates_rx.next().await.is_none() {
1260                                    return Ok(());
1261                                }
1262                            }
1263                        }
1264
1265                        if let Some(share_tx) = share_tx.take() {
1266                            let _ = share_tx.send(());
1267                        }
1268
1269                        prev_snapshot = snapshot;
1270                    }
1271
1272                    Ok::<_, anyhow::Error>(())
1273                }
1274                .log_err()
1275            });
1276
1277            self.share = Some(ShareState {
1278                project_id,
1279                snapshots_tx,
1280                resume_updates: resume_updates_tx,
1281                _maintain_remote_snapshot,
1282            });
1283        }
1284
1285        cx.foreground()
1286            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1287    }
1288
1289    pub fn unshare(&mut self) {
1290        self.share.take();
1291    }
1292
1293    pub fn is_shared(&self) -> bool {
1294        self.share.is_some()
1295    }
1296}
1297
1298impl RemoteWorktree {
1299    fn snapshot(&self) -> Snapshot {
1300        self.snapshot.clone()
1301    }
1302
1303    pub fn disconnected_from_host(&mut self) {
1304        self.updates_tx.take();
1305        self.snapshot_subscriptions.clear();
1306        self.disconnected = true;
1307    }
1308
1309    pub fn save_buffer(
1310        &self,
1311        buffer_handle: ModelHandle<Buffer>,
1312        cx: &mut ModelContext<Worktree>,
1313    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1314        let buffer = buffer_handle.read(cx);
1315        let buffer_id = buffer.remote_id();
1316        let version = buffer.version();
1317        let rpc = self.client.clone();
1318        let project_id = self.project_id;
1319        cx.as_mut().spawn(|mut cx| async move {
1320            let response = rpc
1321                .request(proto::SaveBuffer {
1322                    project_id,
1323                    buffer_id,
1324                    version: serialize_version(&version),
1325                })
1326                .await?;
1327            let version = deserialize_version(&response.version);
1328            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1329            let mtime = response
1330                .mtime
1331                .ok_or_else(|| anyhow!("missing mtime"))?
1332                .into();
1333
1334            buffer_handle.update(&mut cx, |buffer, cx| {
1335                buffer.did_save(version.clone(), fingerprint, mtime, cx);
1336            });
1337
1338            Ok((version, fingerprint, mtime))
1339        })
1340    }
1341
1342    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1343        if let Some(updates_tx) = &self.updates_tx {
1344            updates_tx
1345                .unbounded_send(update)
1346                .expect("consumer runs to completion");
1347        }
1348    }
1349
1350    fn observed_snapshot(&self, scan_id: usize) -> bool {
1351        self.completed_scan_id >= scan_id
1352    }
1353
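    /// Returns a future that resolves once this worktree has observed a snapshot
    /// with the given `scan_id`, and fails if the worktree is disconnected from
    /// the host before that happens.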
1354    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1355        let (tx, rx) = oneshot::channel();
1356        if self.observed_snapshot(scan_id) {
1357            let _ = tx.send(());
1358        } else if self.disconnected {
1359            drop(tx);
1360        } else {
1361            match self
1362                .snapshot_subscriptions
1363                .binary_search_by_key(&scan_id, |probe| probe.0)
1364            {
1365                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1366            }
1367        }
1368
1369        async move {
1370            rx.await?;
1371            Ok(())
1372        }
1373    }
1374
1375    pub fn update_diagnostic_summary(
1376        &mut self,
1377        path: Arc<Path>,
1378        summary: &proto::DiagnosticSummary,
1379    ) {
1380        let server_id = LanguageServerId(summary.language_server_id as usize);
1381        let summary = DiagnosticSummary {
1382            error_count: summary.error_count as usize,
1383            warning_count: summary.warning_count as usize,
1384        };
1385
1386        if summary.is_empty() {
1387            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1388                summaries.remove(&server_id);
1389                if summaries.is_empty() {
1390                    self.diagnostic_summaries.remove(&path);
1391                }
1392            }
1393        } else {
1394            self.diagnostic_summaries
1395                .entry(path)
1396                .or_default()
1397                .insert(server_id, summary);
1398        }
1399    }
1400
1401    pub fn insert_entry(
1402        &mut self,
1403        entry: proto::Entry,
1404        scan_id: usize,
1405        cx: &mut ModelContext<Worktree>,
1406    ) -> Task<Result<Entry>> {
1407        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1408        cx.spawn(|this, mut cx| async move {
1409            wait_for_snapshot.await?;
1410            this.update(&mut cx, |worktree, _| {
1411                let worktree = worktree.as_remote_mut().unwrap();
1412                let mut snapshot = worktree.background_snapshot.lock();
1413                let entry = snapshot.insert_entry(entry);
1414                worktree.snapshot = snapshot.clone();
1415                entry
1416            })
1417        })
1418    }
1419
1420    pub(crate) fn delete_entry(
1421        &mut self,
1422        id: ProjectEntryId,
1423        scan_id: usize,
1424        cx: &mut ModelContext<Worktree>,
1425    ) -> Task<Result<()>> {
1426        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1427        cx.spawn(|this, mut cx| async move {
1428            wait_for_snapshot.await?;
1429            this.update(&mut cx, |worktree, _| {
1430                let worktree = worktree.as_remote_mut().unwrap();
1431                let mut snapshot = worktree.background_snapshot.lock();
1432                snapshot.delete_entry(id);
1433                worktree.snapshot = snapshot.clone();
1434            });
1435            Ok(())
1436        })
1437    }
1438}
1439
1440impl Snapshot {
1441    pub fn id(&self) -> WorktreeId {
1442        self.id
1443    }
1444
1445    pub fn abs_path(&self) -> &Arc<Path> {
1446        &self.abs_path
1447    }
1448
1449    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1450        self.entries_by_id.get(&entry_id, &()).is_some()
1451    }
1452
1453    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1454        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1455        let old_entry = self.entries_by_id.insert_or_replace(
1456            PathEntry {
1457                id: entry.id,
1458                path: entry.path.clone(),
1459                is_ignored: entry.is_ignored,
1460                scan_id: 0,
1461            },
1462            &(),
1463        );
1464        if let Some(old_entry) = old_entry {
1465            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1466        }
1467        self.entries_by_path.insert_or_replace(entry.clone(), &());
1468        Ok(entry)
1469    }
1470
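        /// Removes the entry with the given id together with all of its descendants,
        /// returning the path of the removed entry if it existed.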
1471    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1472        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1473        self.entries_by_path = {
1474            let mut cursor = self.entries_by_path.cursor();
1475            let mut new_entries_by_path =
1476                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1477            while let Some(entry) = cursor.item() {
1478                if entry.path.starts_with(&removed_entry.path) {
1479                    self.entries_by_id.remove(&entry.id, &());
1480                    cursor.next(&());
1481                } else {
1482                    break;
1483                }
1484            }
1485            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1486            new_entries_by_path
1487        };
1488
1489        Some(removed_entry.path)
1490    }
1491
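        /// Applies an `UpdateWorktree` message received from the host, updating the entry
        /// trees, repository entries, and scan ids to match the remote snapshot.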
1492    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1493        let mut entries_by_path_edits = Vec::new();
1494        let mut entries_by_id_edits = Vec::new();
1495        for entry_id in update.removed_entries {
1496            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1497                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1498                entries_by_id_edits.push(Edit::Remove(entry.id));
1499            }
1500        }
1501
1502        for entry in update.updated_entries {
1503            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1504            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1505                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1506            }
1507            entries_by_id_edits.push(Edit::Insert(PathEntry {
1508                id: entry.id,
1509                path: entry.path.clone(),
1510                is_ignored: entry.is_ignored,
1511                scan_id: 0,
1512            }));
1513            entries_by_path_edits.push(Edit::Insert(entry));
1514        }
1515
1516        self.entries_by_path.edit(entries_by_path_edits, &());
1517        self.entries_by_id.edit(entries_by_id_edits, &());
1518
1519        update.removed_repositories.sort_unstable();
1520        self.repository_entries.retain(|_, entry| {
1521            update
1522                .removed_repositories
1523                .binary_search(&entry.work_directory.to_proto())
1524                .is_err()
1529        });
1530
1531        for repository in update.updated_repositories {
1532            let work_directory_entry: WorkDirectoryEntry =
1533                ProjectEntryId::from_proto(repository.work_directory_id).into();
1534
1535            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1536                let mut statuses = TreeMap::default();
1537                for status_entry in repository.updated_statuses {
1538                    let Some(git_file_status) = read_git_status(status_entry.status) else {
1539                        continue;
1540                    };
1541
1542                    let repo_path = RepoPath::new(status_entry.repo_path.into());
1543                    statuses.insert(repo_path, git_file_status);
1544                }
1545
1546                let work_directory = RepositoryWorkDirectory(entry.path.clone());
1547                if self.repository_entries.get(&work_directory).is_some() {
1548                    self.repository_entries.update(&work_directory, |repo| {
1549                        repo.branch = repository.branch.map(Into::into);
1550                        repo.statuses.insert_tree(statuses);
1551
1552                        for repo_path in repository.removed_repo_paths {
1553                            let repo_path = RepoPath::new(repo_path.into());
1554                            repo.statuses.remove(&repo_path);
1555                        }
1556                    });
1557                } else {
1558                    self.repository_entries.insert(
1559                        work_directory,
1560                        RepositoryEntry {
1561                            work_directory: work_directory_entry,
1562                            branch: repository.branch.map(Into::into),
1563                            statuses,
1564                        },
1565                    )
1566                }
1567            } else {
1568                log::error!("no work directory entry for repository {:?}", repository)
1569            }
1570        }
1571
1572        self.scan_id = update.scan_id as usize;
1573        if update.is_last_update {
1574            self.completed_scan_id = update.scan_id as usize;
1575        }
1576
1577        Ok(())
1578    }
1579
1580    pub fn file_count(&self) -> usize {
1581        self.entries_by_path.summary().file_count
1582    }
1583
1584    pub fn visible_file_count(&self) -> usize {
1585        self.entries_by_path.summary().visible_file_count
1586    }
1587
1588    fn traverse_from_offset(
1589        &self,
1590        include_dirs: bool,
1591        include_ignored: bool,
1592        start_offset: usize,
1593    ) -> Traversal {
1594        let mut cursor = self.entries_by_path.cursor();
1595        cursor.seek(
1596            &TraversalTarget::Count {
1597                count: start_offset,
1598                include_dirs,
1599                include_ignored,
1600            },
1601            Bias::Right,
1602            &(),
1603        );
1604        Traversal {
1605            cursor,
1606            include_dirs,
1607            include_ignored,
1608        }
1609    }
1610
1611    fn traverse_from_path(
1612        &self,
1613        include_dirs: bool,
1614        include_ignored: bool,
1615        path: &Path,
1616    ) -> Traversal {
1617        let mut cursor = self.entries_by_path.cursor();
1618        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1619        Traversal {
1620            cursor,
1621            include_dirs,
1622            include_ignored,
1623        }
1624    }
1625
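        /// Iterates over file entries starting at the given file offset, skipping ignored
        /// files unless `include_ignored` is set. A minimal sketch (illustrative; assumes
        /// a populated snapshot):
        ///
        /// ```ignore
        /// for entry in snapshot.files(false, 0) {
        ///     println!("{:?}", entry.path);
        /// }
        /// ```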
1626    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1627        self.traverse_from_offset(false, include_ignored, start)
1628    }
1629
1630    pub fn entries(&self, include_ignored: bool) -> Traversal {
1631        self.traverse_from_offset(true, include_ignored, 0)
1632    }
1633
1634    pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1635        self.repository_entries.values()
1636    }
1637
1638    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1639        let empty_path = Path::new("");
1640        self.entries_by_path
1641            .cursor::<()>()
1642            .filter(move |entry| entry.path.as_ref() != empty_path)
1643            .map(|entry| &entry.path)
1644    }
1645
1646    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1647        let mut cursor = self.entries_by_path.cursor();
1648        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1649        let traversal = Traversal {
1650            cursor,
1651            include_dirs: true,
1652            include_ignored: true,
1653        };
1654        ChildEntriesIter {
1655            traversal,
1656            parent_path,
1657        }
1658    }
1659
1660    fn descendent_entries<'a>(
1661        &'a self,
1662        include_dirs: bool,
1663        include_ignored: bool,
1664        parent_path: &'a Path,
1665    ) -> DescendentEntriesIter<'a> {
1666        let mut cursor = self.entries_by_path.cursor();
1667        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1668        let mut traversal = Traversal {
1669            cursor,
1670            include_dirs,
1671            include_ignored,
1672        };
1673
1674        if traversal.end_offset() == traversal.start_offset() {
1675            traversal.advance();
1676        }
1677
1678        DescendentEntriesIter {
1679            traversal,
1680            parent_path,
1681        }
1682    }
1683
1684    pub fn root_entry(&self) -> Option<&Entry> {
1685        self.entry_for_path("")
1686    }
1687
1688    pub fn root_name(&self) -> &str {
1689        &self.root_name
1690    }
1691
1692    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1693        self.repository_entries
1694            .get(&RepositoryWorkDirectory(Path::new("").into()))
1695            .map(|entry| entry.to_owned())
1696    }
1697
1698    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1699        self.repository_entries.values()
1700    }
1701
1702    pub fn scan_id(&self) -> usize {
1703        self.scan_id
1704    }
1705
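        /// Looks up the entry at the given worktree-relative path, returning it only on an
        /// exact match. A minimal sketch (illustrative; assumes the snapshot contains such
        /// a file):
        ///
        /// ```ignore
        /// if let Some(entry) = snapshot.entry_for_path("src/main.rs") {
        ///     assert!(entry.is_file());
        /// }
        /// ```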
1706    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1707        let path = path.as_ref();
1708        self.traverse_from_path(true, true, path)
1709            .entry()
1710            .and_then(|entry| {
1711                if entry.path.as_ref() == path {
1712                    Some(entry)
1713                } else {
1714                    None
1715                }
1716            })
1717    }
1718
1719    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1720        let entry = self.entries_by_id.get(&id, &())?;
1721        self.entry_for_path(&entry.path)
1722    }
1723
1724    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1725        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1726    }
1727}
1728
1729impl LocalSnapshot {
1730    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1731        self.git_repositories.get(&repo.work_directory.0)
1732    }
1733
1734    pub(crate) fn repo_for_metadata(
1735        &self,
1736        path: &Path,
1737    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1738        self.git_repositories
1739            .iter()
1740            .find(|(_, repo)| repo.in_dot_git(path))
1741    }
1742
1743    #[cfg(test)]
1744    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1745        let root_name = self.root_name.clone();
1746        proto::UpdateWorktree {
1747            project_id,
1748            worktree_id: self.id().to_proto(),
1749            abs_path: self.abs_path().to_string_lossy().into(),
1750            root_name,
1751            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1752            removed_entries: Default::default(),
1753            scan_id: self.scan_id as u64,
1754            is_last_update: true,
1755            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1756            removed_repositories: Default::default(),
1757        }
1758    }
1759
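        /// Builds an `UpdateWorktree` message describing the entries and repositories that
        /// differ between `self` and the older snapshot `other`.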
1760    pub(crate) fn build_update(
1761        &self,
1762        other: &Self,
1763        project_id: u64,
1764        worktree_id: u64,
1765        include_ignored: bool,
1766    ) -> proto::UpdateWorktree {
1767        let mut updated_entries = Vec::new();
1768        let mut removed_entries = Vec::new();
1769        let mut self_entries = self
1770            .entries_by_id
1771            .cursor::<()>()
1772            .filter(|e| include_ignored || !e.is_ignored)
1773            .peekable();
1774        let mut other_entries = other
1775            .entries_by_id
1776            .cursor::<()>()
1777            .filter(|e| include_ignored || !e.is_ignored)
1778            .peekable();
1779        loop {
1780            match (self_entries.peek(), other_entries.peek()) {
1781                (Some(self_entry), Some(other_entry)) => {
1782                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1783                        Ordering::Less => {
1784                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1785                            updated_entries.push(entry);
1786                            self_entries.next();
1787                        }
1788                        Ordering::Equal => {
1789                            if self_entry.scan_id != other_entry.scan_id {
1790                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1791                                updated_entries.push(entry);
1792                            }
1793
1794                            self_entries.next();
1795                            other_entries.next();
1796                        }
1797                        Ordering::Greater => {
1798                            removed_entries.push(other_entry.id.to_proto());
1799                            other_entries.next();
1800                        }
1801                    }
1802                }
1803                (Some(self_entry), None) => {
1804                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1805                    updated_entries.push(entry);
1806                    self_entries.next();
1807                }
1808                (None, Some(other_entry)) => {
1809                    removed_entries.push(other_entry.id.to_proto());
1810                    other_entries.next();
1811                }
1812                (None, None) => break,
1813            }
1814        }
1815
1816        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1817        let mut removed_repositories = Vec::new();
1818        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1819        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1820        loop {
1821            match (self_repos.peek(), other_repos.peek()) {
1822                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1823                    match Ord::cmp(self_work_dir, other_work_dir) {
1824                        Ordering::Less => {
1825                            updated_repositories.push((*self_repo).into());
1826                            self_repos.next();
1827                        }
1828                        Ordering::Equal => {
1829                            if self_repo != other_repo {
1830                                updated_repositories.push(self_repo.build_update(other_repo));
1831                            }
1832
1833                            self_repos.next();
1834                            other_repos.next();
1835                        }
1836                        Ordering::Greater => {
1837                            removed_repositories.push(other_repo.work_directory.to_proto());
1838                            other_repos.next();
1839                        }
1840                    }
1841                }
1842                (Some((_, self_repo)), None) => {
1843                    updated_repositories.push((*self_repo).into());
1844                    self_repos.next();
1845                }
1846                (None, Some((_, other_repo))) => {
1847                    removed_repositories.push(other_repo.work_directory.to_proto());
1848                    other_repos.next();
1849                }
1850                (None, None) => break,
1851            }
1852        }
1853
1854        proto::UpdateWorktree {
1855            project_id,
1856            worktree_id,
1857            abs_path: self.abs_path().to_string_lossy().into(),
1858            root_name: self.root_name().to_string(),
1859            updated_entries,
1860            removed_entries,
1861            scan_id: self.scan_id as u64,
1862            is_last_update: self.completed_scan_id == self.scan_id,
1863            updated_repositories,
1864            removed_repositories,
1865        }
1866    }
1867
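        /// Inserts an entry discovered on disk, reusing an existing entry id where possible.
        /// If the entry is a `.gitignore` file, its rules are parsed and cached for the
        /// containing directory.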
1868    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1869        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1870            let abs_path = self.abs_path.join(&entry.path);
1871            match smol::block_on(build_gitignore(&abs_path, fs)) {
1872                Ok(ignore) => {
1873                    self.ignores_by_parent_abs_path
1874                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
1875                }
1876                Err(error) => {
1877                    log::error!(
1878                        "error loading .gitignore file {:?} - {:?}",
1879                        &entry.path,
1880                        error
1881                    );
1882                }
1883            }
1884        }
1885
1886        self.reuse_entry_id(&mut entry);
1887
1888        if entry.kind == EntryKind::PendingDir {
1889            if let Some(existing_entry) =
1890                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1891            {
1892                entry.kind = existing_entry.kind;
1893            }
1894        }
1895
1896        let scan_id = self.scan_id;
1897        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1898        if let Some(removed) = removed {
1899            if removed.id != entry.id {
1900                self.entries_by_id.remove(&removed.id, &());
1901            }
1902        }
1903        self.entries_by_id.insert_or_replace(
1904            PathEntry {
1905                id: entry.id,
1906                path: entry.path.clone(),
1907                is_ignored: entry.is_ignored,
1908                scan_id,
1909            },
1910            &(),
1911        );
1912
1913        entry
1914    }
1915
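        /// Populates a directory that was previously marked as pending with the entries
        /// found inside it, and registers a git repository if the directory is `.git`.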
1916    fn populate_dir(
1917        &mut self,
1918        parent_path: Arc<Path>,
1919        entries: impl IntoIterator<Item = Entry>,
1920        ignore: Option<Arc<Gitignore>>,
1921        fs: &dyn Fs,
1922    ) {
1923        let mut parent_entry = if let Some(parent_entry) =
1924            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1925        {
1926            parent_entry.clone()
1927        } else {
1928            log::warn!(
1929                "populating a directory {:?} that has been removed",
1930                parent_path
1931            );
1932            return;
1933        };
1934
1935        match parent_entry.kind {
1936            EntryKind::PendingDir => {
1937                parent_entry.kind = EntryKind::Dir;
1938            }
1939            EntryKind::Dir => {}
1940            _ => return,
1941        }
1942
1943        if let Some(ignore) = ignore {
1944            self.ignores_by_parent_abs_path
1945                .insert(self.abs_path.join(&parent_path).into(), (ignore, false));
1946        }
1947
1948        if parent_path.file_name() == Some(&DOT_GIT) {
1949            self.build_repo(parent_path, fs);
1950        }
1951
1952        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1953        let mut entries_by_id_edits = Vec::new();
1954
1955        for mut entry in entries {
1956            self.reuse_entry_id(&mut entry);
1957            entries_by_id_edits.push(Edit::Insert(PathEntry {
1958                id: entry.id,
1959                path: entry.path.clone(),
1960                is_ignored: entry.is_ignored,
1961                scan_id: self.scan_id,
1962            }));
1963            entries_by_path_edits.push(Edit::Insert(entry));
1964        }
1965
1966        self.entries_by_path.edit(entries_by_path_edits, &());
1967        self.entries_by_id.edit(entries_by_id_edits, &());
1968    }
1969
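        /// Registers the git repository whose `.git` directory is at `parent_path`,
        /// recording its work directory, current branch, and file statuses.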
1970    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1971        let abs_path = self.abs_path.join(&parent_path);
1972        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1973
1974        // Guard against registering repositories nested inside another repository's `.git` metadata
1975        if work_dir
1976            .components()
1977            .any(|component| component.as_os_str() == *DOT_GIT)
1978        {
1979            return None;
1980        }
1982
1983        let work_dir_id = self
1984            .entry_for_path(work_dir.clone())
1985            .map(|entry| entry.id)?;
1986
1987        if self.git_repositories.get(&work_dir_id).is_none() {
1988            let repo = fs.open_repo(abs_path.as_path())?;
1989            let work_directory = RepositoryWorkDirectory(work_dir.clone());
1990            let scan_id = self.scan_id;
1991
1992            let repo_lock = repo.lock();
1993
1994            self.repository_entries.insert(
1995                work_directory,
1996                RepositoryEntry {
1997                    work_directory: work_dir_id.into(),
1998                    branch: repo_lock.branch_name().map(Into::into),
1999                    statuses: repo_lock.statuses().unwrap_or_default(),
2000                },
2001            );
2002            drop(repo_lock);
2003
2004            self.git_repositories.insert(
2005                work_dir_id,
2006                LocalRepositoryEntry {
2007                    scan_id,
2008                    full_scan_id: scan_id,
2009                    repo_ptr: repo,
2010                    git_dir_path: parent_path.clone(),
2011                },
2012            )
2013        }
2014
2015        Some(())
2016    }
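
        /// Re-uses an existing entry id for `entry`: the id of a recently removed entry
        /// with the same inode if there is one, otherwise the id of the entry currently
        /// at the same path.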
2017    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2018        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2019            entry.id = removed_entry_id;
2020        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2021            entry.id = existing_entry.id;
2022        }
2023    }
2024
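        /// Removes the entry at `path` and all of its descendants, remembering the removed
        /// ids by inode so they can be reused if the paths reappear. If the removed path is
        /// a `.gitignore` file, the cached rules for its directory are marked for update.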
2025    fn remove_path(&mut self, path: &Path) {
2026        let mut new_entries;
2027        let removed_entries;
2028        {
2029            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2030            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2031            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2032            new_entries.push_tree(cursor.suffix(&()), &());
2033        }
2034        self.entries_by_path = new_entries;
2035
2036        let mut entries_by_id_edits = Vec::new();
2037        for entry in removed_entries.cursor::<()>() {
2038            let removed_entry_id = self
2039                .removed_entry_ids
2040                .entry(entry.inode)
2041                .or_insert(entry.id);
2042            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2043            entries_by_id_edits.push(Edit::Remove(entry.id));
2044        }
2045        self.entries_by_id.edit(entries_by_id_edits, &());
2046
2047        if path.file_name() == Some(&GITIGNORE) {
2048            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2049            if let Some((_, needs_update)) = self
2050                .ignores_by_parent_abs_path
2051                .get_mut(abs_parent_path.as_path())
2052            {
2053                *needs_update = true;
2054            }
2055        }
2056    }
2057
2058    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2059        let mut inodes = TreeSet::default();
2060        for ancestor in path.ancestors().skip(1) {
2061            if let Some(entry) = self.entry_for_path(ancestor) {
2062                inodes.insert(entry.inode);
2063            }
2064        }
2065        inodes
2066    }
2067
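        /// Builds the stack of `.gitignore` rules that apply to `abs_path` by walking its
        /// ancestors from the outermost directory downward.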
2068    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2069        let mut new_ignores = Vec::new();
2070        for ancestor in abs_path.ancestors().skip(1) {
2071            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2072                new_ignores.push((ancestor, Some(ignore.clone())));
2073            } else {
2074                new_ignores.push((ancestor, None));
2075            }
2076        }
2077
2078        let mut ignore_stack = IgnoreStack::none();
2079        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2080            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2081                ignore_stack = IgnoreStack::all();
2082                break;
2083            } else if let Some(ignore) = ignore {
2084                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2085            }
2086        }
2087
2088        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2089            ignore_stack = IgnoreStack::all();
2090        }
2091
2092        ignore_stack
2093    }
2094}
2095
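    /// Loads the `.gitignore` file at `abs_path` and parses it into a matcher rooted at
    /// the file's parent directory.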
2096async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2097    let contents = fs.load(abs_path).await?;
2098    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2099    let mut builder = GitignoreBuilder::new(parent);
2100    for line in contents.lines() {
2101        builder.add_line(Some(abs_path.into()), line)?;
2102    }
2103    Ok(builder.build()?)
2104}
2105
2106impl WorktreeId {
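        /// Creates a `WorktreeId` from the raw handle id. Illustrative round trip (sketch):
        /// `WorktreeId::from_usize(3).to_proto() == 3`.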
2107    pub fn from_usize(handle_id: usize) -> Self {
2108        Self(handle_id)
2109    }
2110
2111    pub(crate) fn from_proto(id: u64) -> Self {
2112        Self(id as usize)
2113    }
2114
2115    pub fn to_proto(&self) -> u64 {
2116        self.0 as u64
2117    }
2118
2119    pub fn to_usize(&self) -> usize {
2120        self.0
2121    }
2122}
2123
2124impl fmt::Display for WorktreeId {
2125    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2126        self.0.fmt(f)
2127    }
2128}
2129
2130impl Deref for Worktree {
2131    type Target = Snapshot;
2132
2133    fn deref(&self) -> &Self::Target {
2134        match self {
2135            Worktree::Local(worktree) => &worktree.snapshot,
2136            Worktree::Remote(worktree) => &worktree.snapshot,
2137        }
2138    }
2139}
2140
2141impl Deref for LocalWorktree {
2142    type Target = LocalSnapshot;
2143
2144    fn deref(&self) -> &Self::Target {
2145        &self.snapshot
2146    }
2147}
2148
2149impl Deref for RemoteWorktree {
2150    type Target = Snapshot;
2151
2152    fn deref(&self) -> &Self::Target {
2153        &self.snapshot
2154    }
2155}
2156
2157impl fmt::Debug for LocalWorktree {
2158    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2159        self.snapshot.fmt(f)
2160    }
2161}
2162
2163impl fmt::Debug for Snapshot {
2164    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2165        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2166        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2167
2168        impl<'a> fmt::Debug for EntriesByPath<'a> {
2169            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2170                f.debug_map()
2171                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2172                    .finish()
2173            }
2174        }
2175
2176        impl<'a> fmt::Debug for EntriesById<'a> {
2177            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2178                f.debug_list().entries(self.0.iter()).finish()
2179            }
2180        }
2181
2182        f.debug_struct("Snapshot")
2183            .field("id", &self.id)
2184            .field("root_name", &self.root_name)
2185            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2186            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2187            .finish()
2188    }
2189}
2190
2191#[derive(Clone, PartialEq)]
2192pub struct File {
2193    pub worktree: ModelHandle<Worktree>,
2194    pub path: Arc<Path>,
2195    pub mtime: SystemTime,
2196    pub(crate) entry_id: ProjectEntryId,
2197    pub(crate) is_local: bool,
2198    pub(crate) is_deleted: bool,
2199}
2200
2201impl language::File for File {
2202    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2203        if self.is_local {
2204            Some(self)
2205        } else {
2206            None
2207        }
2208    }
2209
2210    fn mtime(&self) -> SystemTime {
2211        self.mtime
2212    }
2213
2214    fn path(&self) -> &Arc<Path> {
2215        &self.path
2216    }
2217
2218    fn full_path(&self, cx: &AppContext) -> PathBuf {
2219        let mut full_path = PathBuf::new();
2220        let worktree = self.worktree.read(cx);
2221
2222        if worktree.is_visible() {
2223            full_path.push(worktree.root_name());
2224        } else {
2225            let path = worktree.abs_path();
2226
2227            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2228                full_path.push("~");
2229                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2230            } else {
2231                full_path.push(path)
2232            }
2233        }
2234
2235        if self.path.components().next().is_some() {
2236            full_path.push(&self.path);
2237        }
2238
2239        full_path
2240    }
2241
2242    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2243    /// of its worktree, then this method will return the name of the worktree itself.
2244    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2245        self.path
2246            .file_name()
2247            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2248    }
2249
2250    fn is_deleted(&self) -> bool {
2251        self.is_deleted
2252    }
2253
2254    fn as_any(&self) -> &dyn Any {
2255        self
2256    }
2257
2258    fn to_proto(&self) -> rpc::proto::File {
2259        rpc::proto::File {
2260            worktree_id: self.worktree.id() as u64,
2261            entry_id: self.entry_id.to_proto(),
2262            path: self.path.to_string_lossy().into(),
2263            mtime: Some(self.mtime.into()),
2264            is_deleted: self.is_deleted,
2265        }
2266    }
2267}
2268
2269impl language::LocalFile for File {
2270    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2271        self.worktree
2272            .read(cx)
2273            .as_local()
2274            .unwrap()
2275            .abs_path
2276            .join(&self.path)
2277    }
2278
2279    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2280        let worktree = self.worktree.read(cx).as_local().unwrap();
2281        let abs_path = worktree.absolutize(&self.path);
2282        let fs = worktree.fs.clone();
2283        cx.background()
2284            .spawn(async move { fs.load(&abs_path).await })
2285    }
2286
2287    fn buffer_reloaded(
2288        &self,
2289        buffer_id: u64,
2290        version: &clock::Global,
2291        fingerprint: RopeFingerprint,
2292        line_ending: LineEnding,
2293        mtime: SystemTime,
2294        cx: &mut AppContext,
2295    ) {
2296        let worktree = self.worktree.read(cx).as_local().unwrap();
2297        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2298            worktree
2299                .client
2300                .send(proto::BufferReloaded {
2301                    project_id,
2302                    buffer_id,
2303                    version: serialize_version(version),
2304                    mtime: Some(mtime.into()),
2305                    fingerprint: serialize_fingerprint(fingerprint),
2306                    line_ending: serialize_line_ending(line_ending) as i32,
2307                })
2308                .log_err();
2309        }
2310    }
2311}
2312
2313impl File {
2314    pub fn from_proto(
2315        proto: rpc::proto::File,
2316        worktree: ModelHandle<Worktree>,
2317        cx: &AppContext,
2318    ) -> Result<Self> {
2319        let worktree_id = worktree
2320            .read(cx)
2321            .as_remote()
2322            .ok_or_else(|| anyhow!("not remote"))?
2323            .id();
2324
2325        if worktree_id.to_proto() != proto.worktree_id {
2326            return Err(anyhow!("worktree id does not match file"));
2327        }
2328
2329        Ok(Self {
2330            worktree,
2331            path: Path::new(&proto.path).into(),
2332            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2333            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2334            is_local: false,
2335            is_deleted: proto.is_deleted,
2336        })
2337    }
2338
2339    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2340        file.and_then(|f| f.as_any().downcast_ref())
2341    }
2342
2343    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2344        self.worktree.read(cx).id()
2345    }
2346
2347    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2348        if self.is_deleted {
2349            None
2350        } else {
2351            Some(self.entry_id)
2352        }
2353    }
2354}
2355
2356#[derive(Clone, Debug, PartialEq, Eq)]
2357pub struct Entry {
2358    pub id: ProjectEntryId,
2359    pub kind: EntryKind,
2360    pub path: Arc<Path>,
2361    pub inode: u64,
2362    pub mtime: SystemTime,
2363    pub is_symlink: bool,
2364    pub is_ignored: bool,
2365}
2366
2367#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2368pub enum EntryKind {
2369    PendingDir,
2370    Dir,
2371    File(CharBag),
2372}
2373
2374#[derive(Clone, Copy, Debug)]
2375pub enum PathChange {
2376    Added,
2377    Removed,
2378    Updated,
2379    AddedOrUpdated,
2380}
2381
2382impl Entry {
2383    fn new(
2384        path: Arc<Path>,
2385        metadata: &fs::Metadata,
2386        next_entry_id: &AtomicUsize,
2387        root_char_bag: CharBag,
2388    ) -> Self {
2389        Self {
2390            id: ProjectEntryId::new(next_entry_id),
2391            kind: if metadata.is_dir {
2392                EntryKind::PendingDir
2393            } else {
2394                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2395            },
2396            path,
2397            inode: metadata.inode,
2398            mtime: metadata.mtime,
2399            is_symlink: metadata.is_symlink,
2400            is_ignored: false,
2401        }
2402    }
2403
2404    pub fn is_dir(&self) -> bool {
2405        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2406    }
2407
2408    pub fn is_file(&self) -> bool {
2409        matches!(self.kind, EntryKind::File(_))
2410    }
2411}
2412
2413impl sum_tree::Item for Entry {
2414    type Summary = EntrySummary;
2415
2416    fn summary(&self) -> Self::Summary {
2417        let visible_count = if self.is_ignored { 0 } else { 1 };
2418        let file_count;
2419        let visible_file_count;
2420        if self.is_file() {
2421            file_count = 1;
2422            visible_file_count = visible_count;
2423        } else {
2424            file_count = 0;
2425            visible_file_count = 0;
2426        }
2427
2428        EntrySummary {
2429            max_path: self.path.clone(),
2430            count: 1,
2431            visible_count,
2432            file_count,
2433            visible_file_count,
2434        }
2435    }
2436}
2437
2438impl sum_tree::KeyedItem for Entry {
2439    type Key = PathKey;
2440
2441    fn key(&self) -> Self::Key {
2442        PathKey(self.path.clone())
2443    }
2444}
2445
2446#[derive(Clone, Debug)]
2447pub struct EntrySummary {
2448    max_path: Arc<Path>,
2449    count: usize,
2450    visible_count: usize,
2451    file_count: usize,
2452    visible_file_count: usize,
2453}
2454
2455impl Default for EntrySummary {
2456    fn default() -> Self {
2457        Self {
2458            max_path: Arc::from(Path::new("")),
2459            count: 0,
2460            visible_count: 0,
2461            file_count: 0,
2462            visible_file_count: 0,
2463        }
2464    }
2465}
2466
2467impl sum_tree::Summary for EntrySummary {
2468    type Context = ();
2469
2470    fn add_summary(&mut self, rhs: &Self, _: &()) {
2471        self.max_path = rhs.max_path.clone();
2472        self.count += rhs.count;
2473        self.visible_count += rhs.visible_count;
2474        self.file_count += rhs.file_count;
2475        self.visible_file_count += rhs.visible_file_count;
2476    }
2477}
2478
2479#[derive(Clone, Debug)]
2480struct PathEntry {
2481    id: ProjectEntryId,
2482    path: Arc<Path>,
2483    is_ignored: bool,
2484    scan_id: usize,
2485}
2486
2487impl sum_tree::Item for PathEntry {
2488    type Summary = PathEntrySummary;
2489
2490    fn summary(&self) -> Self::Summary {
2491        PathEntrySummary { max_id: self.id }
2492    }
2493}
2494
2495impl sum_tree::KeyedItem for PathEntry {
2496    type Key = ProjectEntryId;
2497
2498    fn key(&self) -> Self::Key {
2499        self.id
2500    }
2501}
2502
2503#[derive(Clone, Debug, Default)]
2504struct PathEntrySummary {
2505    max_id: ProjectEntryId,
2506}
2507
2508impl sum_tree::Summary for PathEntrySummary {
2509    type Context = ();
2510
2511    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2512        self.max_id = summary.max_id;
2513    }
2514}
2515
2516impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2517    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2518        *self = summary.max_id;
2519    }
2520}
2521
2522#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2523pub struct PathKey(Arc<Path>);
2524
2525impl Default for PathKey {
2526    fn default() -> Self {
2527        Self(Path::new("").into())
2528    }
2529}
2530
2531impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2532    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2533        self.0 = summary.max_path.clone();
2534    }
2535}
2536
2537struct BackgroundScanner {
2538    snapshot: Mutex<LocalSnapshot>,
2539    fs: Arc<dyn Fs>,
2540    status_updates_tx: UnboundedSender<ScanState>,
2541    executor: Arc<executor::Background>,
2542    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2543    prev_state: Mutex<BackgroundScannerState>,
2544    finished_initial_scan: bool,
2545}
2546
2547struct BackgroundScannerState {
2548    snapshot: Snapshot,
2549    event_paths: Vec<Arc<Path>>,
2550}
2551
2552impl BackgroundScanner {
2553    fn new(
2554        snapshot: LocalSnapshot,
2555        fs: Arc<dyn Fs>,
2556        status_updates_tx: UnboundedSender<ScanState>,
2557        executor: Arc<executor::Background>,
2558        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2559    ) -> Self {
2560        Self {
2561            fs,
2562            status_updates_tx,
2563            executor,
2564            refresh_requests_rx,
2565            prev_state: Mutex::new(BackgroundScannerState {
2566                snapshot: snapshot.snapshot.clone(),
2567                event_paths: Default::default(),
2568            }),
2569            snapshot: Mutex::new(snapshot),
2570            finished_initial_scan: false,
2571        }
2572    }
2573
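        /// Drives the scanner: performs the initial recursive scan, then keeps processing
        /// file system events and refresh requests until the worktree is dropped.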
2574    async fn run(
2575        &mut self,
2576        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2577    ) {
2578        use futures::FutureExt as _;
2579
2580        let (root_abs_path, root_inode) = {
2581            let snapshot = self.snapshot.lock();
2582            (
2583                snapshot.abs_path.clone(),
2584                snapshot.root_entry().map(|e| e.inode),
2585            )
2586        };
2587
2588        // Populate ignores above the root.
2589        let ignore_stack;
2590        for ancestor in root_abs_path.ancestors().skip(1) {
2591            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2592            {
2593                self.snapshot
2594                    .lock()
2595                    .ignores_by_parent_abs_path
2596                    .insert(ancestor.into(), (ignore.into(), false));
2597            }
2598        }
2599        {
2600            let mut snapshot = self.snapshot.lock();
2601            snapshot.scan_id += 1;
2602            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2603            if ignore_stack.is_all() {
2604                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2605                    root_entry.is_ignored = true;
2606                    snapshot.insert_entry(root_entry, self.fs.as_ref());
2607                }
2608            }
2609        };
2610
2611        // Perform an initial scan of the directory.
2612        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2613        smol::block_on(scan_job_tx.send(ScanJob {
2614            abs_path: root_abs_path,
2615            path: Arc::from(Path::new("")),
2616            ignore_stack,
2617            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2618            scan_queue: scan_job_tx.clone(),
2619        }))
2620        .unwrap();
2621        drop(scan_job_tx);
2622        self.scan_dirs(true, scan_job_rx).await;
2623        {
2624            let mut snapshot = self.snapshot.lock();
2625            snapshot.completed_scan_id = snapshot.scan_id;
2626        }
2627        self.send_status_update(false, None);
2628
2629        // Process any FS events that occurred while performing the initial scan. For
2630        // these events, the reported changes cannot be as precise, because we didn't
2631        // have the previous state loaded yet.
2632        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2633            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2634            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2635                paths.extend(more_events.into_iter().map(|e| e.path));
2636            }
2637            self.process_events(paths).await;
2638        }
2639
2640        self.finished_initial_scan = true;
2641
2642        // Continue processing events until the worktree is dropped.
2643        loop {
2644            select_biased! {
2645                // Process any path refresh requests from the worktree. Prioritize
2646                // these before handling changes reported by the filesystem.
2647                request = self.refresh_requests_rx.recv().fuse() => {
2648                    let Ok((paths, barrier)) = request else { break };
2649                    if !self.process_refresh_request(paths.clone(), barrier).await {
2650                        return;
2651                    }
2652                }
2653
2654                events = events_rx.next().fuse() => {
2655                    let Some(events) = events else { break };
2656                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2657                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2658                        paths.extend(more_events.into_iter().map(|e| e.path));
2659                    }
2660                    self.process_events(paths.clone()).await;
2661                }
2662            }
2663        }
2664    }
2665
2666    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2667        if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
2668            paths.sort_unstable();
2669            util::extend_sorted(
2670                &mut self.prev_state.lock().event_paths,
2671                paths,
2672                usize::MAX,
2673                Ord::cmp,
2674            );
2675        }
2676        self.send_status_update(false, Some(barrier))
2677    }
2678
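        /// Reloads entries for the changed paths, rescans any affected directories, refreshes
        /// ignore statuses and git repository state, and then broadcasts the updated snapshot.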
2679    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2680        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2681        let paths = self
2682            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2683            .await;
2684        if let Some(paths) = &paths {
2685            util::extend_sorted(
2686                &mut self.prev_state.lock().event_paths,
2687                paths.iter().cloned(),
2688                usize::MAX,
2689                Ord::cmp,
2690            );
2691        }
2692        drop(scan_job_tx);
2693        self.scan_dirs(false, scan_job_rx).await;
2694
2695        self.update_ignore_statuses().await;
2696
2697        let mut snapshot = self.snapshot.lock();
2698
2699        if let Some(paths) = paths {
2700            for path in paths {
2701                self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
2702            }
2703        }
2704
2705        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2706        git_repositories.retain(|work_directory_id, _| {
2707            snapshot
2708                .entry_for_id(*work_directory_id)
2709                .map_or(false, |entry| {
2710                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2711                })
2712        });
2713        snapshot.git_repositories = git_repositories;
2714
2715        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2716        git_repository_entries.retain(|_, entry| {
2717            snapshot
2718                .git_repositories
2719                .get(&entry.work_directory.0)
2720                .is_some()
2721        });
2722        snapshot.snapshot.repository_entries = git_repository_entries;
2723
2724        snapshot.removed_entry_ids.clear();
2725        snapshot.completed_scan_id = snapshot.scan_id;
2726
2727        drop(snapshot);
2728
2729        self.send_status_update(false, None);
2730        self.prev_state.lock().event_paths.clear();
2731    }
2732
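        /// Drains the scan queue on a pool of background tasks, interleaving path refresh
        /// requests and periodic progress updates while the scan is in flight.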
2733    async fn scan_dirs(
2734        &self,
2735        enable_progress_updates: bool,
2736        scan_jobs_rx: channel::Receiver<ScanJob>,
2737    ) {
2738        use futures::FutureExt as _;
2739
2740        if self
2741            .status_updates_tx
2742            .unbounded_send(ScanState::Started)
2743            .is_err()
2744        {
2745            return;
2746        }
2747
2748        let progress_update_count = AtomicUsize::new(0);
2749        self.executor
2750            .scoped(|scope| {
2751                for _ in 0..self.executor.num_cpus() {
2752                    scope.spawn(async {
2753                        let mut last_progress_update_count = 0;
2754                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2755                        futures::pin_mut!(progress_update_timer);
2756
2757                        loop {
2758                            select_biased! {
2759                                // Process any path refresh requests before moving on to process
2760                                // the scan queue, so that user operations are prioritized.
2761                                request = self.refresh_requests_rx.recv().fuse() => {
2762                                    let Ok((paths, barrier)) = request else { break };
2763                                    if !self.process_refresh_request(paths, barrier).await {
2764                                        return;
2765                                    }
2766                                }
2767
2768                                // Send periodic progress updates to the worktree. Use an atomic counter
2769                                // to ensure that only one of the workers sends a progress update after
2770                                // the update interval elapses.
2771                                _ = progress_update_timer => {
2772                                    match progress_update_count.compare_exchange(
2773                                        last_progress_update_count,
2774                                        last_progress_update_count + 1,
2775                                        SeqCst,
2776                                        SeqCst
2777                                    ) {
2778                                        Ok(_) => {
2779                                            last_progress_update_count += 1;
2780                                            self.send_status_update(true, None);
2781                                        }
2782                                        Err(count) => {
2783                                            last_progress_update_count = count;
2784                                        }
2785                                    }
2786                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2787                                }
2788
2789                                // Recursively load directories from the file system.
2790                                job = scan_jobs_rx.recv().fuse() => {
2791                                    let Ok(job) = job else { break };
2792                                    if let Err(err) = self.scan_dir(&job).await {
2793                                        if job.path.as_ref() != Path::new("") {
2794                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2795                                        }
2796                                    }
2797                                }
2798                            }
2799                        }
2800                    })
2801                }
2802            })
2803            .await;
2804    }
2805
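        /// Builds the change set between the previously reported snapshot and the current
        /// one and sends it to the worktree, returning false if the receiver was dropped.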
2806    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2807        let mut prev_state = self.prev_state.lock();
2808        let new_snapshot = self.snapshot.lock().clone();
2809        let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());
2810
2811        let changes = self.build_change_set(
2812            &old_snapshot,
2813            &new_snapshot.snapshot,
2814            &prev_state.event_paths,
2815        );
2816
2817        self.status_updates_tx
2818            .unbounded_send(ScanState::Updated {
2819                snapshot: new_snapshot,
2820                changes,
2821                scanning,
2822                barrier,
2823            })
2824            .is_ok()
2825    }
2826
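
        /// Scans a single directory, creating entries for its children and queueing
        /// follow-up jobs for subdirectories that are safe to descend into.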
2827    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2828        let mut new_entries: Vec<Entry> = Vec::new();
2829        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2830        let mut ignore_stack = job.ignore_stack.clone();
2831        let mut new_ignore = None;
2832        let (root_abs_path, root_char_bag, next_entry_id) = {
2833            let snapshot = self.snapshot.lock();
2834            (
2835                snapshot.abs_path().clone(),
2836                snapshot.root_char_bag,
2837                snapshot.next_entry_id.clone(),
2838            )
2839        };
2840        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2841        while let Some(child_abs_path) = child_paths.next().await {
2842            let child_abs_path: Arc<Path> = match child_abs_path {
2843                Ok(child_abs_path) => child_abs_path.into(),
2844                Err(error) => {
2845                    log::error!("error processing entry {:?}", error);
2846                    continue;
2847                }
2848            };
2849
2850            let child_name = child_abs_path.file_name().unwrap();
2851            let child_path: Arc<Path> = job.path.join(child_name).into();
2852            let child_metadata = match self.fs.metadata(&child_abs_path).await {
2853                Ok(Some(metadata)) => metadata,
2854                Ok(None) => continue,
2855                Err(err) => {
2856                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
2857                    continue;
2858                }
2859            };
2860
2861            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2862            if child_name == *GITIGNORE {
2863                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2864                    Ok(ignore) => {
2865                        let ignore = Arc::new(ignore);
2866                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2867                        new_ignore = Some(ignore);
2868                    }
2869                    Err(error) => {
2870                        log::error!(
2871                            "error loading .gitignore file {:?} - {:?}",
2872                            child_name,
2873                            error
2874                        );
2875                    }
2876                }
2877
2878                // Update the ignore status of any child entries we've already processed to
2879                // reflect the ignore file in the current directory. Because `.gitignore` starts
2880                // with a `.`, there should rarely be many such entries. Update the ignore stack
2881                // associated with any new jobs as well.
2882                let mut new_jobs = new_jobs.iter_mut();
2883                for entry in &mut new_entries {
2884                    let entry_abs_path = root_abs_path.join(&entry.path);
2885                    entry.is_ignored =
2886                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2887
2888                    if entry.is_dir() {
2889                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2890                            job.ignore_stack = if entry.is_ignored {
2891                                IgnoreStack::all()
2892                            } else {
2893                                ignore_stack.clone()
2894                            };
2895                        }
2896                    }
2897                }
2898            }
2899
2900            let mut child_entry = Entry::new(
2901                child_path.clone(),
2902                &child_metadata,
2903                &next_entry_id,
2904                root_char_bag,
2905            );
2906
2907            if child_entry.is_dir() {
2908                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2909                child_entry.is_ignored = is_ignored;
2910
2911                // Avoid recursing indefinitely in the case of a recursive symlink
2912                if !job.ancestor_inodes.contains(&child_entry.inode) {
2913                    let mut ancestor_inodes = job.ancestor_inodes.clone();
2914                    ancestor_inodes.insert(child_entry.inode);
2915
2916                    new_jobs.push(Some(ScanJob {
2917                        abs_path: child_abs_path,
2918                        path: child_path,
2919                        ignore_stack: if is_ignored {
2920                            IgnoreStack::all()
2921                        } else {
2922                            ignore_stack.clone()
2923                        },
2924                        ancestor_inodes,
2925                        scan_queue: job.scan_queue.clone(),
2926                    }));
2927                } else {
2928                    new_jobs.push(None);
2929                }
2930            } else {
2931                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2932            }
2933
2934            new_entries.push(child_entry);
2935        }
2936
2937        self.snapshot.lock().populate_dir(
2938            job.path.clone(),
2939            new_entries,
2940            new_ignore,
2941            self.fs.as_ref(),
2942        );
2943
2944        for new_job in new_jobs {
2945            if let Some(new_job) = new_job {
2946                job.scan_queue.send(new_job).await.unwrap();
2947            }
2948        }
2949
2950        Ok(())
2951    }
2952
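        /// Refreshes the entries for the given absolute paths: removes entries for deleted
        /// or recursively-refreshed paths, re-inserts entries for existing ones, optionally
        /// queues recursive scans, and returns the affected worktree-relative paths.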
2953    async fn reload_entries_for_paths(
2954        &self,
2955        mut abs_paths: Vec<PathBuf>,
2956        scan_queue_tx: Option<Sender<ScanJob>>,
2957    ) -> Option<Vec<Arc<Path>>> {
2958        let doing_recursive_update = scan_queue_tx.is_some();
2959
2960        abs_paths.sort_unstable();
2961        abs_paths.dedup_by(|a, b| a.starts_with(&b));
2962
2963        let root_abs_path = self.snapshot.lock().abs_path.clone();
2964        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
2965        let metadata = futures::future::join_all(
2966            abs_paths
2967                .iter()
2968                .map(|abs_path| self.fs.metadata(&abs_path))
2969                .collect::<Vec<_>>(),
2970        )
2971        .await;
2972
2973        let mut snapshot = self.snapshot.lock();
2974        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
2975        snapshot.scan_id += 1;
2976        if is_idle && !doing_recursive_update {
2977            snapshot.completed_scan_id = snapshot.scan_id;
2978        }
2979
2980        // Remove any entries for paths that no longer exist or are being recursively
2981        // refreshed. Do this before adding any new entries, so that renames can be
2982        // detected regardless of the order of the paths.
2983        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
2984        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
2985            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
2986                if matches!(metadata, Ok(None)) || doing_recursive_update {
2987                    snapshot.remove_path(path);
2988                }
2989                event_paths.push(path.into());
2990            } else {
2991                log::error!(
2992                    "unexpected event {:?} for root path {:?}",
2993                    abs_path,
2994                    root_canonical_path
2995                );
2996            }
2997        }
2998
2999        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
3000            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
3001
3002            match metadata {
3003                Ok(Some(metadata)) => {
3004                    let ignore_stack =
3005                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
3006                    let mut fs_entry = Entry::new(
3007                        path.clone(),
3008                        &metadata,
3009                        snapshot.next_entry_id.as_ref(),
3010                        snapshot.root_char_bag,
3011                    );
3012                    fs_entry.is_ignored = ignore_stack.is_all();
3013                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
3014
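                        // If this is a directory and a recursive update was requested, enqueue a
                        // scan job for it. Skipping already-seen inodes guards against recursive
                        // symlinks, mirroring the check in the main directory scan.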
3015                    if let Some(scan_queue_tx) = &scan_queue_tx {
3016                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
3017                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
3018                            ancestor_inodes.insert(metadata.inode);
3019                            smol::block_on(scan_queue_tx.send(ScanJob {
3020                                abs_path,
3021                                path,
3022                                ignore_stack,
3023                                ancestor_inodes,
3024                                scan_queue: scan_queue_tx.clone(),
3025                            }))
3026                            .unwrap();
3027                        }
3028                    }
3029                }
3030                Ok(None) => {
3031                    self.remove_repo_path(&path, &mut snapshot);
3032                }
3033                Err(err) => {
3034                    // TODO - create a special 'error' entry in the entries tree to mark this
3035                    log::error!("error reading file on event {:?}", err);
3036                }
3037            }
3038        }
3039
3040        Some(event_paths)
3041    }
3042
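    // Handle removal of a path that may be tracked by a git repository: clear any stored
    // statuses for the path and its descendants, and bump the containing repository's
    // scan id. Paths inside a `.git` directory are handled by `reload_repo_for_file_path`.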
3043    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3044        if !path
3045            .components()
3046            .any(|component| component.as_os_str() == *DOT_GIT)
3047        {
3048            let scan_id = snapshot.scan_id;
3049            let repo = snapshot.repo_for(&path)?;
3050
3051            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
3052
3053            let work_dir = repo.work_directory(snapshot)?;
3054            let work_dir_id = repo.work_directory;
3055
3056            snapshot
3057                .git_repositories
3058                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3059
3060            snapshot.repository_entries.update(&work_dir, |entry| {
3061                entry
3062                    .statuses
3063                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
3064            });
3065        }
3066
3067        Some(())
3068    }
3069
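    // Refresh git state in response to a changed path. Changes under a `.git` directory
    // trigger a full reload of that repository's index, branch, and statuses; changes to
    // ordinary paths refresh statuses only for the affected descendant entries.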
3070    fn reload_repo_for_file_path(
3071        &self,
3072        path: &Path,
3073        snapshot: &mut LocalSnapshot,
3074        fs: &dyn Fs,
3075    ) -> Option<()> {
3076        let scan_id = snapshot.scan_id;
3077
3078        if path
3079            .components()
3080            .any(|component| component.as_os_str() == *DOT_GIT)
3081        {
3082            let (entry_id, repo_ptr) = {
3083                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
3084                    let dot_git_dir = path
3085                        .ancestors()
3086                        .find(|ancestor| ancestor.file_name() == Some(&*DOT_GIT))?;
3087
3088                    snapshot.build_repo(dot_git_dir.into(), fs);
3089                    return None;
3090                };
3091                if repo.full_scan_id == scan_id {
3092                    return None;
3093                }
3094                (*entry_id, repo.repo_ptr.to_owned())
3095            };
3096
3097            let work_dir = snapshot
3098                .entry_for_id(entry_id)
3099                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
3100
3101            let repo = repo_ptr.lock();
3102            repo.reload_index();
3103            let branch = repo.branch_name();
3104            let statuses = repo.statuses().unwrap_or_default();
3105
3106            snapshot.git_repositories.update(&entry_id, |entry| {
3107                entry.scan_id = scan_id;
3108                entry.full_scan_id = scan_id;
3109            });
3110
3111            snapshot.repository_entries.update(&work_dir, |entry| {
3112                entry.branch = branch.map(Into::into);
3113                entry.statuses = statuses;
3114            });
3115        } else {
3116            if snapshot
3117                .entry_for_path(&path)
3118                .map(|entry| entry.is_ignored)
3119                .unwrap_or(false)
3120            {
3121                self.remove_repo_path(&path, snapshot);
3122                return None;
3123            }
3124
3125            let repo = snapshot.repo_for(&path)?;
3126
3127            let work_dir = repo.work_directory(snapshot)?;
3128            let work_dir_id = repo.work_directory.clone();
3129
3130            snapshot
3131                .git_repositories
3132                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3133
3134            let local_repo = snapshot.get_local_repo(&repo)?.to_owned();
3135
3136            // Short circuit if we've already scanned everything
3137            if local_repo.full_scan_id == scan_id {
3138                return None;
3139            }
3140
3141            let mut repository = snapshot.repository_entries.remove(&work_dir)?;
3142
3143            for entry in snapshot.descendent_entries(false, false, path) {
3144                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
3145                    continue;
3146                };
3147
3148                let status = local_repo.repo_ptr.lock().status(&repo_path);
3149                if let Some(status) = status {
3150                    repository.statuses.insert(repo_path.clone(), status);
3151                } else {
3152                    repository.statuses.remove(&repo_path);
3153                }
3154            }
3155
3156            snapshot.repository_entries.insert(work_dir, repository)
3157        }
3158
3159        Some(())
3160    }
3161
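    // Recompute `is_ignored` for entries after `.gitignore` files change. Dirty ignore
    // roots are collected from the snapshot, deduplicated so that nested roots are covered
    // by their ancestors, and then processed concurrently by one worker per CPU.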
3162    async fn update_ignore_statuses(&self) {
3163        use futures::FutureExt as _;
3164
3165        let mut snapshot = self.snapshot.lock().clone();
3166        let mut ignores_to_update = Vec::new();
3167        let mut ignores_to_delete = Vec::new();
3168        let abs_path = snapshot.abs_path.clone();
3169        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
3170            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
3171                if *needs_update {
3172                    *needs_update = false;
3173                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
3174                        ignores_to_update.push(parent_abs_path.clone());
3175                    }
3176                }
3177
3178                let ignore_path = parent_path.join(&*GITIGNORE);
3179                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
3180                    ignores_to_delete.push(parent_abs_path.clone());
3181                }
3182            }
3183        }
3184
3185        for parent_abs_path in ignores_to_delete {
3186            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
3187            self.snapshot
3188                .lock()
3189                .ignores_by_parent_abs_path
3190                .remove(&parent_abs_path);
3191        }
3192
3193        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
3194        ignores_to_update.sort_unstable();
3195        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
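        // Skip any ignore root that is nested inside the one we're about to process;
        // the recursive job for the ancestor will re-evaluate its descendants anyway.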
3196        while let Some(parent_abs_path) = ignores_to_update.next() {
3197            while ignores_to_update
3198                .peek()
3199                .map_or(false, |p| p.starts_with(&parent_abs_path))
3200            {
3201                ignores_to_update.next().unwrap();
3202            }
3203
3204            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
3205            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
3206                abs_path: parent_abs_path,
3207                ignore_stack,
3208                ignore_queue: ignore_queue_tx.clone(),
3209            }))
3210            .unwrap();
3211        }
3212        drop(ignore_queue_tx);
3213
3214        self.executor
3215            .scoped(|scope| {
3216                for _ in 0..self.executor.num_cpus() {
3217                    scope.spawn(async {
3218                        loop {
3219                            select_biased! {
3220                                // Process any path refresh requests before moving on to process
3221                                // the queue of ignore statuses.
3222                                request = self.refresh_requests_rx.recv().fuse() => {
3223                                    let Ok((paths, barrier)) = request else { break };
3224                                    if !self.process_refresh_request(paths, barrier).await {
3225                                        return;
3226                                    }
3227                                }
3228
3229                                // Recursively process directories whose ignores have changed.
3230                                job = ignore_queue_rx.recv().fuse() => {
3231                                    let Ok(job) = job else { break };
3232                                    self.update_ignore_status(job, &snapshot).await;
3233                                }
3234                            }
3235                        }
3236                    });
3237                }
3238            })
3239            .await;
3240    }
3241
3242    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
3243        let mut ignore_stack = job.ignore_stack;
3244        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
3245            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3246        }
3247
3248        let mut entries_by_id_edits = Vec::new();
3249        let mut entries_by_path_edits = Vec::new();
3250        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
3251        for mut entry in snapshot.child_entries(path).cloned() {
3252            let was_ignored = entry.is_ignored;
3253            let abs_path = snapshot.abs_path().join(&entry.path);
3254            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
3255            if entry.is_dir() {
3256                let child_ignore_stack = if entry.is_ignored {
3257                    IgnoreStack::all()
3258                } else {
3259                    ignore_stack.clone()
3260                };
3261                job.ignore_queue
3262                    .send(UpdateIgnoreStatusJob {
3263                        abs_path: abs_path.into(),
3264                        ignore_stack: child_ignore_stack,
3265                        ignore_queue: job.ignore_queue.clone(),
3266                    })
3267                    .await
3268                    .unwrap();
3269            }
3270
3271            if entry.is_ignored != was_ignored {
3272                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
3273                path_entry.scan_id = snapshot.scan_id;
3274                path_entry.is_ignored = entry.is_ignored;
3275                entries_by_id_edits.push(Edit::Insert(path_entry));
3276                entries_by_path_edits.push(Edit::Insert(entry));
3277            }
3278        }
3279
3280        let mut snapshot = self.snapshot.lock();
3281        snapshot.entries_by_path.edit(entries_by_path_edits, &());
3282        snapshot.entries_by_id.edit(entries_by_id_edits, &());
3283    }
3284
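    // Diff the old and new snapshots around the given event paths, walking both entry
    // trees with parallel cursors and classifying each entry as Added, Removed, Updated,
    // or (before the initial scan has finished) AddedOrUpdated.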
3285    fn build_change_set(
3286        &self,
3287        old_snapshot: &Snapshot,
3288        new_snapshot: &Snapshot,
3289        event_paths: &[Arc<Path>],
3290    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
3291        use PathChange::{Added, AddedOrUpdated, Removed, Updated};
3292
3293        let mut changes = HashMap::default();
3294        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3295        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3296        let received_before_initialized = !self.finished_initial_scan;
3297
3298        for path in event_paths {
3299            let path = PathKey(path.clone());
3300            old_paths.seek(&path, Bias::Left, &());
3301            new_paths.seek(&path, Bias::Left, &());
3302
3303            loop {
3304                match (old_paths.item(), new_paths.item()) {
3305                    (Some(old_entry), Some(new_entry)) => {
3306                        if old_entry.path > path.0
3307                            && new_entry.path > path.0
3308                            && !old_entry.path.starts_with(&path.0)
3309                            && !new_entry.path.starts_with(&path.0)
3310                        {
3311                            break;
3312                        }
3313
3314                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3315                            Ordering::Less => {
3316                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3317                                old_paths.next(&());
3318                            }
3319                            Ordering::Equal => {
3320                                if received_before_initialized {
3321                                    // If the worktree was not fully initialized when this event was generated,
3322                                    // we can't know whether this entry was added during the scan or whether
3323                                    // it was merely updated.
3324                                    changes.insert(
3325                                        (new_entry.path.clone(), new_entry.id),
3326                                        AddedOrUpdated,
3327                                    );
3328                                } else if old_entry.mtime != new_entry.mtime {
3329                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
3330                                }
3331                                old_paths.next(&());
3332                                new_paths.next(&());
3333                            }
3334                            Ordering::Greater => {
3335                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
3336                                new_paths.next(&());
3337                            }
3338                        }
3339                    }
3340                    (Some(old_entry), None) => {
3341                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3342                        old_paths.next(&());
3343                    }
3344                    (None, Some(new_entry)) => {
3345                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
3346                        new_paths.next(&());
3347                    }
3348                    (None, None) => break,
3349                }
3350            }
3351        }
3352
3353        changes
3354    }
3355
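    // Resolves after a short delay while a scan is in progress, and never resolves when
    // idle; presumably used to throttle periodic scan-progress notifications.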
3356    async fn progress_timer(&self, running: bool) {
3357        if !running {
3358            return futures::future::pending().await;
3359        }
3360
3361        #[cfg(any(test, feature = "test-support"))]
3362        if self.fs.is_fake() {
3363            return self.executor.simulate_random_delay().await;
3364        }
3365
3366        smol::Timer::after(Duration::from_millis(100)).await;
3367    }
3368}
3369
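// Build the fuzzy-matching character bag for a path by extending the worktree root's bag
// with the lowercased characters of the relative path.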
3370fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3371    let mut result = root_char_bag;
3372    result.extend(
3373        path.to_string_lossy()
3374            .chars()
3375            .map(|c| c.to_ascii_lowercase()),
3376    );
3377    result
3378}
3379
3380struct ScanJob {
3381    abs_path: Arc<Path>,
3382    path: Arc<Path>,
3383    ignore_stack: Arc<IgnoreStack>,
3384    scan_queue: Sender<ScanJob>,
3385    ancestor_inodes: TreeSet<u64>,
3386}
3387
3388struct UpdateIgnoreStatusJob {
3389    abs_path: Arc<Path>,
3390    ignore_stack: Arc<IgnoreStack>,
3391    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3392}
3393
3394pub trait WorktreeHandle {
3395    #[cfg(any(test, feature = "test-support"))]
3396    fn flush_fs_events<'a>(
3397        &self,
3398        cx: &'a gpui::TestAppContext,
3399    ) -> futures::future::LocalBoxFuture<'a, ()>;
3400}
3401
3402impl WorktreeHandle for ModelHandle<Worktree> {
 3403    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
 3404    // occurred before the worktree was constructed. These events can cause the worktree to perform
3405    // extra directory scans, and emit extra scan-state notifications.
3406    //
3407    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3408    // to ensure that all redundant FS events have already been processed.
3409    #[cfg(any(test, feature = "test-support"))]
3410    fn flush_fs_events<'a>(
3411        &self,
3412        cx: &'a gpui::TestAppContext,
3413    ) -> futures::future::LocalBoxFuture<'a, ()> {
3414        use smol::future::FutureExt;
3415
3416        let filename = "fs-event-sentinel";
3417        let tree = self.clone();
3418        let (fs, root_path) = self.read_with(cx, |tree, _| {
3419            let tree = tree.as_local().unwrap();
3420            (tree.fs.clone(), tree.abs_path().clone())
3421        });
3422
3423        async move {
3424            fs.create_file(&root_path.join(filename), Default::default())
3425                .await
3426                .unwrap();
3427            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3428                .await;
3429
3430            fs.remove_file(&root_path.join(filename), Default::default())
3431                .await
3432                .unwrap();
3433            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3434                .await;
3435
3436            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3437                .await;
3438        }
3439        .boxed_local()
3440    }
3441}
3442
3443#[derive(Clone, Debug)]
3444struct TraversalProgress<'a> {
3445    max_path: &'a Path,
3446    count: usize,
3447    visible_count: usize,
3448    file_count: usize,
3449    visible_file_count: usize,
3450}
3451
3452impl<'a> TraversalProgress<'a> {
3453    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3454        match (include_ignored, include_dirs) {
3455            (true, true) => self.count,
3456            (true, false) => self.file_count,
3457            (false, true) => self.visible_count,
3458            (false, false) => self.visible_file_count,
3459        }
3460    }
3461}
3462
3463impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3464    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3465        self.max_path = summary.max_path.as_ref();
3466        self.count += summary.count;
3467        self.visible_count += summary.visible_count;
3468        self.file_count += summary.file_count;
3469        self.visible_file_count += summary.visible_file_count;
3470    }
3471}
3472
3473impl<'a> Default for TraversalProgress<'a> {
3474    fn default() -> Self {
3475        Self {
3476            max_path: Path::new(""),
3477            count: 0,
3478            visible_count: 0,
3479            file_count: 0,
3480            visible_file_count: 0,
3481        }
3482    }
3483}
3484
3485pub struct Traversal<'a> {
3486    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3487    include_ignored: bool,
3488    include_dirs: bool,
3489}
3490
3491impl<'a> Traversal<'a> {
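    // Move the cursor to the next entry that matches this traversal's directory/ignored
    // filters, by seeking to the filtered count one past the current position.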
3492    pub fn advance(&mut self) -> bool {
3493        self.cursor.seek_forward(
3494            &TraversalTarget::Count {
3495                count: self.end_offset() + 1,
3496                include_dirs: self.include_dirs,
3497                include_ignored: self.include_ignored,
3498            },
3499            Bias::Left,
3500            &(),
3501        )
3502    }
3503
3504    pub fn advance_to_sibling(&mut self) -> bool {
3505        while let Some(entry) = self.cursor.item() {
3506            self.cursor.seek_forward(
3507                &TraversalTarget::PathSuccessor(&entry.path),
3508                Bias::Left,
3509                &(),
3510            );
3511            if let Some(entry) = self.cursor.item() {
3512                if (self.include_dirs || !entry.is_dir())
3513                    && (self.include_ignored || !entry.is_ignored)
3514                {
3515                    return true;
3516                }
3517            }
3518        }
3519        false
3520    }
3521
3522    pub fn entry(&self) -> Option<&'a Entry> {
3523        self.cursor.item()
3524    }
3525
3526    pub fn start_offset(&self) -> usize {
3527        self.cursor
3528            .start()
3529            .count(self.include_dirs, self.include_ignored)
3530    }
3531
3532    pub fn end_offset(&self) -> usize {
3533        self.cursor
3534            .end(&())
3535            .count(self.include_dirs, self.include_ignored)
3536    }
3537}
3538
3539impl<'a> Iterator for Traversal<'a> {
3540    type Item = &'a Entry;
3541
3542    fn next(&mut self) -> Option<Self::Item> {
3543        if let Some(item) = self.entry() {
3544            self.advance();
3545            Some(item)
3546        } else {
3547            None
3548        }
3549    }
3550}
3551
3552#[derive(Debug)]
3553enum TraversalTarget<'a> {
3554    Path(&'a Path),
3555    PathSuccessor(&'a Path),
3556    Count {
3557        count: usize,
3558        include_ignored: bool,
3559        include_dirs: bool,
3560    },
3561}
3562
3563impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3564    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3565        match self {
3566            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
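            // `PathSuccessor(path)` compares as Greater while the cursor is still within the
            // subtree rooted at `path`, and Equal once it has moved past it, so seeking lands
            // on the first entry that is not a descendant of `path` (e.g. the next sibling).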
3567            TraversalTarget::PathSuccessor(path) => {
3568                if !cursor_location.max_path.starts_with(path) {
3569                    Ordering::Equal
3570                } else {
3571                    Ordering::Greater
3572                }
3573            }
3574            TraversalTarget::Count {
3575                count,
3576                include_dirs,
3577                include_ignored,
3578            } => Ord::cmp(
3579                count,
3580                &cursor_location.count(*include_dirs, *include_ignored),
3581            ),
3582        }
3583    }
3584}
3585
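// Yields entries under `parent_path`, advancing to the next sibling after each one so that
// a yielded entry's own subtree is skipped; `DescendentEntriesIter` below advances entry by
// entry instead, yielding the entire subtree.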
3586struct ChildEntriesIter<'a> {
3587    parent_path: &'a Path,
3588    traversal: Traversal<'a>,
3589}
3590
3591impl<'a> Iterator for ChildEntriesIter<'a> {
3592    type Item = &'a Entry;
3593
3594    fn next(&mut self) -> Option<Self::Item> {
3595        if let Some(item) = self.traversal.entry() {
3596            if item.path.starts_with(&self.parent_path) {
3597                self.traversal.advance_to_sibling();
3598                return Some(item);
3599            }
3600        }
3601        None
3602    }
3603}
3604
3605struct DescendentEntriesIter<'a> {
3606    parent_path: &'a Path,
3607    traversal: Traversal<'a>,
3608}
3609
3610impl<'a> Iterator for DescendentEntriesIter<'a> {
3611    type Item = &'a Entry;
3612
3613    fn next(&mut self) -> Option<Self::Item> {
3614        if let Some(item) = self.traversal.entry() {
3615            if item.path.starts_with(&self.parent_path) {
3616                self.traversal.advance();
3617                return Some(item);
3618            }
3619        }
3620        None
3621    }
3622}
3623
3624impl<'a> From<&'a Entry> for proto::Entry {
3625    fn from(entry: &'a Entry) -> Self {
3626        Self {
3627            id: entry.id.to_proto(),
3628            is_dir: entry.is_dir(),
3629            path: entry.path.to_string_lossy().into(),
3630            inode: entry.inode,
3631            mtime: Some(entry.mtime.into()),
3632            is_symlink: entry.is_symlink,
3633            is_ignored: entry.is_ignored,
3634        }
3635    }
3636}
3637
3638impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3639    type Error = anyhow::Error;
3640
3641    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3642        if let Some(mtime) = entry.mtime {
3643            let kind = if entry.is_dir {
3644                EntryKind::Dir
3645            } else {
3646                let mut char_bag = *root_char_bag;
3647                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3648                EntryKind::File(char_bag)
3649            };
3650            let path: Arc<Path> = PathBuf::from(entry.path).into();
3651            Ok(Entry {
3652                id: ProjectEntryId::from_proto(entry.id),
3653                kind,
3654                path,
3655                inode: entry.inode,
3656                mtime: mtime.into(),
3657                is_symlink: entry.is_symlink,
3658                is_ignored: entry.is_ignored,
3659            })
3660        } else {
3661            Err(anyhow!(
3662                "missing mtime in remote worktree entry {:?}",
3663                entry.path
3664            ))
3665        }
3666    }
3667}
3668
3669#[cfg(test)]
3670mod tests {
3671    use super::*;
3672    use fs::{FakeFs, RealFs};
3673    use gpui::{executor::Deterministic, TestAppContext};
3674    use pretty_assertions::assert_eq;
3675    use rand::prelude::*;
3676    use serde_json::json;
3677    use std::{env, fmt::Write};
3678    use util::{http::FakeHttpClient, test::temp_tree};
3679
3680    #[gpui::test]
3681    async fn test_traversal(cx: &mut TestAppContext) {
3682        let fs = FakeFs::new(cx.background());
3683        fs.insert_tree(
3684            "/root",
3685            json!({
3686               ".gitignore": "a/b\n",
3687               "a": {
3688                   "b": "",
3689                   "c": "",
3690               }
3691            }),
3692        )
3693        .await;
3694
3695        let http_client = FakeHttpClient::with_404_response();
3696        let client = cx.read(|cx| Client::new(http_client, cx));
3697
3698        let tree = Worktree::local(
3699            client,
3700            Path::new("/root"),
3701            true,
3702            fs,
3703            Default::default(),
3704            &mut cx.to_async(),
3705        )
3706        .await
3707        .unwrap();
3708        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3709            .await;
3710
3711        tree.read_with(cx, |tree, _| {
3712            assert_eq!(
3713                tree.entries(false)
3714                    .map(|entry| entry.path.as_ref())
3715                    .collect::<Vec<_>>(),
3716                vec![
3717                    Path::new(""),
3718                    Path::new(".gitignore"),
3719                    Path::new("a"),
3720                    Path::new("a/c"),
3721                ]
3722            );
3723            assert_eq!(
3724                tree.entries(true)
3725                    .map(|entry| entry.path.as_ref())
3726                    .collect::<Vec<_>>(),
3727                vec![
3728                    Path::new(""),
3729                    Path::new(".gitignore"),
3730                    Path::new("a"),
3731                    Path::new("a/b"),
3732                    Path::new("a/c"),
3733                ]
3734            );
3735        })
3736    }
3737
3738    #[gpui::test]
3739    async fn test_descendent_entries(cx: &mut TestAppContext) {
3740        let fs = FakeFs::new(cx.background());
3741        fs.insert_tree(
3742            "/root",
3743            json!({
3744                "a": "",
3745                "b": {
3746                   "c": {
3747                       "d": ""
3748                   },
3749                   "e": {}
3750                },
3751                "f": "",
3752                "g": {
3753                    "h": {}
3754                },
3755                "i": {
3756                    "j": {
3757                        "k": ""
3758                    },
3759                    "l": {
3760
3761                    }
3762                },
3763                ".gitignore": "i/j\n",
3764            }),
3765        )
3766        .await;
3767
3768        let http_client = FakeHttpClient::with_404_response();
3769        let client = cx.read(|cx| Client::new(http_client, cx));
3770
3771        let tree = Worktree::local(
3772            client,
3773            Path::new("/root"),
3774            true,
3775            fs,
3776            Default::default(),
3777            &mut cx.to_async(),
3778        )
3779        .await
3780        .unwrap();
3781        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3782            .await;
3783
3784        tree.read_with(cx, |tree, _| {
3785            assert_eq!(
3786                tree.descendent_entries(false, false, Path::new("b"))
3787                    .map(|entry| entry.path.as_ref())
3788                    .collect::<Vec<_>>(),
3789                vec![Path::new("b/c/d"),]
3790            );
3791            assert_eq!(
3792                tree.descendent_entries(true, false, Path::new("b"))
3793                    .map(|entry| entry.path.as_ref())
3794                    .collect::<Vec<_>>(),
3795                vec![
3796                    Path::new("b"),
3797                    Path::new("b/c"),
3798                    Path::new("b/c/d"),
3799                    Path::new("b/e"),
3800                ]
3801            );
3802
3803            assert_eq!(
3804                tree.descendent_entries(false, false, Path::new("g"))
3805                    .map(|entry| entry.path.as_ref())
3806                    .collect::<Vec<_>>(),
3807                Vec::<PathBuf>::new()
3808            );
3809            assert_eq!(
3810                tree.descendent_entries(true, false, Path::new("g"))
3811                    .map(|entry| entry.path.as_ref())
3812                    .collect::<Vec<_>>(),
3813                vec![Path::new("g"), Path::new("g/h"),]
3814            );
3815
3816            assert_eq!(
3817                tree.descendent_entries(false, false, Path::new("i"))
3818                    .map(|entry| entry.path.as_ref())
3819                    .collect::<Vec<_>>(),
3820                Vec::<PathBuf>::new()
3821            );
3822            assert_eq!(
3823                tree.descendent_entries(false, true, Path::new("i"))
3824                    .map(|entry| entry.path.as_ref())
3825                    .collect::<Vec<_>>(),
3826                vec![Path::new("i/j/k")]
3827            );
3828            assert_eq!(
3829                tree.descendent_entries(true, false, Path::new("i"))
3830                    .map(|entry| entry.path.as_ref())
3831                    .collect::<Vec<_>>(),
3832                vec![Path::new("i"), Path::new("i/l"),]
3833            );
3834        })
3835    }
3836
3837    #[gpui::test(iterations = 10)]
3838    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3839        let fs = FakeFs::new(cx.background());
3840        fs.insert_tree(
3841            "/root",
3842            json!({
3843                "lib": {
3844                    "a": {
3845                        "a.txt": ""
3846                    },
3847                    "b": {
3848                        "b.txt": ""
3849                    }
3850                }
3851            }),
3852        )
3853        .await;
3854        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3855        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3856
3857        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3858        let tree = Worktree::local(
3859            client,
3860            Path::new("/root"),
3861            true,
3862            fs.clone(),
3863            Default::default(),
3864            &mut cx.to_async(),
3865        )
3866        .await
3867        .unwrap();
3868
3869        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3870            .await;
3871
3872        tree.read_with(cx, |tree, _| {
3873            assert_eq!(
3874                tree.entries(false)
3875                    .map(|entry| entry.path.as_ref())
3876                    .collect::<Vec<_>>(),
3877                vec![
3878                    Path::new(""),
3879                    Path::new("lib"),
3880                    Path::new("lib/a"),
3881                    Path::new("lib/a/a.txt"),
3882                    Path::new("lib/a/lib"),
3883                    Path::new("lib/b"),
3884                    Path::new("lib/b/b.txt"),
3885                    Path::new("lib/b/lib"),
3886                ]
3887            );
3888        });
3889
3890        fs.rename(
3891            Path::new("/root/lib/a/lib"),
3892            Path::new("/root/lib/a/lib-2"),
3893            Default::default(),
3894        )
3895        .await
3896        .unwrap();
3897        executor.run_until_parked();
3898        tree.read_with(cx, |tree, _| {
3899            assert_eq!(
3900                tree.entries(false)
3901                    .map(|entry| entry.path.as_ref())
3902                    .collect::<Vec<_>>(),
3903                vec![
3904                    Path::new(""),
3905                    Path::new("lib"),
3906                    Path::new("lib/a"),
3907                    Path::new("lib/a/a.txt"),
3908                    Path::new("lib/a/lib-2"),
3909                    Path::new("lib/b"),
3910                    Path::new("lib/b/b.txt"),
3911                    Path::new("lib/b/lib"),
3912                ]
3913            );
3914        });
3915    }
3916
3917    #[gpui::test]
3918    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3919        let parent_dir = temp_tree(json!({
3920            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3921            "tree": {
3922                ".git": {},
3923                ".gitignore": "ignored-dir\n",
3924                "tracked-dir": {
3925                    "tracked-file1": "",
3926                    "ancestor-ignored-file1": "",
3927                },
3928                "ignored-dir": {
3929                    "ignored-file1": ""
3930                }
3931            }
3932        }));
3933        let dir = parent_dir.path().join("tree");
3934
3935        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3936
3937        let tree = Worktree::local(
3938            client,
3939            dir.as_path(),
3940            true,
3941            Arc::new(RealFs),
3942            Default::default(),
3943            &mut cx.to_async(),
3944        )
3945        .await
3946        .unwrap();
3947        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3948            .await;
3949        tree.flush_fs_events(cx).await;
3950        cx.read(|cx| {
3951            let tree = tree.read(cx);
3952            assert!(
3953                !tree
3954                    .entry_for_path("tracked-dir/tracked-file1")
3955                    .unwrap()
3956                    .is_ignored
3957            );
3958            assert!(
3959                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3960                    .unwrap()
3961                    .is_ignored
3962            );
3963            assert!(
3964                tree.entry_for_path("ignored-dir/ignored-file1")
3965                    .unwrap()
3966                    .is_ignored
3967            );
3968        });
3969
3970        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3971        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3972        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3973        tree.flush_fs_events(cx).await;
3974        cx.read(|cx| {
3975            let tree = tree.read(cx);
3976            assert!(
3977                !tree
3978                    .entry_for_path("tracked-dir/tracked-file2")
3979                    .unwrap()
3980                    .is_ignored
3981            );
3982            assert!(
3983                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3984                    .unwrap()
3985                    .is_ignored
3986            );
3987            assert!(
3988                tree.entry_for_path("ignored-dir/ignored-file2")
3989                    .unwrap()
3990                    .is_ignored
3991            );
3992            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3993        });
3994    }
3995
3996    #[gpui::test]
3997    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3998        let root = temp_tree(json!({
3999            "dir1": {
4000                ".git": {},
4001                "deps": {
4002                    "dep1": {
4003                        ".git": {},
4004                        "src": {
4005                            "a.txt": ""
4006                        }
4007                    }
4008                },
4009                "src": {
4010                    "b.txt": ""
4011                }
4012            },
4013            "c.txt": "",
4014        }));
4015
4016        let http_client = FakeHttpClient::with_404_response();
4017        let client = cx.read(|cx| Client::new(http_client, cx));
4018        let tree = Worktree::local(
4019            client,
4020            root.path(),
4021            true,
4022            Arc::new(RealFs),
4023            Default::default(),
4024            &mut cx.to_async(),
4025        )
4026        .await
4027        .unwrap();
4028
4029        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4030            .await;
4031        tree.flush_fs_events(cx).await;
4032
4033        tree.read_with(cx, |tree, _cx| {
4034            let tree = tree.as_local().unwrap();
4035
4036            assert!(tree.repo_for("c.txt".as_ref()).is_none());
4037
4038            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
4039            assert_eq!(
4040                entry
4041                    .work_directory(tree)
4042                    .map(|directory| directory.as_ref().to_owned()),
4043                Some(Path::new("dir1").to_owned())
4044            );
4045
4046            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
4047            assert_eq!(
4048                entry
4049                    .work_directory(tree)
4050                    .map(|directory| directory.as_ref().to_owned()),
4051                Some(Path::new("dir1/deps/dep1").to_owned())
4052            );
4053        });
4054
4055        let repo_update_events = Arc::new(Mutex::new(vec![]));
4056        tree.update(cx, |_, cx| {
4057            let repo_update_events = repo_update_events.clone();
4058            cx.subscribe(&tree, move |_, _, event, _| {
4059                if let Event::UpdatedGitRepositories(update) = event {
4060                    repo_update_events.lock().push(update.clone());
4061                }
4062            })
4063            .detach();
4064        });
4065
4066        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
4067        tree.flush_fs_events(cx).await;
4068
4069        assert_eq!(
4070            repo_update_events.lock()[0]
4071                .keys()
4072                .cloned()
4073                .collect::<Vec<Arc<Path>>>(),
4074            vec![Path::new("dir1").into()]
4075        );
4076
4077        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
4078        tree.flush_fs_events(cx).await;
4079
4080        tree.read_with(cx, |tree, _cx| {
4081            let tree = tree.as_local().unwrap();
4082
4083            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
4084        });
4085    }
4086
4087    #[gpui::test]
4088    async fn test_git_status(cx: &mut TestAppContext) {
4089        #[track_caller]
4090        fn git_init(path: &Path) -> git2::Repository {
4091            git2::Repository::init(path).expect("Failed to initialize git repository")
4092        }
4093
4094        #[track_caller]
4095        fn git_add(path: &Path, repo: &git2::Repository) {
4096            let mut index = repo.index().expect("Failed to get index");
4097            index.add_path(path).expect("Failed to add path to index");
4098            index.write().expect("Failed to write index");
4099        }
4100
4101        #[track_caller]
4102        fn git_remove_index(path: &Path, repo: &git2::Repository) {
4103            let mut index = repo.index().expect("Failed to get index");
4104            index.remove_path(path).expect("Failed to remove path from index");
4105            index.write().expect("Failed to write index");
4106        }
4107
4108        #[track_caller]
4109        fn git_commit(msg: &'static str, repo: &git2::Repository) {
4110            use git2::Signature;
4111
4112            let signature = Signature::now("test", "test@zed.dev").unwrap();
4113            let oid = repo.index().unwrap().write_tree().unwrap();
4114            let tree = repo.find_tree(oid).unwrap();
4115            if let Ok(head) = repo.head() {
4116                let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
4117
4118                let parent_commit = parent_obj.as_commit().unwrap();
4119
4120                repo.commit(
4121                    Some("HEAD"),
4122                    &signature,
4123                    &signature,
4124                    msg,
4125                    &tree,
4126                    &[parent_commit],
4127                )
4128                .expect("Failed to commit with parent");
4129            } else {
4130                repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
4131                    .expect("Failed to commit");
4132            }
4133        }
4134
4135        #[track_caller]
4136        fn git_stash(repo: &mut git2::Repository) {
4137            use git2::Signature;
4138
4139            let signature = Signature::now("test", "test@zed.dev").unwrap();
4140            repo.stash_save(&signature, "N/A", None)
4141                .expect("Failed to stash");
4142        }
4143
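        // Soft-reset HEAD back to the `offset`-th parent of the current commit, leaving
        // the index and working tree untouched.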
4144        #[track_caller]
4145        fn git_reset(offset: usize, repo: &git2::Repository) {
4146            let head = repo.head().expect("Couldn't get repo head");
4147            let object = head.peel(git2::ObjectType::Commit).unwrap();
4148            let commit = object.as_commit().unwrap();
4149            let new_head = commit
4150                .parents()
4151                .inspect(|parent| {
4152                    parent.message();
4153                })
4154                .skip(offset)
4155                .next()
4156                .expect("Not enough history");
4157            repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
4158                .expect("Could not reset");
4159        }
4160
4161        #[allow(dead_code)]
4162        #[track_caller]
4163        fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
4164            repo.statuses(None)
4165                .unwrap()
4166                .iter()
4167                .map(|status| (status.path().unwrap().to_string(), status.status()))
4168                .collect()
4169        }
4170
4171        const IGNORE_RULE: &'static str = "**/target";
4172
4173        let root = temp_tree(json!({
4174            "project": {
4175                "a.txt": "a",
4176                "b.txt": "bb",
4177                "c": {
4178                    "d": {
4179                        "e.txt": "eee"
4180                    }
4181                },
4182                "f.txt": "ffff",
4183                "target": {
4184                    "build_file": "???"
4185                },
4186                ".gitignore": IGNORE_RULE
4187            },
4188
4189        }));
4190
4191        let http_client = FakeHttpClient::with_404_response();
4192        let client = cx.read(|cx| Client::new(http_client, cx));
4193        let tree = Worktree::local(
4194            client,
4195            root.path(),
4196            true,
4197            Arc::new(RealFs),
4198            Default::default(),
4199            &mut cx.to_async(),
4200        )
4201        .await
4202        .unwrap();
4203
4204        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4205            .await;
4206
4207        const A_TXT: &'static str = "a.txt";
4208        const B_TXT: &'static str = "b.txt";
4209        const E_TXT: &'static str = "c/d/e.txt";
4210        const F_TXT: &'static str = "f.txt";
4211        const DOTGITIGNORE: &'static str = ".gitignore";
4212        const BUILD_FILE: &'static str = "target/build_file";
4213
4214        let work_dir = root.path().join("project");
4215        let mut repo = git_init(work_dir.as_path());
4216        repo.add_ignore_rule(IGNORE_RULE).unwrap();
4217        git_add(Path::new(A_TXT), &repo);
4218        git_add(Path::new(E_TXT), &repo);
4219        git_add(Path::new(DOTGITIGNORE), &repo);
4220        git_commit("Initial commit", &repo);
4221
4222        std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
4223
4224        tree.flush_fs_events(cx).await;
4225
4226        // Check that the right git state is observed on startup
4227        tree.read_with(cx, |tree, _cx| {
4228            let snapshot = tree.snapshot();
4229            assert_eq!(snapshot.repository_entries.iter().count(), 1);
4230            let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4231            assert_eq!(dir.0.as_ref(), Path::new("project"));
4232
4233            assert_eq!(repo.statuses.iter().count(), 3);
4234            assert_eq!(
4235                repo.statuses.get(&Path::new(A_TXT).into()),
4236                Some(&GitFileStatus::Modified)
4237            );
4238            assert_eq!(
4239                repo.statuses.get(&Path::new(B_TXT).into()),
4240                Some(&GitFileStatus::Added)
4241            );
4242            assert_eq!(
4243                repo.statuses.get(&Path::new(F_TXT).into()),
4244                Some(&GitFileStatus::Added)
4245            );
4246        });
4247
4248        git_add(Path::new(A_TXT), &repo);
4249        git_add(Path::new(B_TXT), &repo);
4250        git_commit("Committing modified and added", &repo);
4251        tree.flush_fs_events(cx).await;
4252
4253        // Check that repo-only changes are tracked
4254        tree.read_with(cx, |tree, _cx| {
4255            let snapshot = tree.snapshot();
4256            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4257
4258            assert_eq!(repo.statuses.iter().count(), 1);
4259            assert_eq!(
4260                repo.statuses.get(&Path::new(F_TXT).into()),
4261                Some(&GitFileStatus::Added)
4262            );
4263        });
4264
4265        git_reset(0, &repo);
4266        git_remove_index(Path::new(B_TXT), &repo);
4267        git_stash(&mut repo);
4268        std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
4269        std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
4270        tree.flush_fs_events(cx).await;
4271
4272        // Check that more complex repo changes are tracked
4273        tree.read_with(cx, |tree, _cx| {
4274            let snapshot = tree.snapshot();
4275            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4276
4277            assert_eq!(repo.statuses.iter().count(), 3);
4278            assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
4279            assert_eq!(
4280                repo.statuses.get(&Path::new(B_TXT).into()),
4281                Some(&GitFileStatus::Added)
4282            );
4283            assert_eq!(
4284                repo.statuses.get(&Path::new(E_TXT).into()),
4285                Some(&GitFileStatus::Modified)
4286            );
4287            assert_eq!(
4288                repo.statuses.get(&Path::new(F_TXT).into()),
4289                Some(&GitFileStatus::Added)
4290            );
4291        });
4292
4293        std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
4294        std::fs::remove_dir_all(work_dir.join("c")).unwrap();
4295        std::fs::write(
4296            work_dir.join(DOTGITIGNORE),
4297            [IGNORE_RULE, "f.txt"].join("\n"),
4298        )
4299        .unwrap();
4300
4301        git_add(Path::new(DOTGITIGNORE), &repo);
4302        git_commit("Committing modified git ignore", &repo);
4303
4304        tree.flush_fs_events(cx).await;
4305
4306        // Check that non-repo behavior is tracked
4307        tree.read_with(cx, |tree, _cx| {
4308            let snapshot = tree.snapshot();
4309            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4310
4311            assert_eq!(repo.statuses.iter().count(), 0);
4312        });
4313
4314        let mut renamed_dir_name = "first_directory/second_directory";
4315        const RENAMED_FILE: &'static str = "rf.txt";
4316
4317        std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
4318        std::fs::write(
4319            work_dir.join(renamed_dir_name).join(RENAMED_FILE),
4320            "new-contents",
4321        )
4322        .unwrap();
4323
4324        tree.flush_fs_events(cx).await;
4325
4326        tree.read_with(cx, |tree, _cx| {
4327            let snapshot = tree.snapshot();
4328            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4329
4330            assert_eq!(repo.statuses.iter().count(), 1);
4331            assert_eq!(
4332                repo.statuses
4333                    .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
4334                Some(&GitFileStatus::Added)
4335            );
4336        });
4337
4338        renamed_dir_name = "new_first_directory/second_directory";
4339
4340        std::fs::rename(
4341            work_dir.join("first_directory"),
4342            work_dir.join("new_first_directory"),
4343        )
4344        .unwrap();
4345
4346        tree.flush_fs_events(cx).await;
4347
4348        tree.read_with(cx, |tree, _cx| {
4349            let snapshot = tree.snapshot();
4350            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
4351
4352            assert_eq!(repo.statuses.iter().count(), 1);
4353            assert_eq!(
4354                repo.statuses
4355                    .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
4356                Some(&GitFileStatus::Added)
4357            );
4358        });
4359    }
4360
4361    #[gpui::test]
4362    async fn test_write_file(cx: &mut TestAppContext) {
4363        let dir = temp_tree(json!({
4364            ".git": {},
4365            ".gitignore": "ignored-dir\n",
4366            "tracked-dir": {},
4367            "ignored-dir": {}
4368        }));
4369
4370        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4371
4372        let tree = Worktree::local(
4373            client,
4374            dir.path(),
4375            true,
4376            Arc::new(RealFs),
4377            Default::default(),
4378            &mut cx.to_async(),
4379        )
4380        .await
4381        .unwrap();
4382        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4383            .await;
4384        tree.flush_fs_events(cx).await;
4385
4386        tree.update(cx, |tree, cx| {
4387            tree.as_local().unwrap().write_file(
4388                Path::new("tracked-dir/file.txt"),
4389                "hello".into(),
4390                Default::default(),
4391                cx,
4392            )
4393        })
4394        .await
4395        .unwrap();
4396        tree.update(cx, |tree, cx| {
4397            tree.as_local().unwrap().write_file(
4398                Path::new("ignored-dir/file.txt"),
4399                "world".into(),
4400                Default::default(),
4401                cx,
4402            )
4403        })
4404        .await
4405        .unwrap();
4406
4407        tree.read_with(cx, |tree, _| {
4408            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
4409            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
4410            assert!(!tracked.is_ignored);
4411            assert!(ignored.is_ignored);
4412        });
4413    }
4414
4415    #[gpui::test(iterations = 30)]
4416    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
4417        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4418
4419        let fs = FakeFs::new(cx.background());
4420        fs.insert_tree(
4421            "/root",
4422            json!({
4423                "b": {},
4424                "c": {},
4425                "d": {},
4426            }),
4427        )
4428        .await;
4429
4430        let tree = Worktree::local(
4431            client,
4432            "/root".as_ref(),
4433            true,
4434            fs,
4435            Default::default(),
4436            &mut cx.to_async(),
4437        )
4438        .await
4439        .unwrap();
4440
4441        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4442
4443        let entry = tree
4444            .update(cx, |tree, cx| {
4445                tree.as_local_mut()
4446                    .unwrap()
4447                    .create_entry("a/e".as_ref(), true, cx)
4448            })
4449            .await
4450            .unwrap();
4451        assert!(entry.is_dir());
4452
4453        cx.foreground().run_until_parked();
4454        tree.read_with(cx, |tree, _| {
4455            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
4456        });
4457
4458        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4459        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
4460        snapshot1.apply_remote_update(update).unwrap();
4461        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
4462    }
4463
4464    #[gpui::test(iterations = 100)]
4465    async fn test_random_worktree_operations_during_initial_scan(
4466        cx: &mut TestAppContext,
4467        mut rng: StdRng,
4468    ) {
4469        let operations = env::var("OPERATIONS")
4470            .map(|o| o.parse().unwrap())
4471            .unwrap_or(5);
4472        let initial_entries = env::var("INITIAL_ENTRIES")
4473            .map(|o| o.parse().unwrap())
4474            .unwrap_or(20);
4475
4476        let root_dir = Path::new("/test");
4477        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4478        fs.as_fake().insert_tree(root_dir, json!({})).await;
4479        for _ in 0..initial_entries {
4480            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4481        }
4482        log::info!("generated initial tree");
4483
4484        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4485        let worktree = Worktree::local(
4486            client.clone(),
4487            root_dir,
4488            true,
4489            fs.clone(),
4490            Default::default(),
4491            &mut cx.to_async(),
4492        )
4493        .await
4494        .unwrap();
4495
4496        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4497
4498        for _ in 0..operations {
4499            worktree
4500                .update(cx, |worktree, cx| {
4501                    randomly_mutate_worktree(worktree, &mut rng, cx)
4502                })
4503                .await
4504                .log_err();
4505            worktree.read_with(cx, |tree, _| {
4506                tree.as_local().unwrap().snapshot.check_invariants()
4507            });
4508
4509            if rng.gen_bool(0.6) {
4510                let new_snapshot =
4511                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4512                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4513                snapshot.apply_remote_update(update.clone()).unwrap();
4514                assert_eq!(
4515                    snapshot.to_vec(true),
4516                    new_snapshot.to_vec(true),
4517                    "incorrect snapshot after update {:?}",
4518                    update
4519                );
4520            }
4521        }
4522
4523        worktree
4524            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4525            .await;
4526        worktree.read_with(cx, |tree, _| {
4527            tree.as_local().unwrap().snapshot.check_invariants()
4528        });
4529
4530        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4531        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4532        snapshot.apply_remote_update(update.clone()).unwrap();
4533        assert_eq!(
4534            snapshot.to_vec(true),
4535            new_snapshot.to_vec(true),
4536            "incorrect snapshot after update {:?}",
4537            update
4538        );
4539    }
4540
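        // Randomized test: applies a mix of worktree and filesystem mutations while
        // fs events are paused and flushed in random batches, then verifies that
        // `UpdatedEntries` events, snapshot invariants, a freshly scanned worktree,
        // and updates built against previously stored snapshots all stay consistent.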
4541    #[gpui::test(iterations = 100)]
4542    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4543        let operations = env::var("OPERATIONS")
4544            .map(|o| o.parse().unwrap())
4545            .unwrap_or(40);
4546        let initial_entries = env::var("INITIAL_ENTRIES")
4547            .map(|o| o.parse().unwrap())
4548            .unwrap_or(20);
4549
4550        let root_dir = Path::new("/test");
4551        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4552        fs.as_fake().insert_tree(root_dir, json!({})).await;
4553        for _ in 0..initial_entries {
4554            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4555        }
4556        log::info!("generated initial tree");
4557
4558        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4559        let worktree = Worktree::local(
4560            client.clone(),
4561            root_dir,
4562            true,
4563            fs.clone(),
4564            Default::default(),
4565            &mut cx.to_async(),
4566        )
4567        .await
4568        .unwrap();
4569
4570        worktree
4571            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4572            .await;
4573
4574        // After the initial scan is complete, the `UpdatedEntries` event can
4575        // be used to follow along with all changes to the worktree's snapshot.
4576        worktree.update(cx, |tree, cx| {
4577            let mut paths = tree
4578                .as_local()
4579                .unwrap()
4580                .paths()
4581                .cloned()
4582                .collect::<Vec<_>>();
4583
4584            cx.subscribe(&worktree, move |tree, _, event, _| {
4585                if let Event::UpdatedEntries(changes) = event {
4586                    for ((path, _), change_type) in changes.iter() {
4587                        let path = path.clone();
4588                        let ix = match paths.binary_search(&path) {
4589                            Ok(ix) | Err(ix) => ix,
4590                        };
4591                        match change_type {
4592                            PathChange::Added => {
4593                                assert_ne!(paths.get(ix), Some(&path));
4594                                paths.insert(ix, path);
4595                            }
4596
4597                            PathChange::Removed => {
4598                                assert_eq!(paths.get(ix), Some(&path));
4599                                paths.remove(ix);
4600                            }
4601
4602                            PathChange::Updated => {
4603                                assert_eq!(paths.get(ix), Some(&path));
4604                            }
4605
4606                            PathChange::AddedOrUpdated => {
4607                                if paths.get(ix) != Some(&path) {
4608                                    paths.insert(ix, path);
4609                                }
4610                            }
4611                        }
4612                    }
4613
4614                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
4615                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
4616                }
4617            })
4618            .detach();
4619        });
4620
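            // Pause fs event delivery so that mutations can accumulate and their
            // events can be flushed to the scanner in random-sized batches.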
4621        fs.as_fake().pause_events();
4622        let mut snapshots = Vec::new();
4623        let mut mutations_len = operations;
4624        while mutations_len > 1 {
4625            if rng.gen_bool(0.2) {
4626                worktree
4627                    .update(cx, |worktree, cx| {
4628                        randomly_mutate_worktree(worktree, &mut rng, cx)
4629                    })
4630                    .await
4631                    .log_err();
4632            } else {
4633                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4634            }
4635
4636            let buffered_event_count = fs.as_fake().buffered_event_count();
4637            if buffered_event_count > 0 && rng.gen_bool(0.3) {
4638                let len = rng.gen_range(0..=buffered_event_count);
4639                log::info!("flushing {} events", len);
4640                fs.as_fake().flush_events(len);
4641            } else {
4642                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4643                mutations_len -= 1;
4644            }
4645
4646            cx.foreground().run_until_parked();
4647            if rng.gen_bool(0.2) {
4648                log::info!("storing snapshot {}", snapshots.len());
4649                let snapshot =
4650                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4651                snapshots.push(snapshot);
4652            }
4653        }
4654
4655        log::info!("quiescing");
4656        fs.as_fake().flush_events(usize::MAX);
4657        cx.foreground().run_until_parked();
4658        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4659        snapshot.check_invariants();
4660
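            // A fresh scan of the final filesystem state should produce the same
            // snapshot as the incrementally-maintained one.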
4661        {
4662            let new_worktree = Worktree::local(
4663                client.clone(),
4664                root_dir,
4665                true,
4666                fs.clone(),
4667                Default::default(),
4668                &mut cx.to_async(),
4669            )
4670            .await
4671            .unwrap();
4672            new_worktree
4673                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4674                .await;
4675            let new_snapshot =
4676                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4677            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4678        }
4679
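            // Every stored snapshot should be updatable to the final state, with
            // ignored entries optionally stripped out first.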
4680        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4681            let include_ignored = rng.gen::<bool>();
4682            if !include_ignored {
4683                let mut entries_by_path_edits = Vec::new();
4684                let mut entries_by_id_edits = Vec::new();
4685                for entry in prev_snapshot
4686                    .entries_by_id
4687                    .cursor::<()>()
4688                    .filter(|e| e.is_ignored)
4689                {
4690                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4691                    entries_by_id_edits.push(Edit::Remove(entry.id));
4692                }
4693
4694                prev_snapshot
4695                    .entries_by_path
4696                    .edit(entries_by_path_edits, &());
4697                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4698            }
4699
4700            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4701            prev_snapshot.apply_remote_update(update.clone()).unwrap();
4702            assert_eq!(
4703                prev_snapshot.to_vec(include_ignored),
4704                snapshot.to_vec(include_ignored),
4705                "wrong update for snapshot {i}. update: {:?}",
4706                update
4707            );
4708        }
4709    }
4710
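        // Picks a random entry and either deletes it, renames it to a random new
        // path, creates a child file or directory beneath it, or overwrites its
        // contents, using the worktree's own entry APIs.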
4711    fn randomly_mutate_worktree(
4712        worktree: &mut Worktree,
4713        rng: &mut impl Rng,
4714        cx: &mut ModelContext<Worktree>,
4715    ) -> Task<Result<()>> {
4716        log::info!("mutating worktree");
4717        let worktree = worktree.as_local_mut().unwrap();
4718        let snapshot = worktree.snapshot();
4719        let entry = snapshot.entries(false).choose(rng).unwrap();
4720
4721        match rng.gen_range(0_u32..100) {
4722            0..=33 if entry.path.as_ref() != Path::new("") => {
4723                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4724                worktree.delete_entry(entry.id, cx).unwrap()
4725            }
4726            ..=66 if entry.path.as_ref() != Path::new("") => {
4727                let other_entry = snapshot.entries(false).choose(rng).unwrap();
4728                let new_parent_path = if other_entry.is_dir() {
4729                    other_entry.path.clone()
4730                } else {
4731                    other_entry.path.parent().unwrap().into()
4732                };
4733                let mut new_path = new_parent_path.join(gen_name(rng));
4734                if new_path.starts_with(&entry.path) {
4735                    new_path = gen_name(rng).into();
4736                }
4737
4738                log::info!(
4739                    "renaming entry {:?} ({}) to {:?}",
4740                    entry.path,
4741                    entry.id.0,
4742                    new_path
4743                );
4744                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4745                cx.foreground().spawn(async move {
4746                    task.await?;
4747                    Ok(())
4748                })
4749            }
4750            _ => {
4751                let task = if entry.is_dir() {
4752                    let child_path = entry.path.join(gen_name(rng));
4753                    let is_dir = rng.gen_bool(0.3);
4754                    log::info!(
4755                        "creating {} at {:?}",
4756                        if is_dir { "dir" } else { "file" },
4757                        child_path,
4758                    );
4759                    worktree.create_entry(child_path, is_dir, cx)
4760                } else {
4761                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4762                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4763                };
4764                cx.foreground().spawn(async move {
4765                    task.await?;
4766                    Ok(())
4767                })
4768            }
4769        }
4770    }
4771
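        // Mutates the fake filesystem directly: inserts a random file or directory,
        // occasionally writes a `.gitignore` covering random descendants of a
        // directory, or renames or deletes an existing path.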
4772    async fn randomly_mutate_fs(
4773        fs: &Arc<dyn Fs>,
4774        root_path: &Path,
4775        insertion_probability: f64,
4776        rng: &mut impl Rng,
4777    ) {
4778        log::info!("mutating fs");
4779        let mut files = Vec::new();
4780        let mut dirs = Vec::new();
4781        for path in fs.as_fake().paths() {
4782            if path.starts_with(root_path) {
4783                if fs.is_file(&path).await {
4784                    files.push(path);
4785                } else {
4786                    dirs.push(path);
4787                }
4788            }
4789        }
4790
4791        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4792            let path = dirs.choose(rng).unwrap();
4793            let new_path = path.join(gen_name(rng));
4794
4795            if rng.gen() {
4796                log::info!(
4797                    "creating dir {:?}",
4798                    new_path.strip_prefix(root_path).unwrap()
4799                );
4800                fs.create_dir(&new_path).await.unwrap();
4801            } else {
4802                log::info!(
4803                    "creating file {:?}",
4804                    new_path.strip_prefix(root_path).unwrap()
4805                );
4806                fs.create_file(&new_path, Default::default()).await.unwrap();
4807            }
4808        } else if rng.gen_bool(0.05) {
4809            let ignore_dir_path = dirs.choose(rng).unwrap();
4810            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4811
4812            let subdirs = dirs
4813                .iter()
4814                .filter(|d| d.starts_with(&ignore_dir_path))
4815                .cloned()
4816                .collect::<Vec<_>>();
4817            let subfiles = files
4818                .iter()
4819                .filter(|d| d.starts_with(&ignore_dir_path))
4820                .cloned()
4821                .collect::<Vec<_>>();
4822            let files_to_ignore = {
4823                let len = rng.gen_range(0..=subfiles.len());
4824                subfiles.choose_multiple(rng, len)
4825            };
4826            let dirs_to_ignore = {
4827                let len = rng.gen_range(0..subdirs.len());
4828                subdirs.choose_multiple(rng, len)
4829            };
4830
4831            let mut ignore_contents = String::new();
4832            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4833                writeln!(
4834                    ignore_contents,
4835                    "{}",
4836                    path_to_ignore
4837                        .strip_prefix(&ignore_dir_path)
4838                        .unwrap()
4839                        .to_str()
4840                        .unwrap()
4841                )
4842                .unwrap();
4843            }
4844            log::info!(
4845                "creating gitignore {:?} with contents:\n{}",
4846                ignore_path.strip_prefix(&root_path).unwrap(),
4847                ignore_contents
4848            );
4849            fs.save(
4850                &ignore_path,
4851                &ignore_contents.as_str().into(),
4852                Default::default(),
4853            )
4854            .await
4855            .unwrap();
4856        } else {
4857            let old_path = {
4858                let file_path = files.choose(rng);
4859                let dir_path = dirs[1..].choose(rng);
4860                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4861            };
4862
4863            let is_rename = rng.gen();
4864            if is_rename {
4865                let new_path_parent = dirs
4866                    .iter()
4867                    .filter(|d| !d.starts_with(old_path))
4868                    .choose(rng)
4869                    .unwrap();
4870
4871                let overwrite_existing_dir =
4872                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4873                let new_path = if overwrite_existing_dir {
4874                    fs.remove_dir(
4875                        &new_path_parent,
4876                        RemoveOptions {
4877                            recursive: true,
4878                            ignore_if_not_exists: true,
4879                        },
4880                    )
4881                    .await
4882                    .unwrap();
4883                    new_path_parent.to_path_buf()
4884                } else {
4885                    new_path_parent.join(gen_name(rng))
4886                };
4887
4888                log::info!(
4889                    "renaming {:?} to {}{:?}",
4890                    old_path.strip_prefix(&root_path).unwrap(),
4891                    if overwrite_existing_dir {
4892                        "overwrite "
4893                    } else {
4894                        ""
4895                    },
4896                    new_path.strip_prefix(&root_path).unwrap()
4897                );
4898                fs.rename(
4899                    &old_path,
4900                    &new_path,
4901                    fs::RenameOptions {
4902                        overwrite: true,
4903                        ignore_if_exists: true,
4904                    },
4905                )
4906                .await
4907                .unwrap();
4908            } else if fs.is_file(&old_path).await {
4909                log::info!(
4910                    "deleting file {:?}",
4911                    old_path.strip_prefix(&root_path).unwrap()
4912                );
4913                fs.remove_file(old_path, Default::default()).await.unwrap();
4914            } else {
4915                log::info!(
4916                    "deleting dir {:?}",
4917                    old_path.strip_prefix(&root_path).unwrap()
4918                );
4919                fs.remove_dir(
4920                    &old_path,
4921                    RemoveOptions {
4922                        recursive: true,
4923                        ignore_if_not_exists: true,
4924                    },
4925                )
4926                .await
4927                .unwrap();
4928            }
4929        }
4930    }
4931
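        // Generates a random six-character alphanumeric name for new files and
        // directories.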
4932    fn gen_name(rng: &mut impl Rng) -> String {
4933        (0..6)
4934            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4935            .map(char::from)
4936            .collect()
4937    }
4938
4939    impl LocalSnapshot {
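            // Asserts the snapshot's internal consistency: `entries_by_path` and
            // `entries_by_id` describe the same set of entries, the file iterators
            // agree with the entry tree, all traversal orders match, and every
            // tracked `.gitignore` still has both its parent directory and the
            // ignore file itself present in the snapshot.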
4940        fn check_invariants(&self) {
4941            assert_eq!(
4942                self.entries_by_path
4943                    .cursor::<()>()
4944                    .map(|e| (&e.path, e.id))
4945                    .collect::<Vec<_>>(),
4946                self.entries_by_id
4947                    .cursor::<()>()
4948                    .map(|e| (&e.path, e.id))
4949                    .collect::<collections::BTreeSet<_>>()
4950                    .into_iter()
4951                    .collect::<Vec<_>>(),
4952                "entries_by_path and entries_by_id are inconsistent"
4953            );
4954
4955            let mut files = self.files(true, 0);
4956            let mut visible_files = self.files(false, 0);
4957            for entry in self.entries_by_path.cursor::<()>() {
4958                if entry.is_file() {
4959                    assert_eq!(files.next().unwrap().inode, entry.inode);
4960                    if !entry.is_ignored {
4961                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4962                    }
4963                }
4964            }
4965
4966            assert!(files.next().is_none());
4967            assert!(visible_files.next().is_none());
4968
4969            let mut dfs_paths_via_stack = Vec::new();
4970            let mut stack = vec![Path::new("")];
4971            while let Some(path) = stack.pop() {
4972                dfs_paths_via_stack.push(path);
4973                let ix = stack.len();
4974                for child_entry in self.child_entries(path) {
4975                    stack.insert(ix, &child_entry.path);
4976                }
4977            }
4978
4979            let dfs_paths_via_iter = self
4980                .entries_by_path
4981                .cursor::<()>()
4982                .map(|e| e.path.as_ref())
4983                .collect::<Vec<_>>();
4984            assert_eq!(dfs_paths_via_stack, dfs_paths_via_iter);
4985
4986            let dfs_paths_via_traversal = self
4987                .entries(true)
4988                .map(|e| e.path.as_ref())
4989                .collect::<Vec<_>>();
4990            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4991
4992            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4993                let ignore_parent_path =
4994                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4995                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4996                assert!(self
4997                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4998                    .is_some());
4999            }
5000        }
5001
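            // Flattens the snapshot into a path-sorted list of
            // (path, inode, is_ignored) tuples, optionally excluding ignored
            // entries, for comparison in tests.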
5002        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
5003            let mut paths = Vec::new();
5004            for entry in self.entries_by_path.cursor::<()>() {
5005                if include_ignored || !entry.is_ignored {
5006                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
5007                }
5008            }
5009            paths.sort_by(|a, b| a.0.cmp(b.0));
5010            paths
5011        }
5012    }
5013}