worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TakeUntilExt, TryFutureExt};
  59
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  61pub struct WorktreeId(usize);
  62
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
  68pub struct LocalWorktree {
  69    snapshot: LocalSnapshot,
  70    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
  71    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
  72    _background_scanner_task: Task<()>,
  73    share: Option<ShareState>,
  74    diagnostics: HashMap<
  75        Arc<Path>,
  76        Vec<(
  77            LanguageServerId,
  78            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
  79        )>,
  80    >,
  81    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  82    client: Arc<Client>,
  83    fs: Arc<dyn Fs>,
  84    visible: bool,
  85}
  86
  87pub struct RemoteWorktree {
  88    snapshot: Snapshot,
  89    background_snapshot: Arc<Mutex<Snapshot>>,
  90    project_id: u64,
  91    client: Arc<Client>,
  92    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
  93    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
  94    replica_id: ReplicaId,
  95    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  96    visible: bool,
  97    disconnected: bool,
  98}
  99
 100#[derive(Clone)]
 101pub struct Snapshot {
 102    id: WorktreeId,
 103    abs_path: Arc<Path>,
 104    root_name: String,
 105    root_char_bag: CharBag,
 106    entries_by_path: SumTree<Entry>,
 107    entries_by_id: SumTree<PathEntry>,
 108    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 109
 110    /// A number that increases every time the worktree begins scanning
 111    /// a set of paths from the filesystem. This scanning could be caused
 112    /// by some operation performed on the worktree, such as reading or
 113    /// writing a file, or by an event reported by the filesystem.
 114    scan_id: usize,
 115
 116    /// The latest scan id that has completed, and whose preceding scans
 117    /// have all completed. The current `scan_id` could be more than one
 118    /// greater than the `completed_scan_id` if operations are performed
 119    /// on the worktree while it is processing a file-system event.
 120    completed_scan_id: usize,
 121}
 122
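// A minimal, test-only sketch of how the two ids documented above relate: the
// worktree is fully up to date only when every scan that has started has also
// completed. `is_settled` is a hypothetical helper, not part of this module.
#[cfg(test)]
mod scan_id_example {
    fn is_settled(scan_id: usize, completed_scan_id: usize) -> bool {
        completed_scan_id == scan_id
    }

    #[test]
    fn scan_ids_track_in_flight_scans() {
        // A freshly created local snapshot starts with scan 1 in progress
        // (`scan_id: 1`, `completed_scan_id: 0`).
        assert!(!is_settled(1, 0));
        // Once that scan finishes, the two ids agree.
        assert!(is_settled(1, 1));
        // Operations performed while a filesystem event is still being
        // processed can push `scan_id` more than one ahead of
        // `completed_scan_id`.
        assert!(!is_settled(3, 1));
    }
}
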
 123#[derive(Clone, Debug, PartialEq, Eq)]
 124pub struct RepositoryEntry {
 125    pub(crate) work_directory: WorkDirectoryEntry,
 126    pub(crate) branch: Option<Arc<str>>,
 127    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
 128}
 129
 130fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
 131    proto::GitStatus::from_i32(git_status).map(|status| match status {
 132        proto::GitStatus::Added => GitFileStatus::Added,
 133        proto::GitStatus::Modified => GitFileStatus::Modified,
 134        proto::GitStatus::Conflict => GitFileStatus::Conflict,
 135    })
 136}
 137
 138impl RepositoryEntry {
 139    pub fn branch(&self) -> Option<Arc<str>> {
 140        self.branch.clone()
 141    }
 142
 143    pub fn work_directory_id(&self) -> ProjectEntryId {
 144        *self.work_directory
 145    }
 146
 147    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 148        snapshot
 149            .entry_for_id(self.work_directory_id())
 150            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 151    }
 152
 153    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 154        self.work_directory
 155            .relativize(snapshot, path)
 156            .and_then(|repo_path| {
 157                self.statuses
 158                    .iter_from(&repo_path)
 159                    .take_while(|(key, _)| key.starts_with(&repo_path))
                    // Short circuit once we've found the highest level
 161                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
 162                    .map(|(_, status)| status)
 163                    .reduce(
 164                        |status_first, status_second| match (status_first, status_second) {
 165                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
 166                                &GitFileStatus::Conflict
 167                            }
 168                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
 169                                &GitFileStatus::Modified
 170                            }
 171                            _ => &GitFileStatus::Added,
 172                        },
 173                    )
 174                    .copied()
 175            })
 176    }
 177
 178    #[cfg(any(test, feature = "test-support"))]
 179    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 180        self.work_directory
 181            .relativize(snapshot, path)
            .and_then(|repo_path| self.statuses.get(&repo_path))
 183            .cloned()
 184    }
 185
 186    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
 187        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
 188        let mut removed_statuses: Vec<String> = Vec::new();
 189
 190        let mut self_statuses = self.statuses.iter().peekable();
 191        let mut other_statuses = other.statuses.iter().peekable();
 192        loop {
 193            match (self_statuses.peek(), other_statuses.peek()) {
 194                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
 195                    match Ord::cmp(self_repo_path, other_repo_path) {
 196                        Ordering::Less => {
 197                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
 198                            self_statuses.next();
 199                        }
 200                        Ordering::Equal => {
 201                            if self_status != other_status {
 202                                updated_statuses
 203                                    .push(make_status_entry(self_repo_path, self_status));
 204                            }
 205
 206                            self_statuses.next();
 207                            other_statuses.next();
 208                        }
 209                        Ordering::Greater => {
 210                            removed_statuses.push(make_repo_path(other_repo_path));
 211                            other_statuses.next();
 212                        }
 213                    }
 214                }
 215                (Some((self_repo_path, self_status)), None) => {
 216                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
 217                    self_statuses.next();
 218                }
 219                (None, Some((other_repo_path, _))) => {
 220                    removed_statuses.push(make_repo_path(other_repo_path));
 221                    other_statuses.next();
 222                }
 223                (None, None) => break,
 224            }
 225        }
 226
 227        proto::RepositoryEntry {
 228            work_directory_id: self.work_directory_id().to_proto(),
 229            branch: self.branch.as_ref().map(|str| str.to_string()),
 230            removed_repo_paths: removed_statuses,
 231            updated_statuses,
 232        }
 233    }
 234}
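
// A test-only illustration (with made-up statuses) of the precedence
// `status_for_path` applies when rolling the statuses under a directory up
// into a single value: `Conflict` beats `Modified`, which beats `Added`.
#[cfg(test)]
mod status_precedence_example {
    use fs::repository::GitFileStatus;

    fn combine(a: GitFileStatus, b: GitFileStatus) -> GitFileStatus {
        match (a, b) {
            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => GitFileStatus::Conflict,
            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => GitFileStatus::Modified,
            _ => GitFileStatus::Added,
        }
    }

    #[test]
    fn conflict_and_modified_take_precedence_over_added() {
        let statuses = [
            GitFileStatus::Added,
            GitFileStatus::Modified,
            GitFileStatus::Added,
        ];
        let rolled_up = statuses.iter().copied().reduce(combine).unwrap();
        assert!(matches!(rolled_up, GitFileStatus::Modified));

        let statuses = [GitFileStatus::Modified, GitFileStatus::Conflict];
        let rolled_up = statuses.iter().copied().reduce(combine).unwrap();
        assert!(matches!(rolled_up, GitFileStatus::Conflict));
    }
}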
 235
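// A standalone, test-only sketch of the sorted-merge diff pattern used by
// `RepositoryEntry::build_update` above and `LocalWorktree::changed_repos`
// below: walk two iterators that yield keys in ascending order, advance
// whichever side is behind, and classify each key as updated or removed.
// The maps and keys here are made up for the example.
#[cfg(test)]
mod sorted_merge_diff_example {
    use std::cmp::Ordering;
    use std::collections::BTreeMap;

    /// Returns (updated_or_added_keys, removed_keys) when going from `old` to `new`.
    fn diff(new: &BTreeMap<&str, i32>, old: &BTreeMap<&str, i32>) -> (Vec<String>, Vec<String>) {
        let mut updated = Vec::new();
        let mut removed = Vec::new();
        let mut new_iter = new.iter().peekable();
        let mut old_iter = old.iter().peekable();
        loop {
            match (new_iter.peek(), old_iter.peek()) {
                (Some((new_key, new_value)), Some((old_key, old_value))) => {
                    match Ord::cmp(new_key, old_key) {
                        Ordering::Less => {
                            updated.push(new_key.to_string());
                            new_iter.next();
                        }
                        Ordering::Equal => {
                            if new_value != old_value {
                                updated.push(new_key.to_string());
                            }
                            new_iter.next();
                            old_iter.next();
                        }
                        Ordering::Greater => {
                            removed.push(old_key.to_string());
                            old_iter.next();
                        }
                    }
                }
                (Some((new_key, _)), None) => {
                    updated.push(new_key.to_string());
                    new_iter.next();
                }
                (None, Some((old_key, _))) => {
                    removed.push(old_key.to_string());
                    old_iter.next();
                }
                (None, None) => break,
            }
        }
        (updated, removed)
    }

    #[test]
    fn classifies_updates_and_removals() {
        let old = BTreeMap::from([("a.rs", 1), ("b.rs", 2), ("c.rs", 3)]);
        let new = BTreeMap::from([("a.rs", 1), ("b.rs", 20), ("d.rs", 4)]);
        let (updated, removed) = diff(&new, &old);
        assert_eq!(updated, vec!["b.rs", "d.rs"]);
        assert_eq!(removed, vec!["c.rs"]);
    }
}
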
 236fn make_repo_path(path: &RepoPath) -> String {
 237    path.as_os_str().to_string_lossy().to_string()
 238}
 239
 240fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
 241    proto::StatusEntry {
 242        repo_path: make_repo_path(path),
 243        status: match status {
 244            GitFileStatus::Added => proto::GitStatus::Added.into(),
 245            GitFileStatus::Modified => proto::GitStatus::Modified.into(),
 246            GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
 247        },
 248    }
 249}
 250
 251impl From<&RepositoryEntry> for proto::RepositoryEntry {
 252    fn from(value: &RepositoryEntry) -> Self {
 253        proto::RepositoryEntry {
 254            work_directory_id: value.work_directory.to_proto(),
 255            branch: value.branch.as_ref().map(|str| str.to_string()),
 256            updated_statuses: value
 257                .statuses
 258                .iter()
 259                .map(|(repo_path, status)| make_status_entry(repo_path, status))
 260                .collect(),
 261            removed_repo_paths: Default::default(),
 262        }
 263    }
 264}
 265
/// This path corresponds to the repository's 'content path': the worktree-relative
/// path of the folder that contains the `.git`.
 267#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 268pub struct RepositoryWorkDirectory(Arc<Path>);
 269
 270impl Default for RepositoryWorkDirectory {
 271    fn default() -> Self {
 272        RepositoryWorkDirectory(Arc::from(Path::new("")))
 273    }
 274}
 275
 276impl AsRef<Path> for RepositoryWorkDirectory {
 277    fn as_ref(&self) -> &Path {
 278        self.0.as_ref()
 279    }
 280}
 281
 282#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 283pub struct WorkDirectoryEntry(ProjectEntryId);
 284
 285impl WorkDirectoryEntry {
 286    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 287        worktree.entry_for_id(self.0).and_then(|entry| {
 288            path.strip_prefix(&entry.path)
 289                .ok()
 290                .map(move |path| path.into())
 291        })
 292    }
 293}
 294
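// Illustration only: `relativize` above strips the work directory's
// worktree-relative path off the front of a file's worktree-relative path,
// yielding a path relative to the repository root. The bare `strip_prefix`
// below is the same operation, shown without constructing a `Snapshot`; the
// concrete paths are made up.
#[cfg(test)]
mod relativize_example {
    use std::path::Path;

    #[test]
    fn strips_the_work_directory_prefix() {
        let work_directory = Path::new("crates/project");
        let file = Path::new("crates/project/src/worktree.rs");
        assert_eq!(
            file.strip_prefix(work_directory).unwrap(),
            Path::new("src/worktree.rs")
        );

        // Paths outside the work directory cannot be relativized, which is why
        // `relativize` returns `None` for them.
        assert!(Path::new("crates/editor/src/lib.rs")
            .strip_prefix(work_directory)
            .is_err());
    }
}
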
 295impl Deref for WorkDirectoryEntry {
 296    type Target = ProjectEntryId;
 297
 298    fn deref(&self) -> &Self::Target {
 299        &self.0
 300    }
 301}
 302
impl From<ProjectEntryId> for WorkDirectoryEntry {
 304    fn from(value: ProjectEntryId) -> Self {
 305        WorkDirectoryEntry(value)
 306    }
 307}
 308
 309#[derive(Debug, Clone)]
 310pub struct LocalSnapshot {
 311    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by the absolute path
    /// of their parent directory. The boolean indicates whether the gitignore
    /// needs to be updated.
 314    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their work directory (the folder that contains the `.git`).
 317    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 318}
 319
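// A small, test-only sketch of the `ignore` crate API behind the `Gitignore`
// values stored above, using made-up paths and patterns. Each entry is built
// from a single `.gitignore` file rooted at its parent directory and is later
// consulted with `matched(path, is_dir)`.
#[cfg(test)]
mod gitignore_example {
    use ::ignore::gitignore::GitignoreBuilder;

    #[test]
    fn builds_and_queries_a_gitignore() {
        // Root the rules at the (hypothetical) directory containing the
        // .gitignore file, mirroring the keys of `ignores_by_parent_abs_path`.
        let mut builder = GitignoreBuilder::new("/home/user/project");
        builder.add_line(None, "target/").unwrap();
        builder.add_line(None, "*.log").unwrap();
        let gitignore = builder.build().unwrap();

        // Paths are queried relative to the root given above.
        assert!(gitignore.matched("target", true).is_ignore());
        assert!(gitignore.matched("debug.log", false).is_ignore());
        assert!(!gitignore.matched("src/main.rs", false).is_ignore());
    }
}
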
 320pub struct BackgroundScannerState {
 321    snapshot: LocalSnapshot,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update, keyed by their inodes. These entry
    /// ids may be re-used if the same inode is discovered at a new path,
    /// or if the given path is re-created after being deleted.
 326    removed_entry_ids: HashMap<u64, ProjectEntryId>,
 327    changed_paths: Vec<Arc<Path>>,
 328    prev_snapshot: Snapshot,
 329}
 330
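// A simplified, test-only sketch of the entry-id reuse described above: when
// an inode disappears and later reappears at a new path, the scanner can hand
// back the id recorded in `removed_entry_ids` instead of allocating a fresh
// one, so a move is observed as the same entry rather than delete + create.
// Plain `usize` ids stand in for `ProjectEntryId` here.
#[cfg(test)]
mod entry_id_reuse_example {
    use collections::HashMap;

    #[test]
    fn reuses_the_id_recorded_for_an_inode() {
        let mut removed_entry_ids: HashMap<u64, usize> = HashMap::default();

        // An entry with inode 42 and id 7 is removed during the current update.
        removed_entry_ids.insert(42, 7);

        // The same inode is discovered again at a new path: id 7 is reused.
        assert_eq!(removed_entry_ids.get(&42).copied(), Some(7));

        // An inode that was never recorded gets a freshly allocated id instead.
        assert_eq!(removed_entry_ids.get(&99), None);
    }
}
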
 331#[derive(Debug, Clone)]
 332pub struct LocalRepositoryEntry {
 333    pub(crate) scan_id: usize,
 334    pub(crate) git_dir_scan_id: usize,
 335    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
 336    /// Path to the actual .git folder.
 337    /// Note: if .git is a file, this points to the folder indicated by the .git file
 338    pub(crate) git_dir_path: Arc<Path>,
 339}
 340
 341impl LocalRepositoryEntry {
    // Note: `path` is expected to be relative to the worktree root.
 343    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 344        path.starts_with(self.git_dir_path.as_ref())
 345    }
 346}
 347
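// Test-only illustration of `in_dot_git` above: the check is a plain
// path-prefix test against `git_dir_path`; the queried path is expected to be
// relative to the worktree root, and the paths below are made up.
#[cfg(test)]
mod in_dot_git_example {
    use std::path::Path;

    #[test]
    fn paths_under_the_git_dir_are_detected_by_prefix() {
        let git_dir_path = Path::new("submodule/.git");

        assert!(Path::new("submodule/.git/HEAD").starts_with(git_dir_path));
        assert!(Path::new("submodule/.git/refs/heads/main").starts_with(git_dir_path));
        assert!(!Path::new("submodule/src/lib.rs").starts_with(git_dir_path));
    }
}
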
 348impl Deref for LocalSnapshot {
 349    type Target = Snapshot;
 350
 351    fn deref(&self) -> &Self::Target {
 352        &self.snapshot
 353    }
 354}
 355
 356impl DerefMut for LocalSnapshot {
 357    fn deref_mut(&mut self) -> &mut Self::Target {
 358        &mut self.snapshot
 359    }
 360}
 361
 362enum ScanState {
 363    Started,
 364    Updated {
 365        snapshot: LocalSnapshot,
 366        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
 367        barrier: Option<barrier::Sender>,
 368        scanning: bool,
 369    },
 370}
 371
 372struct ShareState {
 373    project_id: u64,
 374    snapshots_tx: watch::Sender<LocalSnapshot>,
 375    resume_updates: watch::Sender<()>,
 376    _maintain_remote_snapshot: Task<Option<()>>,
 377}
 378
 379pub enum Event {
 380    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
 381    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
 382}
 383
 384impl Entity for Worktree {
 385    type Event = Event;
 386}
 387
 388impl Worktree {
 389    pub async fn local(
 390        client: Arc<Client>,
 391        path: impl Into<Arc<Path>>,
 392        visible: bool,
 393        fs: Arc<dyn Fs>,
 394        next_entry_id: Arc<AtomicUsize>,
 395        cx: &mut AsyncAppContext,
 396    ) -> Result<ModelHandle<Self>> {
 397        // After determining whether the root entry is a file or a directory, populate the
 398        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
 399        let abs_path = path.into();
 400        let metadata = fs
 401            .metadata(&abs_path)
 402            .await
 403            .context("failed to stat worktree path")?;
 404
 405        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
 406            let root_name = abs_path
 407                .file_name()
 408                .map_or(String::new(), |f| f.to_string_lossy().to_string());
 409
 410            let mut snapshot = LocalSnapshot {
 411                ignores_by_parent_abs_path: Default::default(),
 412                git_repositories: Default::default(),
 413                snapshot: Snapshot {
 414                    id: WorktreeId::from_usize(cx.model_id()),
 415                    abs_path: abs_path.clone(),
 416                    root_name: root_name.clone(),
 417                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
 418                    entries_by_path: Default::default(),
 419                    entries_by_id: Default::default(),
 420                    repository_entries: Default::default(),
 421                    scan_id: 1,
 422                    completed_scan_id: 0,
 423                },
 424            };
 425
 426            if let Some(metadata) = metadata {
 427                snapshot.insert_entry(
 428                    Entry::new(
 429                        Arc::from(Path::new("")),
 430                        &metadata,
 431                        &next_entry_id,
 432                        snapshot.root_char_bag,
 433                    ),
 434                    fs.as_ref(),
 435                );
 436            }
 437
 438            let (path_changes_tx, path_changes_rx) = channel::unbounded();
 439            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 440
 441            cx.spawn_weak(|this, mut cx| async move {
 442                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
 443                    this.update(&mut cx, |this, cx| {
 444                        let this = this.as_local_mut().unwrap();
 445                        match state {
 446                            ScanState::Started => {
 447                                *this.is_scanning.0.borrow_mut() = true;
 448                            }
 449                            ScanState::Updated {
 450                                snapshot,
 451                                changes,
 452                                barrier,
 453                                scanning,
 454                            } => {
 455                                *this.is_scanning.0.borrow_mut() = scanning;
 456                                this.set_snapshot(snapshot, cx);
 457                                cx.emit(Event::UpdatedEntries(changes));
 458                                drop(barrier);
 459                            }
 460                        }
 461                        cx.notify();
 462                    });
 463                }
 464            })
 465            .detach();
 466
 467            let background_scanner_task = cx.background().spawn({
 468                let fs = fs.clone();
 469                let snapshot = snapshot.clone();
 470                let background = cx.background().clone();
 471                async move {
 472                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 473                    BackgroundScanner::new(
 474                        snapshot,
 475                        next_entry_id,
 476                        fs,
 477                        scan_states_tx,
 478                        background,
 479                        path_changes_rx,
 480                    )
 481                    .run(events)
 482                    .await;
 483                }
 484            });
 485
 486            Worktree::Local(LocalWorktree {
 487                snapshot,
 488                is_scanning: watch::channel_with(true),
 489                share: None,
 490                path_changes_tx,
 491                _background_scanner_task: background_scanner_task,
 492                diagnostics: Default::default(),
 493                diagnostic_summaries: Default::default(),
 494                client,
 495                fs,
 496                visible,
 497            })
 498        }))
 499    }
 500
 501    pub fn remote(
 502        project_remote_id: u64,
 503        replica_id: ReplicaId,
 504        worktree: proto::WorktreeMetadata,
 505        client: Arc<Client>,
 506        cx: &mut AppContext,
 507    ) -> ModelHandle<Self> {
 508        cx.add_model(|cx: &mut ModelContext<Self>| {
 509            let snapshot = Snapshot {
 510                id: WorktreeId(worktree.id as usize),
 511                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
 512                root_name: worktree.root_name.clone(),
 513                root_char_bag: worktree
 514                    .root_name
 515                    .chars()
 516                    .map(|c| c.to_ascii_lowercase())
 517                    .collect(),
 518                entries_by_path: Default::default(),
 519                entries_by_id: Default::default(),
 520                repository_entries: Default::default(),
 521                scan_id: 1,
 522                completed_scan_id: 0,
 523            };
 524
 525            let (updates_tx, mut updates_rx) = mpsc::unbounded();
 526            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
 527            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 528
 529            cx.background()
 530                .spawn({
 531                    let background_snapshot = background_snapshot.clone();
 532                    async move {
 533                        while let Some(update) = updates_rx.next().await {
 534                            if let Err(error) =
 535                                background_snapshot.lock().apply_remote_update(update)
 536                            {
 537                                log::error!("error applying worktree update: {}", error);
 538                            }
 539                            snapshot_updated_tx.send(()).await.ok();
 540                        }
 541                    }
 542                })
 543                .detach();
 544
 545            cx.spawn_weak(|this, mut cx| async move {
 546                while (snapshot_updated_rx.recv().await).is_some() {
 547                    if let Some(this) = this.upgrade(&cx) {
 548                        this.update(&mut cx, |this, cx| {
 549                            let this = this.as_remote_mut().unwrap();
 550                            this.snapshot = this.background_snapshot.lock().clone();
 551                            cx.emit(Event::UpdatedEntries(Default::default()));
 552                            cx.notify();
 553                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 554                                if this.observed_snapshot(*scan_id) {
 555                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 556                                    let _ = tx.send(());
 557                                } else {
 558                                    break;
 559                                }
 560                            }
 561                        });
 562                    } else {
 563                        break;
 564                    }
 565                }
 566            })
 567            .detach();
 568
 569            Worktree::Remote(RemoteWorktree {
 570                project_id: project_remote_id,
 571                replica_id,
 572                snapshot: snapshot.clone(),
 573                background_snapshot,
 574                updates_tx: Some(updates_tx),
 575                snapshot_subscriptions: Default::default(),
 576                client: client.clone(),
 577                diagnostic_summaries: Default::default(),
 578                visible: worktree.visible,
 579                disconnected: false,
 580            })
 581        })
 582    }
 583
 584    pub fn as_local(&self) -> Option<&LocalWorktree> {
 585        if let Worktree::Local(worktree) = self {
 586            Some(worktree)
 587        } else {
 588            None
 589        }
 590    }
 591
 592    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 593        if let Worktree::Remote(worktree) = self {
 594            Some(worktree)
 595        } else {
 596            None
 597        }
 598    }
 599
 600    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 601        if let Worktree::Local(worktree) = self {
 602            Some(worktree)
 603        } else {
 604            None
 605        }
 606    }
 607
 608    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 609        if let Worktree::Remote(worktree) = self {
 610            Some(worktree)
 611        } else {
 612            None
 613        }
 614    }
 615
 616    pub fn is_local(&self) -> bool {
 617        matches!(self, Worktree::Local(_))
 618    }
 619
 620    pub fn is_remote(&self) -> bool {
 621        !self.is_local()
 622    }
 623
 624    pub fn snapshot(&self) -> Snapshot {
 625        match self {
 626            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 627            Worktree::Remote(worktree) => worktree.snapshot(),
 628        }
 629    }
 630
 631    pub fn scan_id(&self) -> usize {
 632        match self {
 633            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 634            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 635        }
 636    }
 637
 638    pub fn completed_scan_id(&self) -> usize {
 639        match self {
 640            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 641            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 642        }
 643    }
 644
 645    pub fn is_visible(&self) -> bool {
 646        match self {
 647            Worktree::Local(worktree) => worktree.visible,
 648            Worktree::Remote(worktree) => worktree.visible,
 649        }
 650    }
 651
 652    pub fn replica_id(&self) -> ReplicaId {
 653        match self {
 654            Worktree::Local(_) => 0,
 655            Worktree::Remote(worktree) => worktree.replica_id,
 656        }
 657    }
 658
 659    pub fn diagnostic_summaries(
 660        &self,
 661    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 662        match self {
 663            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 664            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 665        }
 666        .iter()
 667        .flat_map(|(path, summaries)| {
 668            summaries
 669                .iter()
 670                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 671        })
 672    }
 673
 674    pub fn abs_path(&self) -> Arc<Path> {
 675        match self {
 676            Worktree::Local(worktree) => worktree.abs_path.clone(),
 677            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 678        }
 679    }
 680}
 681
 682impl LocalWorktree {
 683    pub fn contains_abs_path(&self, path: &Path) -> bool {
 684        path.starts_with(&self.abs_path)
 685    }
 686
 687    fn absolutize(&self, path: &Path) -> PathBuf {
 688        if path.file_name().is_some() {
 689            self.abs_path.join(path)
 690        } else {
 691            self.abs_path.to_path_buf()
 692        }
 693    }
 694
 695    pub(crate) fn load_buffer(
 696        &mut self,
 697        id: u64,
 698        path: &Path,
 699        cx: &mut ModelContext<Worktree>,
 700    ) -> Task<Result<ModelHandle<Buffer>>> {
 701        let path = Arc::from(path);
 702        cx.spawn(move |this, mut cx| async move {
 703            let (file, contents, diff_base) = this
 704                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
 705                .await?;
 706            let text_buffer = cx
 707                .background()
 708                .spawn(async move { text::Buffer::new(0, id, contents) })
 709                .await;
 710            Ok(cx.add_model(|_| Buffer::build(text_buffer, diff_base, Some(Arc::new(file)))))
 711        })
 712    }
 713
 714    pub fn diagnostics_for_path(
 715        &self,
 716        path: &Path,
 717    ) -> Vec<(
 718        LanguageServerId,
 719        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 720    )> {
 721        self.diagnostics.get(path).cloned().unwrap_or_default()
 722    }
 723
 724    pub fn clear_diagnostics_for_language_server(
 725        &mut self,
 726        server_id: LanguageServerId,
 727        _: &mut ModelContext<Worktree>,
 728    ) {
 729        let worktree_id = self.id().to_proto();
 730        self.diagnostic_summaries
 731            .retain(|path, summaries_by_server_id| {
 732                if summaries_by_server_id.remove(&server_id).is_some() {
 733                    if let Some(share) = self.share.as_ref() {
 734                        self.client
 735                            .send(proto::UpdateDiagnosticSummary {
 736                                project_id: share.project_id,
 737                                worktree_id,
 738                                summary: Some(proto::DiagnosticSummary {
 739                                    path: path.to_string_lossy().to_string(),
 740                                    language_server_id: server_id.0 as u64,
 741                                    error_count: 0,
 742                                    warning_count: 0,
 743                                }),
 744                            })
 745                            .log_err();
 746                    }
 747                    !summaries_by_server_id.is_empty()
 748                } else {
 749                    true
 750                }
 751            });
 752
 753        self.diagnostics.retain(|_, diagnostics_by_server_id| {
 754            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 755                diagnostics_by_server_id.remove(ix);
 756                !diagnostics_by_server_id.is_empty()
 757            } else {
 758                true
 759            }
 760        });
 761    }
 762
 763    pub fn update_diagnostics(
 764        &mut self,
 765        server_id: LanguageServerId,
 766        worktree_path: Arc<Path>,
 767        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 768        _: &mut ModelContext<Worktree>,
 769    ) -> Result<bool> {
 770        let summaries_by_server_id = self
 771            .diagnostic_summaries
 772            .entry(worktree_path.clone())
 773            .or_default();
 774
 775        let old_summary = summaries_by_server_id
 776            .remove(&server_id)
 777            .unwrap_or_default();
 778
 779        let new_summary = DiagnosticSummary::new(&diagnostics);
 780        if new_summary.is_empty() {
 781            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
 782                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 783                    diagnostics_by_server_id.remove(ix);
 784                }
 785                if diagnostics_by_server_id.is_empty() {
 786                    self.diagnostics.remove(&worktree_path);
 787                }
 788            }
 789        } else {
 790            summaries_by_server_id.insert(server_id, new_summary);
 791            let diagnostics_by_server_id =
 792                self.diagnostics.entry(worktree_path.clone()).or_default();
 793            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 794                Ok(ix) => {
 795                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
 796                }
 797                Err(ix) => {
 798                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
 799                }
 800            }
 801        }
 802
 803        if !old_summary.is_empty() || !new_summary.is_empty() {
 804            if let Some(share) = self.share.as_ref() {
 805                self.client
 806                    .send(proto::UpdateDiagnosticSummary {
 807                        project_id: share.project_id,
 808                        worktree_id: self.id().to_proto(),
 809                        summary: Some(proto::DiagnosticSummary {
 810                            path: worktree_path.to_string_lossy().to_string(),
 811                            language_server_id: server_id.0 as u64,
 812                            error_count: new_summary.error_count as u32,
 813                            warning_count: new_summary.warning_count as u32,
 814                        }),
 815                    })
 816                    .log_err();
 817            }
 818        }
 819
 820        Ok(!old_summary.is_empty() || !new_summary.is_empty())
 821    }
 822
 823    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
 824        let updated_repos =
 825            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
 826
 827        self.snapshot = new_snapshot;
 828
 829        if let Some(share) = self.share.as_mut() {
 830            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
 831        }
 832
 833        if !updated_repos.is_empty() {
 834            cx.emit(Event::UpdatedGitRepositories(updated_repos));
 835        }
 836    }
 837
 838    fn changed_repos(
 839        &self,
 840        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 841        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 842    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
 843        let mut diff = HashMap::default();
 844        let mut old_repos = old_repos.iter().peekable();
 845        let mut new_repos = new_repos.iter().peekable();
 846        loop {
 847            match (old_repos.peek(), new_repos.peek()) {
 848                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
 849                    match Ord::cmp(old_entry_id, new_entry_id) {
 850                        Ordering::Less => {
 851                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
 852                                diff.insert(entry.path.clone(), (*old_repo).clone());
 853                            }
 854                            old_repos.next();
 855                        }
 856                        Ordering::Equal => {
 857                            if old_repo.git_dir_scan_id != new_repo.git_dir_scan_id {
 858                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
 859                                    diff.insert(entry.path.clone(), (*new_repo).clone());
 860                                }
 861                            }
 862
 863                            old_repos.next();
 864                            new_repos.next();
 865                        }
 866                        Ordering::Greater => {
 867                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
 868                                diff.insert(entry.path.clone(), (*new_repo).clone());
 869                            }
 870                            new_repos.next();
 871                        }
 872                    }
 873                }
 874                (Some((old_entry_id, old_repo)), None) => {
 875                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
 876                        diff.insert(entry.path.clone(), (*old_repo).clone());
 877                    }
 878                    old_repos.next();
 879                }
 880                (None, Some((new_entry_id, new_repo))) => {
 881                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
 882                        diff.insert(entry.path.clone(), (*new_repo).clone());
 883                    }
 884                    new_repos.next();
 885                }
 886                (None, None) => break,
 887            }
 888        }
 889        diff
 890    }
 891
 892    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 893        let mut is_scanning_rx = self.is_scanning.1.clone();
 894        async move {
 895            let mut is_scanning = is_scanning_rx.borrow().clone();
 896            while is_scanning {
 897                if let Some(value) = is_scanning_rx.recv().await {
 898                    is_scanning = value;
 899                } else {
 900                    break;
 901                }
 902            }
 903        }
 904    }
 905
 906    pub fn snapshot(&self) -> LocalSnapshot {
 907        self.snapshot.clone()
 908    }
 909
 910    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 911        proto::WorktreeMetadata {
 912            id: self.id().to_proto(),
 913            root_name: self.root_name().to_string(),
 914            visible: self.visible,
 915            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 916        }
 917    }
 918
 919    fn load(
 920        &self,
 921        path: &Path,
 922        cx: &mut ModelContext<Worktree>,
 923    ) -> Task<Result<(File, String, Option<String>)>> {
 924        let handle = cx.handle();
 925        let path = Arc::from(path);
 926        let abs_path = self.absolutize(&path);
 927        let fs = self.fs.clone();
 928        let snapshot = self.snapshot();
 929
 930        let mut index_task = None;
 931
 932        if let Some(repo) = snapshot.repository_for_path(&path) {
 933            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
 934            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
 935                let repo = repo.repo_ptr.to_owned();
 936                index_task = Some(
 937                    cx.background()
 938                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
 939                );
 940            }
 941        }
 942
 943        cx.spawn(|this, mut cx| async move {
 944            let text = fs.load(&abs_path).await?;
 945
 946            let diff_base = if let Some(index_task) = index_task {
 947                index_task.await
 948            } else {
 949                None
 950            };
 951
 952            // Eagerly populate the snapshot with an updated entry for the loaded file
 953            let entry = this
 954                .update(&mut cx, |this, cx| {
 955                    this.as_local().unwrap().refresh_entry(path, None, cx)
 956                })
 957                .await?;
 958
 959            Ok((
 960                File {
 961                    entry_id: entry.id,
 962                    worktree: handle,
 963                    path: entry.path,
 964                    mtime: entry.mtime,
 965                    is_local: true,
 966                    is_deleted: false,
 967                },
 968                text,
 969                diff_base,
 970            ))
 971        })
 972    }
 973
 974    pub fn save_buffer(
 975        &self,
 976        buffer_handle: ModelHandle<Buffer>,
 977        path: Arc<Path>,
 978        has_changed_file: bool,
 979        cx: &mut ModelContext<Worktree>,
 980    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
 981        let handle = cx.handle();
 982        let buffer = buffer_handle.read(cx);
 983
 984        let rpc = self.client.clone();
 985        let buffer_id = buffer.remote_id();
 986        let project_id = self.share.as_ref().map(|share| share.project_id);
 987
 988        let text = buffer.as_rope().clone();
 989        let fingerprint = text.fingerprint();
 990        let version = buffer.version();
 991        let save = self.write_file(path, text, buffer.line_ending(), cx);
 992
 993        cx.as_mut().spawn(|mut cx| async move {
 994            let entry = save.await?;
 995
 996            if has_changed_file {
 997                let new_file = Arc::new(File {
 998                    entry_id: entry.id,
 999                    worktree: handle,
1000                    path: entry.path,
1001                    mtime: entry.mtime,
1002                    is_local: true,
1003                    is_deleted: false,
1004                });
1005
1006                if let Some(project_id) = project_id {
1007                    rpc.send(proto::UpdateBufferFile {
1008                        project_id,
1009                        buffer_id,
1010                        file: Some(new_file.to_proto()),
1011                    })
1012                    .log_err();
1013                }
1014
1015                buffer_handle.update(&mut cx, |buffer, cx| {
                    buffer.file_updated(new_file, cx).detach();
1019                });
1020            }
1021
1022            if let Some(project_id) = project_id {
1023                rpc.send(proto::BufferSaved {
1024                    project_id,
1025                    buffer_id,
1026                    version: serialize_version(&version),
1027                    mtime: Some(entry.mtime.into()),
1028                    fingerprint: serialize_fingerprint(fingerprint),
1029                })?;
1030            }
1031
1032            buffer_handle.update(&mut cx, |buffer, cx| {
1033                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1034            });
1035
1036            Ok((version, fingerprint, entry.mtime))
1037        })
1038    }
1039
1040    pub fn create_entry(
1041        &self,
1042        path: impl Into<Arc<Path>>,
1043        is_dir: bool,
1044        cx: &mut ModelContext<Worktree>,
1045    ) -> Task<Result<Entry>> {
1046        let path = path.into();
1047        let abs_path = self.absolutize(&path);
1048        let fs = self.fs.clone();
1049        let write = cx.background().spawn(async move {
1050            if is_dir {
1051                fs.create_dir(&abs_path).await
1052            } else {
1053                fs.save(&abs_path, &Default::default(), Default::default())
1054                    .await
1055            }
1056        });
1057
1058        cx.spawn(|this, mut cx| async move {
1059            write.await?;
1060            this.update(&mut cx, |this, cx| {
1061                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1062            })
1063            .await
1064        })
1065    }
1066
1067    pub fn write_file(
1068        &self,
1069        path: impl Into<Arc<Path>>,
1070        text: Rope,
1071        line_ending: LineEnding,
1072        cx: &mut ModelContext<Worktree>,
1073    ) -> Task<Result<Entry>> {
1074        let path = path.into();
1075        let abs_path = self.absolutize(&path);
1076        let fs = self.fs.clone();
1077        let write = cx
1078            .background()
1079            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1080
1081        cx.spawn(|this, mut cx| async move {
1082            write.await?;
1083            this.update(&mut cx, |this, cx| {
1084                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1085            })
1086            .await
1087        })
1088    }
1089
1090    pub fn delete_entry(
1091        &self,
1092        entry_id: ProjectEntryId,
1093        cx: &mut ModelContext<Worktree>,
1094    ) -> Option<Task<Result<()>>> {
1095        let entry = self.entry_for_id(entry_id)?.clone();
1096        let abs_path = self.abs_path.clone();
1097        let fs = self.fs.clone();
1098
1099        let delete = cx.background().spawn(async move {
1100            let mut abs_path = fs.canonicalize(&abs_path).await?;
1101            if entry.path.file_name().is_some() {
1102                abs_path = abs_path.join(&entry.path);
1103            }
1104            if entry.is_file() {
1105                fs.remove_file(&abs_path, Default::default()).await?;
1106            } else {
1107                fs.remove_dir(
1108                    &abs_path,
1109                    RemoveOptions {
1110                        recursive: true,
1111                        ignore_if_not_exists: false,
1112                    },
1113                )
1114                .await?;
1115            }
1116            anyhow::Ok(abs_path)
1117        });
1118
1119        Some(cx.spawn(|this, mut cx| async move {
1120            let abs_path = delete.await?;
1121            let (tx, mut rx) = barrier::channel();
1122            this.update(&mut cx, |this, _| {
1123                this.as_local_mut()
1124                    .unwrap()
1125                    .path_changes_tx
1126                    .try_send((vec![abs_path], tx))
1127            })?;
1128            rx.recv().await;
1129            Ok(())
1130        }))
1131    }
1132
1133    pub fn rename_entry(
1134        &self,
1135        entry_id: ProjectEntryId,
1136        new_path: impl Into<Arc<Path>>,
1137        cx: &mut ModelContext<Worktree>,
1138    ) -> Option<Task<Result<Entry>>> {
1139        let old_path = self.entry_for_id(entry_id)?.path.clone();
1140        let new_path = new_path.into();
1141        let abs_old_path = self.absolutize(&old_path);
1142        let abs_new_path = self.absolutize(&new_path);
1143        let fs = self.fs.clone();
1144        let rename = cx.background().spawn(async move {
1145            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1146                .await
1147        });
1148
1149        Some(cx.spawn(|this, mut cx| async move {
1150            rename.await?;
1151            this.update(&mut cx, |this, cx| {
1152                this.as_local_mut()
1153                    .unwrap()
1154                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1155            })
1156            .await
1157        }))
1158    }
1159
1160    pub fn copy_entry(
1161        &self,
1162        entry_id: ProjectEntryId,
1163        new_path: impl Into<Arc<Path>>,
1164        cx: &mut ModelContext<Worktree>,
1165    ) -> Option<Task<Result<Entry>>> {
1166        let old_path = self.entry_for_id(entry_id)?.path.clone();
1167        let new_path = new_path.into();
1168        let abs_old_path = self.absolutize(&old_path);
1169        let abs_new_path = self.absolutize(&new_path);
1170        let fs = self.fs.clone();
1171        let copy = cx.background().spawn(async move {
1172            copy_recursive(
1173                fs.as_ref(),
1174                &abs_old_path,
1175                &abs_new_path,
1176                Default::default(),
1177            )
1178            .await
1179        });
1180
1181        Some(cx.spawn(|this, mut cx| async move {
1182            copy.await?;
1183            this.update(&mut cx, |this, cx| {
1184                this.as_local_mut()
1185                    .unwrap()
1186                    .refresh_entry(new_path.clone(), None, cx)
1187            })
1188            .await
1189        }))
1190    }
1191
1192    fn refresh_entry(
1193        &self,
1194        path: Arc<Path>,
1195        old_path: Option<Arc<Path>>,
1196        cx: &mut ModelContext<Worktree>,
1197    ) -> Task<Result<Entry>> {
1198        let fs = self.fs.clone();
1199        let abs_root_path = self.abs_path.clone();
1200        let path_changes_tx = self.path_changes_tx.clone();
1201        cx.spawn_weak(move |this, mut cx| async move {
1202            let abs_path = fs.canonicalize(&abs_root_path).await?;
1203            let mut paths = Vec::with_capacity(2);
1204            paths.push(if path.file_name().is_some() {
1205                abs_path.join(&path)
1206            } else {
1207                abs_path.clone()
1208            });
1209            if let Some(old_path) = old_path {
1210                paths.push(if old_path.file_name().is_some() {
1211                    abs_path.join(&old_path)
1212                } else {
1213                    abs_path.clone()
1214                });
1215            }
1216
1217            let (tx, mut rx) = barrier::channel();
1218            path_changes_tx.try_send((paths, tx))?;
1219            rx.recv().await;
1220            this.upgrade(&cx)
1221                .ok_or_else(|| anyhow!("worktree was dropped"))?
1222                .update(&mut cx, |this, _| {
1223                    this.entry_for_path(path)
1224                        .cloned()
1225                        .ok_or_else(|| anyhow!("failed to read path after update"))
1226                })
1227        })
1228    }
1229
1230    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1231        let (share_tx, share_rx) = oneshot::channel();
1232
1233        if let Some(share) = self.share.as_mut() {
1234            let _ = share_tx.send(());
1235            *share.resume_updates.borrow_mut() = ();
1236        } else {
1237            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1238            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1239            let worktree_id = cx.model_id() as u64;
1240
1241            for (path, summaries) in &self.diagnostic_summaries {
1242                for (&server_id, summary) in summaries {
1243                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1244                        project_id,
1245                        worktree_id,
1246                        summary: Some(summary.to_proto(server_id, &path)),
1247                    }) {
1248                        return Task::ready(Err(e));
1249                    }
1250                }
1251            }
1252
1253            let _maintain_remote_snapshot = cx.background().spawn({
1254                let client = self.client.clone();
1255                async move {
1256                    let mut share_tx = Some(share_tx);
1257                    let mut prev_snapshot = LocalSnapshot {
1258                        ignores_by_parent_abs_path: Default::default(),
1259                        git_repositories: Default::default(),
1260                        snapshot: Snapshot {
1261                            id: WorktreeId(worktree_id as usize),
1262                            abs_path: Path::new("").into(),
1263                            root_name: Default::default(),
1264                            root_char_bag: Default::default(),
1265                            entries_by_path: Default::default(),
1266                            entries_by_id: Default::default(),
1267                            repository_entries: Default::default(),
1268                            scan_id: 0,
1269                            completed_scan_id: 0,
1270                        },
1271                    };
1272                    while let Some(snapshot) = snapshots_rx.recv().await {
1273                        #[cfg(any(test, feature = "test-support"))]
1274                        const MAX_CHUNK_SIZE: usize = 2;
1275                        #[cfg(not(any(test, feature = "test-support")))]
1276                        const MAX_CHUNK_SIZE: usize = 256;
1277
1278                        let update =
1279                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1280                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1281                            let _ = resume_updates_rx.try_recv();
1282                            while let Err(error) = client.request(update.clone()).await {
1283                                log::error!("failed to send worktree update: {}", error);
1284                                log::info!("waiting to resume updates");
1285                                if resume_updates_rx.next().await.is_none() {
1286                                    return Ok(());
1287                                }
1288                            }
1289                        }
1290
1291                        if let Some(share_tx) = share_tx.take() {
1292                            let _ = share_tx.send(());
1293                        }
1294
1295                        prev_snapshot = snapshot;
1296                    }
1297
1298                    Ok::<_, anyhow::Error>(())
1299                }
1300                .log_err()
1301            });
1302
1303            self.share = Some(ShareState {
1304                project_id,
1305                snapshots_tx,
1306                resume_updates: resume_updates_tx,
1307                _maintain_remote_snapshot,
1308            });
1309        }
1310
1311        cx.foreground()
1312            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1313    }
1314
1315    pub fn unshare(&mut self) {
1316        self.share.take();
1317    }
1318
1319    pub fn is_shared(&self) -> bool {
1320        self.share.is_some()
1321    }
1322}
1323
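// Sketch of the barrier hand-off used by `refresh_entry` and `delete_entry`
// above, mirroring how this file already uses `postage::barrier`: each batch
// of paths sent over `path_changes_tx` is paired with a `barrier::Sender`, and
// the background scanner drops that sender once the batch has been processed,
// which lets the paired receiver's `recv` complete.
#[cfg(test)]
mod barrier_example {
    use postage::{barrier, prelude::Stream as _};

    #[test]
    fn recv_completes_once_the_sender_is_dropped() {
        smol::block_on(async {
            let (tx, mut rx) = barrier::channel();
            // Simulate the scanner finishing the batch associated with `tx`.
            drop(tx);
            // With no sender left, the awaiting side is released.
            rx.recv().await;
        });
    }
}
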
1324impl RemoteWorktree {
1325    fn snapshot(&self) -> Snapshot {
1326        self.snapshot.clone()
1327    }
1328
1329    pub fn disconnected_from_host(&mut self) {
1330        self.updates_tx.take();
1331        self.snapshot_subscriptions.clear();
1332        self.disconnected = true;
1333    }
1334
1335    pub fn save_buffer(
1336        &self,
1337        buffer_handle: ModelHandle<Buffer>,
1338        cx: &mut ModelContext<Worktree>,
1339    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1340        let buffer = buffer_handle.read(cx);
1341        let buffer_id = buffer.remote_id();
1342        let version = buffer.version();
1343        let rpc = self.client.clone();
1344        let project_id = self.project_id;
1345        cx.as_mut().spawn(|mut cx| async move {
1346            let response = rpc
1347                .request(proto::SaveBuffer {
1348                    project_id,
1349                    buffer_id,
1350                    version: serialize_version(&version),
1351                })
1352                .await?;
1353            let version = deserialize_version(&response.version);
1354            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1355            let mtime = response
1356                .mtime
1357                .ok_or_else(|| anyhow!("missing mtime"))?
1358                .into();
1359
1360            buffer_handle.update(&mut cx, |buffer, cx| {
1361                buffer.did_save(version.clone(), fingerprint, mtime, cx);
1362            });
1363
1364            Ok((version, fingerprint, mtime))
1365        })
1366    }
1367
1368    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1369        if let Some(updates_tx) = &self.updates_tx {
1370            updates_tx
1371                .unbounded_send(update)
1372                .expect("consumer runs to completion");
1373        }
1374    }
1375
1376    fn observed_snapshot(&self, scan_id: usize) -> bool {
1377        self.completed_scan_id >= scan_id
1378    }
1379
1380    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1381        let (tx, rx) = oneshot::channel();
1382        if self.observed_snapshot(scan_id) {
1383            let _ = tx.send(());
1384        } else if self.disconnected {
1385            drop(tx);
1386        } else {
1387            match self
1388                .snapshot_subscriptions
1389                .binary_search_by_key(&scan_id, |probe| probe.0)
1390            {
1391                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1392            }
1393        }
1394
1395        async move {
1396            rx.await?;
1397            Ok(())
1398        }
1399    }
1400
1401    pub fn update_diagnostic_summary(
1402        &mut self,
1403        path: Arc<Path>,
1404        summary: &proto::DiagnosticSummary,
1405    ) {
1406        let server_id = LanguageServerId(summary.language_server_id as usize);
1407        let summary = DiagnosticSummary {
1408            error_count: summary.error_count as usize,
1409            warning_count: summary.warning_count as usize,
1410        };
1411
1412        if summary.is_empty() {
1413            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1414                summaries.remove(&server_id);
1415                if summaries.is_empty() {
1416                    self.diagnostic_summaries.remove(&path);
1417                }
1418            }
1419        } else {
1420            self.diagnostic_summaries
1421                .entry(path)
1422                .or_default()
1423                .insert(server_id, summary);
1424        }
1425    }
1426
1427    pub fn insert_entry(
1428        &mut self,
1429        entry: proto::Entry,
1430        scan_id: usize,
1431        cx: &mut ModelContext<Worktree>,
1432    ) -> Task<Result<Entry>> {
1433        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1434        cx.spawn(|this, mut cx| async move {
1435            wait_for_snapshot.await?;
1436            this.update(&mut cx, |worktree, _| {
1437                let worktree = worktree.as_remote_mut().unwrap();
1438                let mut snapshot = worktree.background_snapshot.lock();
1439                let entry = snapshot.insert_entry(entry);
1440                worktree.snapshot = snapshot.clone();
1441                entry
1442            })
1443        })
1444    }
1445
1446    pub(crate) fn delete_entry(
1447        &mut self,
1448        id: ProjectEntryId,
1449        scan_id: usize,
1450        cx: &mut ModelContext<Worktree>,
1451    ) -> Task<Result<()>> {
1452        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1453        cx.spawn(|this, mut cx| async move {
1454            wait_for_snapshot.await?;
1455            this.update(&mut cx, |worktree, _| {
1456                let worktree = worktree.as_remote_mut().unwrap();
1457                let mut snapshot = worktree.background_snapshot.lock();
1458                snapshot.delete_entry(id);
1459                worktree.snapshot = snapshot.clone();
1460            });
1461            Ok(())
1462        })
1463    }
1464}
1465
1466impl Snapshot {
1467    pub fn id(&self) -> WorktreeId {
1468        self.id
1469    }
1470
1471    pub fn abs_path(&self) -> &Arc<Path> {
1472        &self.abs_path
1473    }
1474
1475    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1476        self.entries_by_id.get(&entry_id, &()).is_some()
1477    }
1478
1479    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1480        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1481        let old_entry = self.entries_by_id.insert_or_replace(
1482            PathEntry {
1483                id: entry.id,
1484                path: entry.path.clone(),
1485                is_ignored: entry.is_ignored,
1486                scan_id: 0,
1487            },
1488            &(),
1489        );
1490        if let Some(old_entry) = old_entry {
1491            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1492        }
1493        self.entries_by_path.insert_or_replace(entry.clone(), &());
1494        Ok(entry)
1495    }
1496
1497    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1498        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1499        self.entries_by_path = {
1500            let mut cursor = self.entries_by_path.cursor();
1501            let mut new_entries_by_path =
1502                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1503            while let Some(entry) = cursor.item() {
1504                if entry.path.starts_with(&removed_entry.path) {
1505                    self.entries_by_id.remove(&entry.id, &());
1506                    cursor.next(&());
1507                } else {
1508                    break;
1509                }
1510            }
1511            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1512            new_entries_by_path
1513        };
1514
1515        Some(removed_entry.path)
1516    }
1517
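    // Applies an update streamed from the host: removed entries first, then
    // updated entries, then repository removals and updates, and finally the
    // scan ids. A sketch of the kind of message this consumes (field values are
    // hypothetical; the remaining fields are left at their defaults):
    //
    //     let update = proto::UpdateWorktree {
    //         removed_entries: vec![42],
    //         updated_entries: vec![/* proto::Entry values */],
    //         scan_id: 7,
    //         is_last_update: true,
    //         ..Default::default()
    //     };
    //     snapshot.apply_remote_update(update)?;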
1518    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1519        let mut entries_by_path_edits = Vec::new();
1520        let mut entries_by_id_edits = Vec::new();
1521        for entry_id in update.removed_entries {
1522            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1523                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1524                entries_by_id_edits.push(Edit::Remove(entry.id));
1525            }
1526        }
1527
1528        for entry in update.updated_entries {
1529            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1530            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1531                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1532            }
1533            entries_by_id_edits.push(Edit::Insert(PathEntry {
1534                id: entry.id,
1535                path: entry.path.clone(),
1536                is_ignored: entry.is_ignored,
1537                scan_id: 0,
1538            }));
1539            entries_by_path_edits.push(Edit::Insert(entry));
1540        }
1541
1542        self.entries_by_path.edit(entries_by_path_edits, &());
1543        self.entries_by_id.edit(entries_by_id_edits, &());
1544
1545        update.removed_repositories.sort_unstable();
1546        self.repository_entries.retain(|_, entry| {
1547            update
1548                .removed_repositories
1549                .binary_search(&entry.work_directory.to_proto())
1550                .is_err()
1555        });
1556
1557        for repository in update.updated_repositories {
1558            let work_directory_entry: WorkDirectoryEntry =
1559                ProjectEntryId::from_proto(repository.work_directory_id).into();
1560
1561            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1562                let mut statuses = TreeMap::default();
1563                for status_entry in repository.updated_statuses {
1564                    let Some(git_file_status) = read_git_status(status_entry.status) else {
1565                        continue;
1566                    };
1567
1568                    let repo_path = RepoPath::new(status_entry.repo_path.into());
1569                    statuses.insert(repo_path, git_file_status);
1570                }
1571
1572                let work_directory = RepositoryWorkDirectory(entry.path.clone());
1573                if self.repository_entries.get(&work_directory).is_some() {
1574                    self.repository_entries.update(&work_directory, |repo| {
1575                        repo.branch = repository.branch.map(Into::into);
1576                        repo.statuses.insert_tree(statuses);
1577
1578                        for repo_path in repository.removed_repo_paths {
1579                            let repo_path = RepoPath::new(repo_path.into());
1580                            repo.statuses.remove(&repo_path);
1581                        }
1582                    });
1583                } else {
1584                    self.repository_entries.insert(
1585                        work_directory,
1586                        RepositoryEntry {
1587                            work_directory: work_directory_entry,
1588                            branch: repository.branch.map(Into::into),
1589                            statuses,
1590                        },
1591                    )
1592                }
1593            } else {
1594                log::error!("no work directory entry for repository {:?}", repository)
1595            }
1596        }
1597
1598        self.scan_id = update.scan_id as usize;
1599        if update.is_last_update {
1600            self.completed_scan_id = update.scan_id as usize;
1601        }
1602
1603        Ok(())
1604    }
1605
1606    pub fn file_count(&self) -> usize {
1607        self.entries_by_path.summary().file_count
1608    }
1609
1610    pub fn visible_file_count(&self) -> usize {
1611        self.entries_by_path.summary().visible_file_count
1612    }
1613
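    // Seeks the entry cursor to the `start_offset`-th entry matching the
    // `include_dirs` / `include_ignored` filters, using the counts aggregated in
    // `EntrySummary` rather than walking entries one by one. A hypothetical use
    // through `files`:
    //
    //     let mut traversal = snapshot.files(false, 500);
    //     let entry = traversal.entry(); // the visible file at index 500, if any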
1614    fn traverse_from_offset(
1615        &self,
1616        include_dirs: bool,
1617        include_ignored: bool,
1618        start_offset: usize,
1619    ) -> Traversal {
1620        let mut cursor = self.entries_by_path.cursor();
1621        cursor.seek(
1622            &TraversalTarget::Count {
1623                count: start_offset,
1624                include_dirs,
1625                include_ignored,
1626            },
1627            Bias::Right,
1628            &(),
1629        );
1630        Traversal {
1631            cursor,
1632            include_dirs,
1633            include_ignored,
1634        }
1635    }
1636
1637    fn traverse_from_path(
1638        &self,
1639        include_dirs: bool,
1640        include_ignored: bool,
1641        path: &Path,
1642    ) -> Traversal {
1643        let mut cursor = self.entries_by_path.cursor();
1644        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1645        Traversal {
1646            cursor,
1647            include_dirs,
1648            include_ignored,
1649        }
1650    }
1651
1652    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1653        self.traverse_from_offset(false, include_ignored, start)
1654    }
1655
1656    pub fn entries(&self, include_ignored: bool) -> Traversal {
1657        self.traverse_from_offset(true, include_ignored, 0)
1658    }
1659
1660    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1661        self.repository_entries
1662            .iter()
1663            .map(|(path, entry)| (&path.0, entry))
1664    }
1665
1666    /// Get the repository whose work directory is the given path.
1667    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1668        self.repository_entries
1669            .get(&RepositoryWorkDirectory(path.into()))
1670            .cloned()
1671    }
1672
1673    /// Get the repository whose work directory contains the given path.
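    ///
    /// For example (hypothetical layout), with repositories whose work
    /// directories are `""` and `"sub"`, a `path` of `"sub/a.txt"` resolves to
    /// the `"sub"` repository, since the longest matching work directory wins.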
1674    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1675        let mut max_len = 0;
1676        let mut current_candidate = None;
1677        for (work_directory, repo) in (&self.repository_entries).iter() {
1678            if path.starts_with(&work_directory.0) {
1679                if work_directory.0.as_os_str().len() >= max_len {
1680                    current_candidate = Some(repo);
1681                    max_len = work_directory.0.as_os_str().len();
1682                } else {
1683                    break;
1684                }
1685            }
1686        }
1687
1688        current_candidate.cloned()
1689    }
1690
1691    /// Given an ordered iterator of entries, returns an iterator of those entries,
1692    /// along with their containing git repository.
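    ///
    /// A minimal usage sketch, pairing every visible entry with the repository
    /// that contains it (if any):
    ///
    ///     let pairs: Vec<_> = snapshot
    ///         .entries_with_repositories(snapshot.entries(false))
    ///         .collect();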
1693    pub fn entries_with_repositories<'a>(
1694        &'a self,
1695        entries: impl 'a + Iterator<Item = &'a Entry>,
1696    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1697        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1698        let mut repositories = self.repositories().peekable();
1699        entries.map(move |entry| {
1700            while let Some((repo_path, _)) = containing_repos.last() {
1701                if !entry.path.starts_with(repo_path) {
1702                    containing_repos.pop();
1703                } else {
1704                    break;
1705                }
1706            }
1707            while let Some((repo_path, _)) = repositories.peek() {
1708                if entry.path.starts_with(repo_path) {
1709                    containing_repos.push(repositories.next().unwrap());
1710                } else {
1711                    break;
1712                }
1713            }
1714            let repo = containing_repos.last().map(|(_, repo)| *repo);
1715            (entry, repo)
1716        })
1717    }
1718
1719    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1720        let empty_path = Path::new("");
1721        self.entries_by_path
1722            .cursor::<()>()
1723            .filter(move |entry| entry.path.as_ref() != empty_path)
1724            .map(|entry| &entry.path)
1725    }
1726
1727    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1728        let mut cursor = self.entries_by_path.cursor();
1729        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1730        let traversal = Traversal {
1731            cursor,
1732            include_dirs: true,
1733            include_ignored: true,
1734        };
1735        ChildEntriesIter {
1736            traversal,
1737            parent_path,
1738        }
1739    }
1740
1741    fn descendent_entries<'a>(
1742        &'a self,
1743        include_dirs: bool,
1744        include_ignored: bool,
1745        parent_path: &'a Path,
1746    ) -> DescendentEntriesIter<'a> {
1747        let mut cursor = self.entries_by_path.cursor();
1748        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1749        let mut traversal = Traversal {
1750            cursor,
1751            include_dirs,
1752            include_ignored,
1753        };
1754
1755        if traversal.end_offset() == traversal.start_offset() {
1756            traversal.advance();
1757        }
1758
1759        DescendentEntriesIter {
1760            traversal,
1761            parent_path,
1762        }
1763    }
1764
1765    pub fn root_entry(&self) -> Option<&Entry> {
1766        self.entry_for_path("")
1767    }
1768
1769    pub fn root_name(&self) -> &str {
1770        &self.root_name
1771    }
1772
1773    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1774        self.repository_entries
1775            .get(&RepositoryWorkDirectory(Path::new("").into()))
1776            .map(|entry| entry.to_owned())
1777    }
1778
1779    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1780        self.repository_entries.values()
1781    }
1782
1783    pub fn scan_id(&self) -> usize {
1784        self.scan_id
1785    }
1786
1787    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1788        let path = path.as_ref();
1789        self.traverse_from_path(true, true, path)
1790            .entry()
1791            .and_then(|entry| {
1792                if entry.path.as_ref() == path {
1793                    Some(entry)
1794                } else {
1795                    None
1796                }
1797            })
1798    }
1799
1800    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1801        let entry = self.entries_by_id.get(&id, &())?;
1802        self.entry_for_path(&entry.path)
1803    }
1804
1805    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1806        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1807    }
1808}
1809
1810impl LocalSnapshot {
1811    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1812        self.git_repositories.get(&repo.work_directory.0)
1813    }
1814
1815    pub(crate) fn repo_for_metadata(
1816        &self,
1817        path: &Path,
1818    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1819        self.git_repositories
1820            .iter()
1821            .find(|(_, repo)| repo.in_dot_git(path))
1822    }
1823
1824    #[cfg(test)]
1825    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1826        let root_name = self.root_name.clone();
1827        proto::UpdateWorktree {
1828            project_id,
1829            worktree_id: self.id().to_proto(),
1830            abs_path: self.abs_path().to_string_lossy().into(),
1831            root_name,
1832            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1833            removed_entries: Default::default(),
1834            scan_id: self.scan_id as u64,
1835            is_last_update: true,
1836            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1837            removed_repositories: Default::default(),
1838        }
1839    }
1840
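    // Computes the delta between this snapshot and `other` by walking both
    // `entries_by_id` trees (ordered by entry id) and both repository maps
    // (ordered by work directory) in lock step: ids present only in `self`
    // become `updated_entries`, ids present only in `other` become
    // `removed_entries`, and matching ids are re-sent only when their `scan_id`s
    // differ.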
1841    pub(crate) fn build_update(
1842        &self,
1843        other: &Self,
1844        project_id: u64,
1845        worktree_id: u64,
1846        include_ignored: bool,
1847    ) -> proto::UpdateWorktree {
1848        let mut updated_entries = Vec::new();
1849        let mut removed_entries = Vec::new();
1850        let mut self_entries = self
1851            .entries_by_id
1852            .cursor::<()>()
1853            .filter(|e| include_ignored || !e.is_ignored)
1854            .peekable();
1855        let mut other_entries = other
1856            .entries_by_id
1857            .cursor::<()>()
1858            .filter(|e| include_ignored || !e.is_ignored)
1859            .peekable();
1860        loop {
1861            match (self_entries.peek(), other_entries.peek()) {
1862                (Some(self_entry), Some(other_entry)) => {
1863                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1864                        Ordering::Less => {
1865                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1866                            updated_entries.push(entry);
1867                            self_entries.next();
1868                        }
1869                        Ordering::Equal => {
1870                            if self_entry.scan_id != other_entry.scan_id {
1871                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1872                                updated_entries.push(entry);
1873                            }
1874
1875                            self_entries.next();
1876                            other_entries.next();
1877                        }
1878                        Ordering::Greater => {
1879                            removed_entries.push(other_entry.id.to_proto());
1880                            other_entries.next();
1881                        }
1882                    }
1883                }
1884                (Some(self_entry), None) => {
1885                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1886                    updated_entries.push(entry);
1887                    self_entries.next();
1888                }
1889                (None, Some(other_entry)) => {
1890                    removed_entries.push(other_entry.id.to_proto());
1891                    other_entries.next();
1892                }
1893                (None, None) => break,
1894            }
1895        }
1896
1897        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1898        let mut removed_repositories = Vec::new();
1899        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1900        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1901        loop {
1902            match (self_repos.peek(), other_repos.peek()) {
1903                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1904                    match Ord::cmp(self_work_dir, other_work_dir) {
1905                        Ordering::Less => {
1906                            updated_repositories.push((*self_repo).into());
1907                            self_repos.next();
1908                        }
1909                        Ordering::Equal => {
1910                            if self_repo != other_repo {
1911                                updated_repositories.push(self_repo.build_update(other_repo));
1912                            }
1913
1914                            self_repos.next();
1915                            other_repos.next();
1916                        }
1917                        Ordering::Greater => {
1918                            removed_repositories.push(other_repo.work_directory.to_proto());
1919                            other_repos.next();
1920                        }
1921                    }
1922                }
1923                (Some((_, self_repo)), None) => {
1924                    updated_repositories.push((*self_repo).into());
1925                    self_repos.next();
1926                }
1927                (None, Some((_, other_repo))) => {
1928                    removed_repositories.push(other_repo.work_directory.to_proto());
1929                    other_repos.next();
1930                }
1931                (None, None) => break,
1932            }
1933        }
1934
1935        proto::UpdateWorktree {
1936            project_id,
1937            worktree_id,
1938            abs_path: self.abs_path().to_string_lossy().into(),
1939            root_name: self.root_name().to_string(),
1940            updated_entries,
1941            removed_entries,
1942            scan_id: self.scan_id as u64,
1943            is_last_update: self.completed_scan_id == self.scan_id,
1944            updated_repositories,
1945            removed_repositories,
1946        }
1947    }
1948
1949    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1950        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1951            let abs_path = self.abs_path.join(&entry.path);
1952            match smol::block_on(build_gitignore(&abs_path, fs)) {
1953                Ok(ignore) => {
1954                    self.ignores_by_parent_abs_path
1955                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
1956                }
1957                Err(error) => {
1958                    log::error!(
1959                        "error loading .gitignore file {:?} - {:?}",
1960                        &entry.path,
1961                        error
1962                    );
1963                }
1964            }
1965        }
1966
1967        if entry.kind == EntryKind::PendingDir {
1968            if let Some(existing_entry) =
1969                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1970            {
1971                entry.kind = existing_entry.kind;
1972            }
1973        }
1974
1975        let scan_id = self.scan_id;
1976        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1977        if let Some(removed) = removed {
1978            if removed.id != entry.id {
1979                self.entries_by_id.remove(&removed.id, &());
1980            }
1981        }
1982        self.entries_by_id.insert_or_replace(
1983            PathEntry {
1984                id: entry.id,
1985                path: entry.path.clone(),
1986                is_ignored: entry.is_ignored,
1987                scan_id,
1988            },
1989            &(),
1990        );
1991
1992        entry
1993    }
1994
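    // Registers the git repository whose `.git` directory lives at `parent_path`
    // (relative to the worktree root), keyed by its work directory. For example
    // (hypothetical path), scanning `crates/foo/.git` opens the repository via
    // `fs.open_repo` and records a `RepositoryEntry` for the `crates/foo` work
    // directory.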
1995    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1996        let abs_path = self.abs_path.join(&parent_path);
1997        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1998
1999        // Guard against registering a repository nested inside another
2000        // repository's `.git` metadata directory.
2001        if work_dir
2002            .components()
2003            .any(|component| component.as_os_str() == *DOT_GIT)
2004        {
2005            return None;
2006        }
2007
2008        let work_dir_id = self
2009            .entry_for_path(work_dir.clone())
2010            .map(|entry| entry.id)?;
2011
2012        if self.git_repositories.get(&work_dir_id).is_none() {
2013            let repo = fs.open_repo(abs_path.as_path())?;
2014            let work_directory = RepositoryWorkDirectory(work_dir.clone());
2015            let scan_id = self.scan_id;
2016
2017            let repo_lock = repo.lock();
2018
2019            self.repository_entries.insert(
2020                work_directory,
2021                RepositoryEntry {
2022                    work_directory: work_dir_id.into(),
2023                    branch: repo_lock.branch_name().map(Into::into),
2024                    statuses: repo_lock.statuses().unwrap_or_default(),
2025                },
2026            );
2027            drop(repo_lock);
2028
2029            self.git_repositories.insert(
2030                work_dir_id,
2031                LocalRepositoryEntry {
2032                    scan_id,
2033                    git_dir_scan_id: scan_id,
2034                    repo_ptr: repo,
2035                    git_dir_path: parent_path.clone(),
2036                },
2037            )
2038        }
2039
2040        Some(())
2041    }
2042
2043    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2044        let mut inodes = TreeSet::default();
2045        for ancestor in path.ancestors().skip(1) {
2046            if let Some(entry) = self.entry_for_path(ancestor) {
2047                inodes.insert(entry.inode);
2048            }
2049        }
2050        inodes
2051    }
2052
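    // Builds the stack of `.gitignore`s that apply to `abs_path`, from the
    // outermost ancestor down to the immediate parent; if any level already
    // ignores the path, the stack collapses to `IgnoreStack::all()`. For example
    // (hypothetical layout), for `/repo/target/debug/foo` where `/repo/.gitignore`
    // contains `target/`, the resulting stack reports the path as ignored.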
2053    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2054        let mut new_ignores = Vec::new();
2055        for ancestor in abs_path.ancestors().skip(1) {
2056            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2057                new_ignores.push((ancestor, Some(ignore.clone())));
2058            } else {
2059                new_ignores.push((ancestor, None));
2060            }
2061        }
2062
2063        let mut ignore_stack = IgnoreStack::none();
2064        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2065            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2066                ignore_stack = IgnoreStack::all();
2067                break;
2068            } else if let Some(ignore) = ignore {
2069                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2070            }
2071        }
2072
2073        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2074            ignore_stack = IgnoreStack::all();
2075        }
2076
2077        ignore_stack
2078    }
2079}
2080
2081impl BackgroundScannerState {
2082    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2083        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2084            entry.id = removed_entry_id;
2085        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
2086            entry.id = existing_entry.id;
2087        }
2088    }
2089
2090    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2091        self.reuse_entry_id(&mut entry);
2092        self.snapshot.insert_entry(entry, fs)
2093    }
2094
2095    fn populate_dir(
2096        &mut self,
2097        parent_path: Arc<Path>,
2098        entries: impl IntoIterator<Item = Entry>,
2099        ignore: Option<Arc<Gitignore>>,
2100        fs: &dyn Fs,
2101    ) {
2102        let mut parent_entry = if let Some(parent_entry) = self
2103            .snapshot
2104            .entries_by_path
2105            .get(&PathKey(parent_path.clone()), &())
2106        {
2107            parent_entry.clone()
2108        } else {
2109            log::warn!(
2110                "populating a directory {:?} that has been removed",
2111                parent_path
2112            );
2113            return;
2114        };
2115
2116        match parent_entry.kind {
2117            EntryKind::PendingDir => {
2118                parent_entry.kind = EntryKind::Dir;
2119            }
2120            EntryKind::Dir => {}
2121            _ => return,
2122        }
2123
2124        if let Some(ignore) = ignore {
2125            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
2126            self.snapshot
2127                .ignores_by_parent_abs_path
2128                .insert(abs_parent_path, (ignore, false));
2129        }
2130
2131        if parent_path.file_name() == Some(&DOT_GIT) {
2132            self.snapshot.build_repo(parent_path, fs);
2133        }
2134
2135        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2136        let mut entries_by_id_edits = Vec::new();
2137
2138        for mut entry in entries {
2139            self.reuse_entry_id(&mut entry);
2140            entries_by_id_edits.push(Edit::Insert(PathEntry {
2141                id: entry.id,
2142                path: entry.path.clone(),
2143                is_ignored: entry.is_ignored,
2144                scan_id: self.snapshot.scan_id,
2145            }));
2146            entries_by_path_edits.push(Edit::Insert(entry));
2147        }
2148
2149        self.snapshot
2150            .entries_by_path
2151            .edit(entries_by_path_edits, &());
2152        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2153    }
2154
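    // Removes `path` and everything beneath it from the snapshot: the cursor
    // keeps entries strictly before `path`, slices off the subtree up to the
    // path's successor, and re-appends the remaining suffix. The removed entry
    // ids are remembered per inode so that a path that is removed and quickly
    // re-created can reuse its previous `ProjectEntryId` (see `reuse_entry_id`).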
2155    fn remove_path(&mut self, path: &Path) {
2156        let mut new_entries;
2157        let removed_entries;
2158        {
2159            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
2160            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2161            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2162            new_entries.push_tree(cursor.suffix(&()), &());
2163        }
2164        self.snapshot.entries_by_path = new_entries;
2165
2166        let mut entries_by_id_edits = Vec::new();
2167        for entry in removed_entries.cursor::<()>() {
2168            let removed_entry_id = self
2169                .removed_entry_ids
2170                .entry(entry.inode)
2171                .or_insert(entry.id);
2172            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2173            entries_by_id_edits.push(Edit::Remove(entry.id));
2174        }
2175        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2176
2177        if path.file_name() == Some(&GITIGNORE) {
2178            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
2179            if let Some((_, needs_update)) = self
2180                .snapshot
2181                .ignores_by_parent_abs_path
2182                .get_mut(abs_parent_path.as_path())
2183            {
2184                *needs_update = true;
2185            }
2186        }
2187    }
2188}
2189
2190async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2191    let contents = fs.load(abs_path).await?;
2192    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2193    let mut builder = GitignoreBuilder::new(parent);
2194    for line in contents.lines() {
2195        builder.add_line(Some(abs_path.into()), line)?;
2196    }
2197    Ok(builder.build()?)
2198}
2199
2200impl WorktreeId {
2201    pub fn from_usize(handle_id: usize) -> Self {
2202        Self(handle_id)
2203    }
2204
2205    pub(crate) fn from_proto(id: u64) -> Self {
2206        Self(id as usize)
2207    }
2208
2209    pub fn to_proto(&self) -> u64 {
2210        self.0 as u64
2211    }
2212
2213    pub fn to_usize(&self) -> usize {
2214        self.0
2215    }
2216}
2217
2218impl fmt::Display for WorktreeId {
2219    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2220        self.0.fmt(f)
2221    }
2222}
2223
2224impl Deref for Worktree {
2225    type Target = Snapshot;
2226
2227    fn deref(&self) -> &Self::Target {
2228        match self {
2229            Worktree::Local(worktree) => &worktree.snapshot,
2230            Worktree::Remote(worktree) => &worktree.snapshot,
2231        }
2232    }
2233}
2234
2235impl Deref for LocalWorktree {
2236    type Target = LocalSnapshot;
2237
2238    fn deref(&self) -> &Self::Target {
2239        &self.snapshot
2240    }
2241}
2242
2243impl Deref for RemoteWorktree {
2244    type Target = Snapshot;
2245
2246    fn deref(&self) -> &Self::Target {
2247        &self.snapshot
2248    }
2249}
2250
2251impl fmt::Debug for LocalWorktree {
2252    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2253        self.snapshot.fmt(f)
2254    }
2255}
2256
2257impl fmt::Debug for Snapshot {
2258    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2259        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2260        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2261
2262        impl<'a> fmt::Debug for EntriesByPath<'a> {
2263            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2264                f.debug_map()
2265                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2266                    .finish()
2267            }
2268        }
2269
2270        impl<'a> fmt::Debug for EntriesById<'a> {
2271            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2272                f.debug_list().entries(self.0.iter()).finish()
2273            }
2274        }
2275
2276        f.debug_struct("Snapshot")
2277            .field("id", &self.id)
2278            .field("root_name", &self.root_name)
2279            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2280            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2281            .finish()
2282    }
2283}
2284
2285#[derive(Clone, PartialEq)]
2286pub struct File {
2287    pub worktree: ModelHandle<Worktree>,
2288    pub path: Arc<Path>,
2289    pub mtime: SystemTime,
2290    pub(crate) entry_id: ProjectEntryId,
2291    pub(crate) is_local: bool,
2292    pub(crate) is_deleted: bool,
2293}
2294
2295impl language::File for File {
2296    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2297        if self.is_local {
2298            Some(self)
2299        } else {
2300            None
2301        }
2302    }
2303
2304    fn mtime(&self) -> SystemTime {
2305        self.mtime
2306    }
2307
2308    fn path(&self) -> &Arc<Path> {
2309        &self.path
2310    }
2311
2312    fn full_path(&self, cx: &AppContext) -> PathBuf {
2313        let mut full_path = PathBuf::new();
2314        let worktree = self.worktree.read(cx);
2315
2316        if worktree.is_visible() {
2317            full_path.push(worktree.root_name());
2318        } else {
2319            let path = worktree.abs_path();
2320
2321            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2322                full_path.push("~");
2323                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2324            } else {
2325                full_path.push(path)
2326            }
2327        }
2328
2329        if self.path.components().next().is_some() {
2330            full_path.push(&self.path);
2331        }
2332
2333        full_path
2334    }
2335
2336    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2337    /// of its worktree, then this method will return the name of the worktree itself.
2338    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2339        self.path
2340            .file_name()
2341            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2342    }
2343
2344    fn is_deleted(&self) -> bool {
2345        self.is_deleted
2346    }
2347
2348    fn as_any(&self) -> &dyn Any {
2349        self
2350    }
2351
2352    fn to_proto(&self) -> rpc::proto::File {
2353        rpc::proto::File {
2354            worktree_id: self.worktree.id() as u64,
2355            entry_id: self.entry_id.to_proto(),
2356            path: self.path.to_string_lossy().into(),
2357            mtime: Some(self.mtime.into()),
2358            is_deleted: self.is_deleted,
2359        }
2360    }
2361}
2362
2363impl language::LocalFile for File {
2364    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2365        self.worktree
2366            .read(cx)
2367            .as_local()
2368            .unwrap()
2369            .abs_path
2370            .join(&self.path)
2371    }
2372
2373    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2374        let worktree = self.worktree.read(cx).as_local().unwrap();
2375        let abs_path = worktree.absolutize(&self.path);
2376        let fs = worktree.fs.clone();
2377        cx.background()
2378            .spawn(async move { fs.load(&abs_path).await })
2379    }
2380
2381    fn buffer_reloaded(
2382        &self,
2383        buffer_id: u64,
2384        version: &clock::Global,
2385        fingerprint: RopeFingerprint,
2386        line_ending: LineEnding,
2387        mtime: SystemTime,
2388        cx: &mut AppContext,
2389    ) {
2390        let worktree = self.worktree.read(cx).as_local().unwrap();
2391        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2392            worktree
2393                .client
2394                .send(proto::BufferReloaded {
2395                    project_id,
2396                    buffer_id,
2397                    version: serialize_version(version),
2398                    mtime: Some(mtime.into()),
2399                    fingerprint: serialize_fingerprint(fingerprint),
2400                    line_ending: serialize_line_ending(line_ending) as i32,
2401                })
2402                .log_err();
2403        }
2404    }
2405}
2406
2407impl File {
2408    pub fn from_proto(
2409        proto: rpc::proto::File,
2410        worktree: ModelHandle<Worktree>,
2411        cx: &AppContext,
2412    ) -> Result<Self> {
2413        let worktree_id = worktree
2414            .read(cx)
2415            .as_remote()
2416            .ok_or_else(|| anyhow!("not remote"))?
2417            .id();
2418
2419        if worktree_id.to_proto() != proto.worktree_id {
2420            return Err(anyhow!("worktree id does not match file"));
2421        }
2422
2423        Ok(Self {
2424            worktree,
2425            path: Path::new(&proto.path).into(),
2426            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2427            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2428            is_local: false,
2429            is_deleted: proto.is_deleted,
2430        })
2431    }
2432
2433    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2434        file.and_then(|f| f.as_any().downcast_ref())
2435    }
2436
2437    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2438        self.worktree.read(cx).id()
2439    }
2440
2441    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2442        if self.is_deleted {
2443            None
2444        } else {
2445            Some(self.entry_id)
2446        }
2447    }
2448}
2449
2450#[derive(Clone, Debug, PartialEq, Eq)]
2451pub struct Entry {
2452    pub id: ProjectEntryId,
2453    pub kind: EntryKind,
2454    pub path: Arc<Path>,
2455    pub inode: u64,
2456    pub mtime: SystemTime,
2457    pub is_symlink: bool,
2458    pub is_ignored: bool,
2459}
2460
2461#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2462pub enum EntryKind {
2463    PendingDir,
2464    Dir,
2465    File(CharBag),
2466}
2467
2468#[derive(Clone, Copy, Debug)]
2469pub enum PathChange {
2470    /// A filesystem entry was created.
2471    Added,
2472    /// A filesystem entry was removed.
2473    Removed,
2474    /// A filesystem entry was updated.
2475    Updated,
2476    /// A filesystem entry was either updated or added. We don't know
2477    /// whether or not it already existed, because the path had not
2478    /// been loaded before the event.
2479    AddedOrUpdated,
2480    /// A filesystem entry was found during the initial scan of the worktree.
2481    Loaded,
2482}
2483
2484impl Entry {
2485    fn new(
2486        path: Arc<Path>,
2487        metadata: &fs::Metadata,
2488        next_entry_id: &AtomicUsize,
2489        root_char_bag: CharBag,
2490    ) -> Self {
2491        Self {
2492            id: ProjectEntryId::new(next_entry_id),
2493            kind: if metadata.is_dir {
2494                EntryKind::PendingDir
2495            } else {
2496                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2497            },
2498            path,
2499            inode: metadata.inode,
2500            mtime: metadata.mtime,
2501            is_symlink: metadata.is_symlink,
2502            is_ignored: false,
2503        }
2504    }
2505
2506    pub fn is_dir(&self) -> bool {
2507        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2508    }
2509
2510    pub fn is_file(&self) -> bool {
2511        matches!(self.kind, EntryKind::File(_))
2512    }
2513}
2514
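// Each `Entry` summarizes itself with per-subtree counts (total, visible, files,
// visible files) plus the maximum path, which is what allows `Traversal` to seek
// by `TraversalTarget::Count` or by path in logarithmic time. A sketch of the
// summary produced for a single non-ignored file:
//
//     EntrySummary {
//         max_path: entry.path.clone(),
//         count: 1,
//         visible_count: 1,
//         file_count: 1,
//         visible_file_count: 1,
//     }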
2515impl sum_tree::Item for Entry {
2516    type Summary = EntrySummary;
2517
2518    fn summary(&self) -> Self::Summary {
2519        let visible_count = if self.is_ignored { 0 } else { 1 };
2520        let file_count;
2521        let visible_file_count;
2522        if self.is_file() {
2523            file_count = 1;
2524            visible_file_count = visible_count;
2525        } else {
2526            file_count = 0;
2527            visible_file_count = 0;
2528        }
2529
2530        EntrySummary {
2531            max_path: self.path.clone(),
2532            count: 1,
2533            visible_count,
2534            file_count,
2535            visible_file_count,
2536        }
2537    }
2538}
2539
2540impl sum_tree::KeyedItem for Entry {
2541    type Key = PathKey;
2542
2543    fn key(&self) -> Self::Key {
2544        PathKey(self.path.clone())
2545    }
2546}
2547
2548#[derive(Clone, Debug)]
2549pub struct EntrySummary {
2550    max_path: Arc<Path>,
2551    count: usize,
2552    visible_count: usize,
2553    file_count: usize,
2554    visible_file_count: usize,
2555}
2556
2557impl Default for EntrySummary {
2558    fn default() -> Self {
2559        Self {
2560            max_path: Arc::from(Path::new("")),
2561            count: 0,
2562            visible_count: 0,
2563            file_count: 0,
2564            visible_file_count: 0,
2565        }
2566    }
2567}
2568
2569impl sum_tree::Summary for EntrySummary {
2570    type Context = ();
2571
2572    fn add_summary(&mut self, rhs: &Self, _: &()) {
2573        self.max_path = rhs.max_path.clone();
2574        self.count += rhs.count;
2575        self.visible_count += rhs.visible_count;
2576        self.file_count += rhs.file_count;
2577        self.visible_file_count += rhs.visible_file_count;
2578    }
2579}
2580
2581#[derive(Clone, Debug)]
2582struct PathEntry {
2583    id: ProjectEntryId,
2584    path: Arc<Path>,
2585    is_ignored: bool,
2586    scan_id: usize,
2587}
2588
2589impl sum_tree::Item for PathEntry {
2590    type Summary = PathEntrySummary;
2591
2592    fn summary(&self) -> Self::Summary {
2593        PathEntrySummary { max_id: self.id }
2594    }
2595}
2596
2597impl sum_tree::KeyedItem for PathEntry {
2598    type Key = ProjectEntryId;
2599
2600    fn key(&self) -> Self::Key {
2601        self.id
2602    }
2603}
2604
2605#[derive(Clone, Debug, Default)]
2606struct PathEntrySummary {
2607    max_id: ProjectEntryId,
2608}
2609
2610impl sum_tree::Summary for PathEntrySummary {
2611    type Context = ();
2612
2613    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2614        self.max_id = summary.max_id;
2615    }
2616}
2617
2618impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2619    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2620        *self = summary.max_id;
2621    }
2622}
2623
2624#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2625pub struct PathKey(Arc<Path>);
2626
2627impl Default for PathKey {
2628    fn default() -> Self {
2629        Self(Path::new("").into())
2630    }
2631}
2632
2633impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2634    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2635        self.0 = summary.max_path.clone();
2636    }
2637}
2638
2639struct BackgroundScanner {
2640    state: Mutex<BackgroundScannerState>,
2641    fs: Arc<dyn Fs>,
2642    status_updates_tx: UnboundedSender<ScanState>,
2643    executor: Arc<executor::Background>,
2644    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2645    next_entry_id: Arc<AtomicUsize>,
2646    phase: BackgroundScannerPhase,
2647}
2648
2649#[derive(PartialEq)]
2650enum BackgroundScannerPhase {
2651    InitialScan,
2652    EventsReceivedDuringInitialScan,
2653    Events,
2654}
2655
2656impl BackgroundScanner {
2657    fn new(
2658        snapshot: LocalSnapshot,
2659        next_entry_id: Arc<AtomicUsize>,
2660        fs: Arc<dyn Fs>,
2661        status_updates_tx: UnboundedSender<ScanState>,
2662        executor: Arc<executor::Background>,
2663        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2664    ) -> Self {
2665        Self {
2666            fs,
2667            status_updates_tx,
2668            executor,
2669            refresh_requests_rx,
2670            next_entry_id,
2671            state: Mutex::new(BackgroundScannerState {
2672                prev_snapshot: snapshot.snapshot.clone(),
2673                snapshot,
2674                removed_entry_ids: Default::default(),
2675                changed_paths: Default::default(),
2676            }),
2677            phase: BackgroundScannerPhase::InitialScan,
2678        }
2679    }
2680
2681    async fn run(
2682        &mut self,
2683        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2684    ) {
2685        use futures::FutureExt as _;
2686
2687        let (root_abs_path, root_inode) = {
2688            let snapshot = &self.state.lock().snapshot;
2689            (
2690                snapshot.abs_path.clone(),
2691                snapshot.root_entry().map(|e| e.inode),
2692            )
2693        };
2694
2695        // Populate ignores above the root.
2696        let ignore_stack;
2697        for ancestor in root_abs_path.ancestors().skip(1) {
2698            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2699            {
2700                self.state
2701                    .lock()
2702                    .snapshot
2703                    .ignores_by_parent_abs_path
2704                    .insert(ancestor.into(), (ignore.into(), false));
2705            }
2706        }
2707        {
2708            let mut state = self.state.lock();
2709            state.snapshot.scan_id += 1;
2710            ignore_stack = state
2711                .snapshot
2712                .ignore_stack_for_abs_path(&root_abs_path, true);
2713            if ignore_stack.is_all() {
2714                if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
2715                    root_entry.is_ignored = true;
2716                    state.insert_entry(root_entry, self.fs.as_ref());
2717                }
2718            }
2719        };
2720
2721        // Perform an initial scan of the directory.
2722        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2723        smol::block_on(scan_job_tx.send(ScanJob {
2724            abs_path: root_abs_path,
2725            path: Arc::from(Path::new("")),
2726            ignore_stack,
2727            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2728            scan_queue: scan_job_tx.clone(),
2729        }))
2730        .unwrap();
2731        drop(scan_job_tx);
2732        self.scan_dirs(true, scan_job_rx).await;
2733        {
2734            let mut state = self.state.lock();
2735            state.snapshot.completed_scan_id = state.snapshot.scan_id;
2736        }
2737        self.send_status_update(false, None);
2738
2739        // Process any FS events that occurred while performing the initial scan.
2740        // For these events, the reported changes cannot be as precise, because we
2741        // didn't have the previous state loaded yet.
2742        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
2743        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2744            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2745            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2746                paths.extend(more_events.into_iter().map(|e| e.path));
2747            }
2748            self.process_events(paths).await;
2749        }
2750
2751        // Continue processing events until the worktree is dropped.
2752        self.phase = BackgroundScannerPhase::Events;
2753        loop {
2754            select_biased! {
2755                // Process any path refresh requests from the worktree. Prioritize
2756                // these before handling changes reported by the filesystem.
2757                request = self.refresh_requests_rx.recv().fuse() => {
2758                    let Ok((paths, barrier)) = request else { break };
2759                    if !self.process_refresh_request(paths.clone(), barrier).await {
2760                        return;
2761                    }
2762                }
2763
2764                events = events_rx.next().fuse() => {
2765                    let Some(events) = events else { break };
2766                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2767                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2768                        paths.extend(more_events.into_iter().map(|e| e.path));
2769                    }
2770                    self.process_events(paths.clone()).await;
2771                }
2772            }
2773        }
2774    }
2775
2776    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2777        self.reload_entries_for_paths(paths, None).await;
2778        self.send_status_update(false, Some(barrier))
2779    }
2780
2781    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2782        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2783        let paths = self
2784            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2785            .await;
2786        drop(scan_job_tx);
2787        self.scan_dirs(false, scan_job_rx).await;
2788
2789        self.update_ignore_statuses().await;
2790
2791        {
2792            let mut snapshot = &mut self.state.lock().snapshot;
2793
2794            if let Some(paths) = paths {
2795                for path in paths {
2796                    self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
2797                }
2798            }
2799
2800            let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2801            git_repositories.retain(|work_directory_id, _| {
2802                snapshot
2803                    .entry_for_id(*work_directory_id)
2804                    .map_or(false, |entry| {
2805                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2806                    })
2807            });
2808            snapshot.git_repositories = git_repositories;
2809
2810            let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2811            git_repository_entries.retain(|_, entry| {
2812                snapshot
2813                    .git_repositories
2814                    .get(&entry.work_directory.0)
2815                    .is_some()
2816            });
2817            snapshot.snapshot.repository_entries = git_repository_entries;
2818            snapshot.completed_scan_id = snapshot.scan_id;
2819        }
2820
2821        self.send_status_update(false, None);
2822    }
2823
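    // Drains `scan_jobs_rx` with one worker task per CPU. Each worker prioritizes
    // refresh requests over scan jobs, and a shared atomic counter ensures that
    // only one worker emits each periodic progress update (via `compare_exchange`),
    // so the worktree isn't flooded with redundant `ScanState::Updated` messages.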
2824    async fn scan_dirs(
2825        &self,
2826        enable_progress_updates: bool,
2827        scan_jobs_rx: channel::Receiver<ScanJob>,
2828    ) {
2829        use futures::FutureExt as _;
2830
2831        if self
2832            .status_updates_tx
2833            .unbounded_send(ScanState::Started)
2834            .is_err()
2835        {
2836            return;
2837        }
2838
2839        let progress_update_count = AtomicUsize::new(0);
2840        self.executor
2841            .scoped(|scope| {
2842                for _ in 0..self.executor.num_cpus() {
2843                    scope.spawn(async {
2844                        let mut last_progress_update_count = 0;
2845                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2846                        futures::pin_mut!(progress_update_timer);
2847
2848                        loop {
2849                            select_biased! {
2850                                // Process any path refresh requests before moving on to process
2851                                // the scan queue, so that user operations are prioritized.
2852                                request = self.refresh_requests_rx.recv().fuse() => {
2853                                    let Ok((paths, barrier)) = request else { break };
2854                                    if !self.process_refresh_request(paths, barrier).await {
2855                                        return;
2856                                    }
2857                                }
2858
2859                                // Send periodic progress updates to the worktree. Use an atomic counter
2860                                // to ensure that only one of the workers sends a progress update after
2861                                // the update interval elapses.
2862                                _ = progress_update_timer => {
2863                                    match progress_update_count.compare_exchange(
2864                                        last_progress_update_count,
2865                                        last_progress_update_count + 1,
2866                                        SeqCst,
2867                                        SeqCst
2868                                    ) {
2869                                        Ok(_) => {
2870                                            last_progress_update_count += 1;
2871                                            self.send_status_update(true, None);
2872                                        }
2873                                        Err(count) => {
2874                                            last_progress_update_count = count;
2875                                        }
2876                                    }
2877                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2878                                }
2879
2880                                // Recursively load directories from the file system.
2881                                job = scan_jobs_rx.recv().fuse() => {
2882                                    let Ok(job) = job else { break };
2883                                    if let Err(err) = self.scan_dir(&job).await {
2884                                        if job.path.as_ref() != Path::new("") {
2885                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2886                                        }
2887                                    }
2888                                }
2889                            }
2890                        }
2891                    })
2892                }
2893            })
2894            .await;
2895    }
2896
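    // Publishes the current snapshot to the worktree: installs it as the new
    // `prev_snapshot`, diffs it against the previous one restricted to
    // `changed_paths`, and forwards the result as `ScanState::Updated`. Returns
    // false when the receiving side has been dropped, signaling the scanner to
    // stop.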
2897    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2898        let mut state = self.state.lock();
2899        let new_snapshot = state.snapshot.clone();
2900        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
2901
2902        let changes =
2903            self.build_change_set(&old_snapshot, &new_snapshot.snapshot, &state.changed_paths);
2904        state.changed_paths.clear();
2905
2906        self.status_updates_tx
2907            .unbounded_send(ScanState::Updated {
2908                snapshot: new_snapshot,
2909                changes,
2910                scanning,
2911                barrier,
2912            })
2913            .is_ok()
2914    }
2915
2916    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2917        let mut new_entries: Vec<Entry> = Vec::new();
2918        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2919        let mut ignore_stack = job.ignore_stack.clone();
2920        let mut new_ignore = None;
2921        let (root_abs_path, root_char_bag, next_entry_id) = {
2922            let snapshot = &self.state.lock().snapshot;
2923            (
2924                snapshot.abs_path().clone(),
2925                snapshot.root_char_bag,
2926                self.next_entry_id.clone(),
2927            )
2928        };
2929        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2930        while let Some(child_abs_path) = child_paths.next().await {
2931            let child_abs_path: Arc<Path> = match child_abs_path {
2932                Ok(child_abs_path) => child_abs_path.into(),
2933                Err(error) => {
2934                    log::error!("error processing entry {:?}", error);
2935                    continue;
2936                }
2937            };
2938
2939            let child_name = child_abs_path.file_name().unwrap();
2940            let child_path: Arc<Path> = job.path.join(child_name).into();
2941            let child_metadata = match self.fs.metadata(&child_abs_path).await {
2942                Ok(Some(metadata)) => metadata,
2943                Ok(None) => continue,
2944                Err(err) => {
2945                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
2946                    continue;
2947                }
2948            };
2949
2950            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2951            if child_name == *GITIGNORE {
2952                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2953                    Ok(ignore) => {
2954                        let ignore = Arc::new(ignore);
2955                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2956                        new_ignore = Some(ignore);
2957                    }
2958                    Err(error) => {
2959                        log::error!(
2960                            "error loading .gitignore file {:?} - {:?}",
2961                            child_name,
2962                            error
2963                        );
2964                    }
2965                }
2966
2967                // Update the ignore status of any child entries we've already processed to reflect
2968                // the ignore file in the current directory. Because `.gitignore` starts with a `.`,
2969                // it is usually encountered early in the directory listing, so there should rarely be
2970                // many entries to revisit. Update the ignore stack associated with any new jobs as well.
2971                let mut new_jobs = new_jobs.iter_mut();
2972                for entry in &mut new_entries {
2973                    let entry_abs_path = root_abs_path.join(&entry.path);
2974                    entry.is_ignored =
2975                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2976
2977                    if entry.is_dir() {
2978                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2979                            job.ignore_stack = if entry.is_ignored {
2980                                IgnoreStack::all()
2981                            } else {
2982                                ignore_stack.clone()
2983                            };
2984                        }
2985                    }
2986                }
2987            }
2988
2989            let mut child_entry = Entry::new(
2990                child_path.clone(),
2991                &child_metadata,
2992                &next_entry_id,
2993                root_char_bag,
2994            );
2995
2996            if child_entry.is_dir() {
2997                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2998                child_entry.is_ignored = is_ignored;
2999
3000                // Avoid infinite recursion (and an eventual crash) in the case of a recursive symlink
3001                if !job.ancestor_inodes.contains(&child_entry.inode) {
3002                    let mut ancestor_inodes = job.ancestor_inodes.clone();
3003                    ancestor_inodes.insert(child_entry.inode);
3004
3005                    new_jobs.push(Some(ScanJob {
3006                        abs_path: child_abs_path,
3007                        path: child_path,
3008                        ignore_stack: if is_ignored {
3009                            IgnoreStack::all()
3010                        } else {
3011                            ignore_stack.clone()
3012                        },
3013                        ancestor_inodes,
3014                        scan_queue: job.scan_queue.clone(),
3015                    }));
3016                } else {
3017                    new_jobs.push(None);
3018                }
3019            } else {
3020                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
3021            }
3022
3023            new_entries.push(child_entry);
3024        }
3025
3026        {
3027            let mut state = self.state.lock();
3028            state.populate_dir(job.path.clone(), new_entries, new_ignore, self.fs.as_ref());
3029            if let Err(ix) = state.changed_paths.binary_search(&job.path) {
3030                state.changed_paths.insert(ix, job.path.clone());
3031            }
3032        }
3033
3034        for new_job in new_jobs {
3035            if let Some(new_job) = new_job {
3036                job.scan_queue.send(new_job).await.unwrap();
3037            }
3038        }
3039
3040        Ok(())
3041    }
3042
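        /// Refresh the entries for a set of absolute paths reported by the FS event
        /// stream: remove stale entries, re-read metadata, and, when a scan queue is
        /// provided, enqueue changed directories for recursive rescanning. Returns the
        /// affected worktree-relative paths, or `None` if the root path can't be
        /// canonicalized.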
3043    async fn reload_entries_for_paths(
3044        &self,
3045        mut abs_paths: Vec<PathBuf>,
3046        scan_queue_tx: Option<Sender<ScanJob>>,
3047    ) -> Option<Vec<Arc<Path>>> {
3048        let doing_recursive_update = scan_queue_tx.is_some();
3049
3050        abs_paths.sort_unstable();
3051        abs_paths.dedup_by(|a, b| a.starts_with(&b));
3052
3053        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
3054        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
3055        let metadata = futures::future::join_all(
3056            abs_paths
3057                .iter()
3058                .map(|abs_path| self.fs.metadata(&abs_path))
3059                .collect::<Vec<_>>(),
3060        )
3061        .await;
3062
3063        let mut state = self.state.lock();
3064        let snapshot = &mut state.snapshot;
3065        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
3066        snapshot.scan_id += 1;
3067        if is_idle && !doing_recursive_update {
3068            snapshot.completed_scan_id = snapshot.scan_id;
3069        }
3070
3071        // Remove any entries for paths that no longer exist or are being recursively
3072        // refreshed. Do this before adding any new entries, so that renames can be
3073        // detected regardless of the order of the paths.
3074        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
3075        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
3076            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3077                if matches!(metadata, Ok(None)) || doing_recursive_update {
3078                    state.remove_path(path);
3079                }
3080                event_paths.push(path.into());
3081            } else {
3082                log::error!(
3083                    "unexpected event {:?} for root path {:?}",
3084                    abs_path,
3085                    root_canonical_path
3086                );
3087            }
3088        }
3089
3090        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
3091            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
3092
3093            match metadata {
3094                Ok(Some(metadata)) => {
3095                    let ignore_stack = state
3096                        .snapshot
3097                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
3098                    let mut fs_entry = Entry::new(
3099                        path.clone(),
3100                        &metadata,
3101                        self.next_entry_id.as_ref(),
3102                        state.snapshot.root_char_bag,
3103                    );
3104                    fs_entry.is_ignored = ignore_stack.is_all();
3105                    state.insert_entry(fs_entry, self.fs.as_ref());
3106
3107                    if let Some(scan_queue_tx) = &scan_queue_tx {
3108                        let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);
3109                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
3110                            ancestor_inodes.insert(metadata.inode);
3111                            smol::block_on(scan_queue_tx.send(ScanJob {
3112                                abs_path,
3113                                path,
3114                                ignore_stack,
3115                                ancestor_inodes,
3116                                scan_queue: scan_queue_tx.clone(),
3117                            }))
3118                            .unwrap();
3119                        }
3120                    }
3121                }
3122                Ok(None) => {
3123                    self.remove_repo_path(&path, &mut state.snapshot);
3124                }
3125                Err(err) => {
3126                    // TODO - create a special 'error' entry in the entries tree to mark this
3127                    log::error!("error reading file on event {:?}", err);
3128                }
3129            }
3130        }
3131
3132        util::extend_sorted(
3133            &mut state.changed_paths,
3134            event_paths.iter().cloned(),
3135            usize::MAX,
3136            Ord::cmp,
3137        );
3138
3139        Some(event_paths)
3140    }
3141
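        /// Update git bookkeeping for a removed `path` (paths inside `.git` are
        /// skipped): if the path was a repository's work directory, drop that
        /// repository; otherwise clear the statuses recorded for the path and its
        /// descendants.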
3142    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3143        if !path
3144            .components()
3145            .any(|component| component.as_os_str() == *DOT_GIT)
3146        {
3147            let scan_id = snapshot.scan_id;
3148
3149            if let Some(repository) = snapshot.repository_for_work_directory(path) {
3150                let entry = repository.work_directory.0;
3151                snapshot.git_repositories.remove(&entry);
3152                snapshot
3153                    .snapshot
3154                    .repository_entries
3155                    .remove(&RepositoryWorkDirectory(path.into()));
3156                return Some(());
3157            }
3158
3159            let repo = snapshot.repository_for_path(&path)?;
3160            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
3161            let work_dir = repo.work_directory(snapshot)?;
3162            let work_dir_id = repo.work_directory;
3163
3164            snapshot
3165                .git_repositories
3166                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3167
3168            snapshot.repository_entries.update(&work_dir, |entry| {
3169                entry
3170                    .statuses
3171                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
3172            });
3173        }
3174
3175        Some(())
3176    }
3177
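        /// Bring git metadata up to date after a change at `path`. A change inside a
        /// `.git` directory reloads the repository's index, branch, and full status
        /// list; a change elsewhere refreshes the statuses of the affected descendant
        /// entries, or clears them if the path is ignored.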
3178    fn reload_repo_for_file_path(
3179        &self,
3180        path: &Path,
3181        snapshot: &mut LocalSnapshot,
3182        fs: &dyn Fs,
3183    ) -> Option<()> {
3184        let scan_id = snapshot.scan_id;
3185
3186        if path
3187            .components()
3188            .any(|component| component.as_os_str() == *DOT_GIT)
3189        {
3190            let (entry_id, repo_ptr) = {
3191                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
3192                    let dot_git_dir = path.ancestors()
3193                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
3194                        .next()?;
3195
3196                    snapshot.build_repo(dot_git_dir.into(), fs);
3197                    return None;
3198                };
3199                if repo.git_dir_scan_id == scan_id {
3200                    return None;
3201                }
3202                (*entry_id, repo.repo_ptr.to_owned())
3203            };
3204
3205            let work_dir = snapshot
3206                .entry_for_id(entry_id)
3207                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
3208
3209            let repo = repo_ptr.lock();
3210            repo.reload_index();
3211            let branch = repo.branch_name();
3212            let statuses = repo.statuses().unwrap_or_default();
3213
3214            snapshot.git_repositories.update(&entry_id, |entry| {
3215                entry.scan_id = scan_id;
3216                entry.git_dir_scan_id = scan_id;
3217            });
3218
3219            snapshot.repository_entries.update(&work_dir, |entry| {
3220                entry.branch = branch.map(Into::into);
3221                entry.statuses = statuses;
3222            });
3223        } else {
3224            if snapshot
3225                .entry_for_path(&path)
3226                .map(|entry| entry.is_ignored)
3227                .unwrap_or(false)
3228            {
3229                self.remove_repo_path(&path, snapshot);
3230                return None;
3231            }
3232
3233            let repo = snapshot.repository_for_path(&path)?;
3234
3235            let work_dir = repo.work_directory(snapshot)?;
3236            let work_dir_id = repo.work_directory.clone();
3237
3238            snapshot
3239                .git_repositories
3240                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3241
3242            let local_repo = snapshot.get_local_repo(&repo)?.to_owned();
3243
3244            // Short circuit if we've already scanned everything
3245            if local_repo.git_dir_scan_id == scan_id {
3246                return None;
3247            }
3248
3249            let mut repository = snapshot.repository_entries.remove(&work_dir)?;
3250
3251            for entry in snapshot.descendent_entries(false, false, path) {
3252                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
3253                    continue;
3254                };
3255
3256                let status = local_repo.repo_ptr.lock().status(&repo_path);
3257                if let Some(status) = status {
3258                    repository.statuses.insert(repo_path.clone(), status);
3259                } else {
3260                    repository.statuses.remove(&repo_path);
3261                }
3262            }
3263
3264            snapshot.repository_entries.insert(work_dir, repository)
3265        }
3266
3267        Some(())
3268    }
3269
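        /// Re-evaluate ignore state after `.gitignore` files change: drop ignore
        /// entries whose `.gitignore` no longer exists, then fan out
        /// `UpdateIgnoreStatusJob`s across the executor to recompute the ignored flag
        /// for the affected subtrees, while continuing to service path refresh
        /// requests.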
3270    async fn update_ignore_statuses(&self) {
3271        use futures::FutureExt as _;
3272
3273        let mut snapshot = self.state.lock().snapshot.clone();
3274        let mut ignores_to_update = Vec::new();
3275        let mut ignores_to_delete = Vec::new();
3276        let abs_path = snapshot.abs_path.clone();
3277        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
3278            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
3279                if *needs_update {
3280                    *needs_update = false;
3281                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
3282                        ignores_to_update.push(parent_abs_path.clone());
3283                    }
3284                }
3285
3286                let ignore_path = parent_path.join(&*GITIGNORE);
3287                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
3288                    ignores_to_delete.push(parent_abs_path.clone());
3289                }
3290            }
3291        }
3292
3293        for parent_abs_path in ignores_to_delete {
3294            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
3295            self.state
3296                .lock()
3297                .snapshot
3298                .ignores_by_parent_abs_path
3299                .remove(&parent_abs_path);
3300        }
3301
3302        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
3303        ignores_to_update.sort_unstable();
3304        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
3305        while let Some(parent_abs_path) = ignores_to_update.next() {
3306            while ignores_to_update
3307                .peek()
3308                .map_or(false, |p| p.starts_with(&parent_abs_path))
3309            {
3310                ignores_to_update.next().unwrap();
3311            }
3312
3313            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
3314            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
3315                abs_path: parent_abs_path,
3316                ignore_stack,
3317                ignore_queue: ignore_queue_tx.clone(),
3318            }))
3319            .unwrap();
3320        }
3321        drop(ignore_queue_tx);
3322
3323        self.executor
3324            .scoped(|scope| {
3325                for _ in 0..self.executor.num_cpus() {
3326                    scope.spawn(async {
3327                        loop {
3328                            select_biased! {
3329                                // Process any path refresh requests before moving on to process
3330                                // the queue of ignore statuses.
3331                                request = self.refresh_requests_rx.recv().fuse() => {
3332                                    let Ok((paths, barrier)) = request else { break };
3333                                    if !self.process_refresh_request(paths, barrier).await {
3334                                        return;
3335                                    }
3336                                }
3337
3338                                // Recursively process directories whose ignores have changed.
3339                                job = ignore_queue_rx.recv().fuse() => {
3340                                    let Ok(job) = job else { break };
3341                                    self.update_ignore_status(job, &snapshot).await;
3342                                }
3343                            }
3344                        }
3345                    });
3346                }
3347            })
3348            .await;
3349    }
3350
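        /// Process a single `UpdateIgnoreStatusJob`: recompute the ignored flag for
        /// each child of the job's directory, enqueue follow-up jobs for child
        /// directories, and write any entries whose flag changed back into the
        /// snapshot.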
3351    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
3352        let mut ignore_stack = job.ignore_stack;
3353        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
3354            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3355        }
3356
3357        let mut entries_by_id_edits = Vec::new();
3358        let mut entries_by_path_edits = Vec::new();
3359        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
3360        for mut entry in snapshot.child_entries(path).cloned() {
3361            let was_ignored = entry.is_ignored;
3362            let abs_path = snapshot.abs_path().join(&entry.path);
3363            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
3364            if entry.is_dir() {
3365                let child_ignore_stack = if entry.is_ignored {
3366                    IgnoreStack::all()
3367                } else {
3368                    ignore_stack.clone()
3369                };
3370                job.ignore_queue
3371                    .send(UpdateIgnoreStatusJob {
3372                        abs_path: abs_path.into(),
3373                        ignore_stack: child_ignore_stack,
3374                        ignore_queue: job.ignore_queue.clone(),
3375                    })
3376                    .await
3377                    .unwrap();
3378            }
3379
3380            if entry.is_ignored != was_ignored {
3381                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
3382                path_entry.scan_id = snapshot.scan_id;
3383                path_entry.is_ignored = entry.is_ignored;
3384                entries_by_id_edits.push(Edit::Insert(path_entry));
3385                entries_by_path_edits.push(Edit::Insert(entry));
3386            }
3387        }
3388
3389        let snapshot = &mut self.state.lock().snapshot;
3390        snapshot.entries_by_path.edit(entries_by_path_edits, &());
3391        snapshot.entries_by_id.edit(entries_by_id_edits, &());
3392    }
3393
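        /// Walk the old and new snapshots in parallel along the given event paths,
        /// classifying each differing entry as added, removed, updated, or loaded
        /// depending on the scanner's current phase.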
3394    fn build_change_set(
3395        &self,
3396        old_snapshot: &Snapshot,
3397        new_snapshot: &Snapshot,
3398        event_paths: &[Arc<Path>],
3399    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
3400        use BackgroundScannerPhase::*;
3401        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};
3402
3403        let mut changes = HashMap::default();
3404        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3405        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3406
3407        for path in event_paths {
3408            let path = PathKey(path.clone());
3409            old_paths.seek(&path, Bias::Left, &());
3410            new_paths.seek(&path, Bias::Left, &());
3411
3412            loop {
3413                match (old_paths.item(), new_paths.item()) {
3414                    (Some(old_entry), Some(new_entry)) => {
3415                        if old_entry.path > path.0
3416                            && new_entry.path > path.0
3417                            && !old_entry.path.starts_with(&path.0)
3418                            && !new_entry.path.starts_with(&path.0)
3419                        {
3420                            break;
3421                        }
3422
3423                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3424                            Ordering::Less => {
3425                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3426                                old_paths.next(&());
3427                            }
3428                            Ordering::Equal => {
3429                                if self.phase == EventsReceivedDuringInitialScan {
3430                                    // If the worktree was not fully initialized when this event was generated,
3431                                    // we can't know whether this entry was added during the scan or whether
3432                                    // it was merely updated.
3433                                    changes.insert(
3434                                        (new_entry.path.clone(), new_entry.id),
3435                                        AddedOrUpdated,
3436                                    );
3437                                } else if old_entry.mtime != new_entry.mtime {
3438                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
3439                                }
3440                                old_paths.next(&());
3441                                new_paths.next(&());
3442                            }
3443                            Ordering::Greater => {
3444                                changes.insert(
3445                                    (new_entry.path.clone(), new_entry.id),
3446                                    if self.phase == InitialScan {
3447                                        Loaded
3448                                    } else {
3449                                        Added
3450                                    },
3451                                );
3452                                new_paths.next(&());
3453                            }
3454                        }
3455                    }
3456                    (Some(old_entry), None) => {
3457                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3458                        old_paths.next(&());
3459                    }
3460                    (None, Some(new_entry)) => {
3461                        changes.insert(
3462                            (new_entry.path.clone(), new_entry.id),
3463                            if self.phase == InitialScan {
3464                                Loaded
3465                            } else {
3466                                Added
3467                            },
3468                        );
3469                        new_paths.next(&());
3470                    }
3471                    (None, None) => break,
3472                }
3473            }
3474        }
3475
3476        changes
3477    }
3478
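        /// A timer used to pace scan progress updates: it never resolves while
        /// `running` is false, completes after a simulated delay when running against
        /// the fake filesystem in tests, and otherwise fires after 100ms.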
3479    async fn progress_timer(&self, running: bool) {
3480        if !running {
3481            return futures::future::pending().await;
3482        }
3483
3484        #[cfg(any(test, feature = "test-support"))]
3485        if self.fs.is_fake() {
3486            return self.executor.simulate_random_delay().await;
3487        }
3488
3489        smol::Timer::after(Duration::from_millis(100)).await;
3490    }
3491}
3492
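    /// Extend the worktree root's character bag with the lowercased characters of
    /// `path`, for use in fuzzy matching.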
3493fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3494    let mut result = root_char_bag;
3495    result.extend(
3496        path.to_string_lossy()
3497            .chars()
3498            .map(|c| c.to_ascii_lowercase()),
3499    );
3500    result
3501}
3502
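    /// A unit of work for the background scanner: a single directory to read, plus the
    /// ignore stack and ancestor inodes needed to scan it safely.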
3503struct ScanJob {
3504    abs_path: Arc<Path>,
3505    path: Arc<Path>,
3506    ignore_stack: Arc<IgnoreStack>,
3507    scan_queue: Sender<ScanJob>,
3508    ancestor_inodes: TreeSet<u64>,
3509}
3510
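    /// A unit of work for recomputing ignore status beneath a directory after its
    /// ignore rules have changed.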
3511struct UpdateIgnoreStatusJob {
3512    abs_path: Arc<Path>,
3513    ignore_stack: Arc<IgnoreStack>,
3514    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3515}
3516
3517pub trait WorktreeHandle {
3518    #[cfg(any(test, feature = "test-support"))]
3519    fn flush_fs_events<'a>(
3520        &self,
3521        cx: &'a gpui::TestAppContext,
3522    ) -> futures::future::LocalBoxFuture<'a, ()>;
3523}
3524
3525impl WorktreeHandle for ModelHandle<Worktree> {
3526    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
3527    // occurred before the worktree was constructed. These events can cause the worktree to perform
3528    // extra directory scans and emit extra scan-state notifications.
3529    //
3530    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3531    // to ensure that all redundant FS events have already been processed.
3532    #[cfg(any(test, feature = "test-support"))]
3533    fn flush_fs_events<'a>(
3534        &self,
3535        cx: &'a gpui::TestAppContext,
3536    ) -> futures::future::LocalBoxFuture<'a, ()> {
3537        use smol::future::FutureExt;
3538
3539        let filename = "fs-event-sentinel";
3540        let tree = self.clone();
3541        let (fs, root_path) = self.read_with(cx, |tree, _| {
3542            let tree = tree.as_local().unwrap();
3543            (tree.fs.clone(), tree.abs_path().clone())
3544        });
3545
3546        async move {
3547            fs.create_file(&root_path.join(filename), Default::default())
3548                .await
3549                .unwrap();
3550            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3551                .await;
3552
3553            fs.remove_file(&root_path.join(filename), Default::default())
3554                .await
3555                .unwrap();
3556            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3557                .await;
3558
3559            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3560                .await;
3561        }
3562        .boxed_local()
3563    }
3564}
3565
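    /// A `sum_tree` dimension that counts the entries (total, visible, files, and
    /// visible files) preceding a position, allowing traversals to seek by offset.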
3566#[derive(Clone, Debug)]
3567struct TraversalProgress<'a> {
3568    max_path: &'a Path,
3569    count: usize,
3570    visible_count: usize,
3571    file_count: usize,
3572    visible_file_count: usize,
3573}
3574
3575impl<'a> TraversalProgress<'a> {
3576    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3577        match (include_ignored, include_dirs) {
3578            (true, true) => self.count,
3579            (true, false) => self.file_count,
3580            (false, true) => self.visible_count,
3581            (false, false) => self.visible_file_count,
3582        }
3583    }
3584}
3585
3586impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3587    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3588        self.max_path = summary.max_path.as_ref();
3589        self.count += summary.count;
3590        self.visible_count += summary.visible_count;
3591        self.file_count += summary.file_count;
3592        self.visible_file_count += summary.visible_file_count;
3593    }
3594}
3595
3596impl<'a> Default for TraversalProgress<'a> {
3597    fn default() -> Self {
3598        Self {
3599            max_path: Path::new(""),
3600            count: 0,
3601            visible_count: 0,
3602            file_count: 0,
3603            visible_file_count: 0,
3604        }
3605    }
3606}
3607
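    /// A cursor over a snapshot's entries in path order, optionally skipping
    /// directories and/or ignored entries.
    ///
    /// A minimal usage sketch, assuming a `Snapshot::entries(include_ignored)` helper
    /// like the one exercised by the tests below:
    ///
    /// ```ignore
    /// // Collect the paths of all non-ignored entries, in traversal order.
    /// let paths: Vec<_> = snapshot
    ///     .entries(false)
    ///     .map(|entry| entry.path.clone())
    ///     .collect();
    /// ```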
3608pub struct Traversal<'a> {
3609    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3610    include_ignored: bool,
3611    include_dirs: bool,
3612}
3613
3614impl<'a> Traversal<'a> {
3615    pub fn advance(&mut self) -> bool {
3616        self.cursor.seek_forward(
3617            &TraversalTarget::Count {
3618                count: self.end_offset() + 1,
3619                include_dirs: self.include_dirs,
3620                include_ignored: self.include_ignored,
3621            },
3622            Bias::Left,
3623            &(),
3624        )
3625    }
3626
3627    pub fn advance_to_sibling(&mut self) -> bool {
3628        while let Some(entry) = self.cursor.item() {
3629            self.cursor.seek_forward(
3630                &TraversalTarget::PathSuccessor(&entry.path),
3631                Bias::Left,
3632                &(),
3633            );
3634            if let Some(entry) = self.cursor.item() {
3635                if (self.include_dirs || !entry.is_dir())
3636                    && (self.include_ignored || !entry.is_ignored)
3637                {
3638                    return true;
3639                }
3640            }
3641        }
3642        false
3643    }
3644
3645    pub fn entry(&self) -> Option<&'a Entry> {
3646        self.cursor.item()
3647    }
3648
3649    pub fn start_offset(&self) -> usize {
3650        self.cursor
3651            .start()
3652            .count(self.include_dirs, self.include_ignored)
3653    }
3654
3655    pub fn end_offset(&self) -> usize {
3656        self.cursor
3657            .end(&())
3658            .count(self.include_dirs, self.include_ignored)
3659    }
3660}
3661
3662impl<'a> Iterator for Traversal<'a> {
3663    type Item = &'a Entry;
3664
3665    fn next(&mut self) -> Option<Self::Item> {
3666        if let Some(item) = self.entry() {
3667            self.advance();
3668            Some(item)
3669        } else {
3670            None
3671        }
3672    }
3673}
3674
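    /// A seek target for `Traversal`'s cursor: an exact path, the first position past
    /// all descendants of a path, or an entry count under the traversal's filters.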
3675#[derive(Debug)]
3676enum TraversalTarget<'a> {
3677    Path(&'a Path),
3678    PathSuccessor(&'a Path),
3679    Count {
3680        count: usize,
3681        include_ignored: bool,
3682        include_dirs: bool,
3683    },
3684}
3685
3686impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3687    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3688        match self {
3689            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3690            TraversalTarget::PathSuccessor(path) => {
3691                if !cursor_location.max_path.starts_with(path) {
3692                    Ordering::Equal
3693                } else {
3694                    Ordering::Greater
3695                }
3696            }
3697            TraversalTarget::Count {
3698                count,
3699                include_dirs,
3700                include_ignored,
3701            } => Ord::cmp(
3702                count,
3703                &cursor_location.count(*include_dirs, *include_ignored),
3704            ),
3705        }
3706    }
3707}
3708
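    /// Iterates over the immediate children of `parent_path`, advancing to each
    /// yielded entry's next sibling so descendants are skipped.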
3709struct ChildEntriesIter<'a> {
3710    parent_path: &'a Path,
3711    traversal: Traversal<'a>,
3712}
3713
3714impl<'a> Iterator for ChildEntriesIter<'a> {
3715    type Item = &'a Entry;
3716
3717    fn next(&mut self) -> Option<Self::Item> {
3718        if let Some(item) = self.traversal.entry() {
3719            if item.path.starts_with(&self.parent_path) {
3720                self.traversal.advance_to_sibling();
3721                return Some(item);
3722            }
3723        }
3724        None
3725    }
3726}
3727
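    /// Iterates, in path order, over every entry whose path starts with `parent_path`.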
3728struct DescendentEntriesIter<'a> {
3729    parent_path: &'a Path,
3730    traversal: Traversal<'a>,
3731}
3732
3733impl<'a> Iterator for DescendentEntriesIter<'a> {
3734    type Item = &'a Entry;
3735
3736    fn next(&mut self) -> Option<Self::Item> {
3737        if let Some(item) = self.traversal.entry() {
3738            if item.path.starts_with(&self.parent_path) {
3739                self.traversal.advance();
3740                return Some(item);
3741            }
3742        }
3743        None
3744    }
3745}
3746
3747impl<'a> From<&'a Entry> for proto::Entry {
3748    fn from(entry: &'a Entry) -> Self {
3749        Self {
3750            id: entry.id.to_proto(),
3751            is_dir: entry.is_dir(),
3752            path: entry.path.to_string_lossy().into(),
3753            inode: entry.inode,
3754            mtime: Some(entry.mtime.into()),
3755            is_symlink: entry.is_symlink,
3756            is_ignored: entry.is_ignored,
3757        }
3758    }
3759}
3760
3761impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3762    type Error = anyhow::Error;
3763
3764    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3765        if let Some(mtime) = entry.mtime {
3766            let kind = if entry.is_dir {
3767                EntryKind::Dir
3768            } else {
3769                let mut char_bag = *root_char_bag;
3770                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3771                EntryKind::File(char_bag)
3772            };
3773            let path: Arc<Path> = PathBuf::from(entry.path).into();
3774            Ok(Entry {
3775                id: ProjectEntryId::from_proto(entry.id),
3776                kind,
3777                path,
3778                inode: entry.inode,
3779                mtime: mtime.into(),
3780                is_symlink: entry.is_symlink,
3781                is_ignored: entry.is_ignored,
3782            })
3783        } else {
3784            Err(anyhow!(
3785                "missing mtime in remote worktree entry {:?}",
3786                entry.path
3787            ))
3788        }
3789    }
3790}
3791
3792#[cfg(test)]
3793mod tests {
3794    use super::*;
3795    use fs::{FakeFs, RealFs};
3796    use gpui::{executor::Deterministic, TestAppContext};
3797    use pretty_assertions::assert_eq;
3798    use rand::prelude::*;
3799    use serde_json::json;
3800    use std::{env, fmt::Write};
3801    use util::{http::FakeHttpClient, test::temp_tree};
3802
3803    #[gpui::test]
3804    async fn test_traversal(cx: &mut TestAppContext) {
3805        let fs = FakeFs::new(cx.background());
3806        fs.insert_tree(
3807            "/root",
3808            json!({
3809               ".gitignore": "a/b\n",
3810               "a": {
3811                   "b": "",
3812                   "c": "",
3813               }
3814            }),
3815        )
3816        .await;
3817
3818        let http_client = FakeHttpClient::with_404_response();
3819        let client = cx.read(|cx| Client::new(http_client, cx));
3820
3821        let tree = Worktree::local(
3822            client,
3823            Path::new("/root"),
3824            true,
3825            fs,
3826            Default::default(),
3827            &mut cx.to_async(),
3828        )
3829        .await
3830        .unwrap();
3831        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3832            .await;
3833
3834        tree.read_with(cx, |tree, _| {
3835            assert_eq!(
3836                tree.entries(false)
3837                    .map(|entry| entry.path.as_ref())
3838                    .collect::<Vec<_>>(),
3839                vec![
3840                    Path::new(""),
3841                    Path::new(".gitignore"),
3842                    Path::new("a"),
3843                    Path::new("a/c"),
3844                ]
3845            );
3846            assert_eq!(
3847                tree.entries(true)
3848                    .map(|entry| entry.path.as_ref())
3849                    .collect::<Vec<_>>(),
3850                vec![
3851                    Path::new(""),
3852                    Path::new(".gitignore"),
3853                    Path::new("a"),
3854                    Path::new("a/b"),
3855                    Path::new("a/c"),
3856                ]
3857            );
3858        })
3859    }
3860
3861    #[gpui::test]
3862    async fn test_descendent_entries(cx: &mut TestAppContext) {
3863        let fs = FakeFs::new(cx.background());
3864        fs.insert_tree(
3865            "/root",
3866            json!({
3867                "a": "",
3868                "b": {
3869                   "c": {
3870                       "d": ""
3871                   },
3872                   "e": {}
3873                },
3874                "f": "",
3875                "g": {
3876                    "h": {}
3877                },
3878                "i": {
3879                    "j": {
3880                        "k": ""
3881                    },
3882                    "l": {
3883
3884                    }
3885                },
3886                ".gitignore": "i/j\n",
3887            }),
3888        )
3889        .await;
3890
3891        let http_client = FakeHttpClient::with_404_response();
3892        let client = cx.read(|cx| Client::new(http_client, cx));
3893
3894        let tree = Worktree::local(
3895            client,
3896            Path::new("/root"),
3897            true,
3898            fs,
3899            Default::default(),
3900            &mut cx.to_async(),
3901        )
3902        .await
3903        .unwrap();
3904        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3905            .await;
3906
3907        tree.read_with(cx, |tree, _| {
3908            assert_eq!(
3909                tree.descendent_entries(false, false, Path::new("b"))
3910                    .map(|entry| entry.path.as_ref())
3911                    .collect::<Vec<_>>(),
3912                vec![Path::new("b/c/d"),]
3913            );
3914            assert_eq!(
3915                tree.descendent_entries(true, false, Path::new("b"))
3916                    .map(|entry| entry.path.as_ref())
3917                    .collect::<Vec<_>>(),
3918                vec![
3919                    Path::new("b"),
3920                    Path::new("b/c"),
3921                    Path::new("b/c/d"),
3922                    Path::new("b/e"),
3923                ]
3924            );
3925
3926            assert_eq!(
3927                tree.descendent_entries(false, false, Path::new("g"))
3928                    .map(|entry| entry.path.as_ref())
3929                    .collect::<Vec<_>>(),
3930                Vec::<PathBuf>::new()
3931            );
3932            assert_eq!(
3933                tree.descendent_entries(true, false, Path::new("g"))
3934                    .map(|entry| entry.path.as_ref())
3935                    .collect::<Vec<_>>(),
3936                vec![Path::new("g"), Path::new("g/h"),]
3937            );
3938
3939            assert_eq!(
3940                tree.descendent_entries(false, false, Path::new("i"))
3941                    .map(|entry| entry.path.as_ref())
3942                    .collect::<Vec<_>>(),
3943                Vec::<PathBuf>::new()
3944            );
3945            assert_eq!(
3946                tree.descendent_entries(false, true, Path::new("i"))
3947                    .map(|entry| entry.path.as_ref())
3948                    .collect::<Vec<_>>(),
3949                vec![Path::new("i/j/k")]
3950            );
3951            assert_eq!(
3952                tree.descendent_entries(true, false, Path::new("i"))
3953                    .map(|entry| entry.path.as_ref())
3954                    .collect::<Vec<_>>(),
3955                vec![Path::new("i"), Path::new("i/l"),]
3956            );
3957        })
3958    }
3959
3960    #[gpui::test(iterations = 10)]
3961    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3962        let fs = FakeFs::new(cx.background());
3963        fs.insert_tree(
3964            "/root",
3965            json!({
3966                "lib": {
3967                    "a": {
3968                        "a.txt": ""
3969                    },
3970                    "b": {
3971                        "b.txt": ""
3972                    }
3973                }
3974            }),
3975        )
3976        .await;
3977        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3978        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3979
3980        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3981        let tree = Worktree::local(
3982            client,
3983            Path::new("/root"),
3984            true,
3985            fs.clone(),
3986            Default::default(),
3987            &mut cx.to_async(),
3988        )
3989        .await
3990        .unwrap();
3991
3992        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3993            .await;
3994
3995        tree.read_with(cx, |tree, _| {
3996            assert_eq!(
3997                tree.entries(false)
3998                    .map(|entry| entry.path.as_ref())
3999                    .collect::<Vec<_>>(),
4000                vec![
4001                    Path::new(""),
4002                    Path::new("lib"),
4003                    Path::new("lib/a"),
4004                    Path::new("lib/a/a.txt"),
4005                    Path::new("lib/a/lib"),
4006                    Path::new("lib/b"),
4007                    Path::new("lib/b/b.txt"),
4008                    Path::new("lib/b/lib"),
4009                ]
4010            );
4011        });
4012
4013        fs.rename(
4014            Path::new("/root/lib/a/lib"),
4015            Path::new("/root/lib/a/lib-2"),
4016            Default::default(),
4017        )
4018        .await
4019        .unwrap();
4020        executor.run_until_parked();
4021        tree.read_with(cx, |tree, _| {
4022            assert_eq!(
4023                tree.entries(false)
4024                    .map(|entry| entry.path.as_ref())
4025                    .collect::<Vec<_>>(),
4026                vec![
4027                    Path::new(""),
4028                    Path::new("lib"),
4029                    Path::new("lib/a"),
4030                    Path::new("lib/a/a.txt"),
4031                    Path::new("lib/a/lib-2"),
4032                    Path::new("lib/b"),
4033                    Path::new("lib/b/b.txt"),
4034                    Path::new("lib/b/lib"),
4035                ]
4036            );
4037        });
4038    }
4039
4040    #[gpui::test]
4041    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
4042        // .gitignores are handled explicitly by Zed and do not use the git
4043        // machinery that the git_tests module checks
4044        let parent_dir = temp_tree(json!({
4045            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
4046            "tree": {
4047                ".git": {},
4048                ".gitignore": "ignored-dir\n",
4049                "tracked-dir": {
4050                    "tracked-file1": "",
4051                    "ancestor-ignored-file1": "",
4052                },
4053                "ignored-dir": {
4054                    "ignored-file1": ""
4055                }
4056            }
4057        }));
4058        let dir = parent_dir.path().join("tree");
4059
4060        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4061
4062        let tree = Worktree::local(
4063            client,
4064            dir.as_path(),
4065            true,
4066            Arc::new(RealFs),
4067            Default::default(),
4068            &mut cx.to_async(),
4069        )
4070        .await
4071        .unwrap();
4072        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4073            .await;
4074        tree.flush_fs_events(cx).await;
4075        cx.read(|cx| {
4076            let tree = tree.read(cx);
4077            assert!(
4078                !tree
4079                    .entry_for_path("tracked-dir/tracked-file1")
4080                    .unwrap()
4081                    .is_ignored
4082            );
4083            assert!(
4084                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
4085                    .unwrap()
4086                    .is_ignored
4087            );
4088            assert!(
4089                tree.entry_for_path("ignored-dir/ignored-file1")
4090                    .unwrap()
4091                    .is_ignored
4092            );
4093        });
4094
4095        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
4096        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
4097        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
4098        tree.flush_fs_events(cx).await;
4099        cx.read(|cx| {
4100            let tree = tree.read(cx);
4101            assert!(
4102                !tree
4103                    .entry_for_path("tracked-dir/tracked-file2")
4104                    .unwrap()
4105                    .is_ignored
4106            );
4107            assert!(
4108                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
4109                    .unwrap()
4110                    .is_ignored
4111            );
4112            assert!(
4113                tree.entry_for_path("ignored-dir/ignored-file2")
4114                    .unwrap()
4115                    .is_ignored
4116            );
4117            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
4118        });
4119    }
4120
4121    #[gpui::test]
4122    async fn test_write_file(cx: &mut TestAppContext) {
4123        let dir = temp_tree(json!({
4124            ".git": {},
4125            ".gitignore": "ignored-dir\n",
4126            "tracked-dir": {},
4127            "ignored-dir": {}
4128        }));
4129
4130        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4131
4132        let tree = Worktree::local(
4133            client,
4134            dir.path(),
4135            true,
4136            Arc::new(RealFs),
4137            Default::default(),
4138            &mut cx.to_async(),
4139        )
4140        .await
4141        .unwrap();
4142        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4143            .await;
4144        tree.flush_fs_events(cx).await;
4145
4146        tree.update(cx, |tree, cx| {
4147            tree.as_local().unwrap().write_file(
4148                Path::new("tracked-dir/file.txt"),
4149                "hello".into(),
4150                Default::default(),
4151                cx,
4152            )
4153        })
4154        .await
4155        .unwrap();
4156        tree.update(cx, |tree, cx| {
4157            tree.as_local().unwrap().write_file(
4158                Path::new("ignored-dir/file.txt"),
4159                "world".into(),
4160                Default::default(),
4161                cx,
4162            )
4163        })
4164        .await
4165        .unwrap();
4166
4167        tree.read_with(cx, |tree, _| {
4168            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
4169            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
4170            assert!(!tracked.is_ignored);
4171            assert!(ignored.is_ignored);
4172        });
4173    }
4174
4175    #[gpui::test(iterations = 30)]
4176    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
4177        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4178
4179        let fs = FakeFs::new(cx.background());
4180        fs.insert_tree(
4181            "/root",
4182            json!({
4183                "b": {},
4184                "c": {},
4185                "d": {},
4186            }),
4187        )
4188        .await;
4189
4190        let tree = Worktree::local(
4191            client,
4192            "/root".as_ref(),
4193            true,
4194            fs,
4195            Default::default(),
4196            &mut cx.to_async(),
4197        )
4198        .await
4199        .unwrap();
4200
4201        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4202
4203        let entry = tree
4204            .update(cx, |tree, cx| {
4205                tree.as_local_mut()
4206                    .unwrap()
4207                    .create_entry("a/e".as_ref(), true, cx)
4208            })
4209            .await
4210            .unwrap();
4211        assert!(entry.is_dir());
4212
4213        cx.foreground().run_until_parked();
4214        tree.read_with(cx, |tree, _| {
4215            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
4216        });
4217
4218        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4219        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
4220        snapshot1.apply_remote_update(update).unwrap();
4221        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
4222    }
4223
4224    #[gpui::test(iterations = 100)]
4225    async fn test_random_worktree_operations_during_initial_scan(
4226        cx: &mut TestAppContext,
4227        mut rng: StdRng,
4228    ) {
4229        let operations = env::var("OPERATIONS")
4230            .map(|o| o.parse().unwrap())
4231            .unwrap_or(5);
4232        let initial_entries = env::var("INITIAL_ENTRIES")
4233            .map(|o| o.parse().unwrap())
4234            .unwrap_or(20);
4235
4236        let root_dir = Path::new("/test");
4237        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4238        fs.as_fake().insert_tree(root_dir, json!({})).await;
4239        for _ in 0..initial_entries {
4240            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4241        }
4242        log::info!("generated initial tree");
4243
4244        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4245        let worktree = Worktree::local(
4246            client.clone(),
4247            root_dir,
4248            true,
4249            fs.clone(),
4250            Default::default(),
4251            &mut cx.to_async(),
4252        )
4253        .await
4254        .unwrap();
4255
4256        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4257
4258        for _ in 0..operations {
4259            worktree
4260                .update(cx, |worktree, cx| {
4261                    randomly_mutate_worktree(worktree, &mut rng, cx)
4262                })
4263                .await
4264                .log_err();
4265            worktree.read_with(cx, |tree, _| {
4266                tree.as_local().unwrap().snapshot.check_invariants()
4267            });
4268
4269            if rng.gen_bool(0.6) {
4270                let new_snapshot =
4271                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4272                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4273                snapshot.apply_remote_update(update.clone()).unwrap();
4274                assert_eq!(
4275                    snapshot.to_vec(true),
4276                    new_snapshot.to_vec(true),
4277                    "incorrect snapshot after update {:?}",
4278                    update
4279                );
4280            }
4281        }
4282
4283        worktree
4284            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4285            .await;
4286        worktree.read_with(cx, |tree, _| {
4287            tree.as_local().unwrap().snapshot.check_invariants()
4288        });
4289
4290        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4291        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4292        snapshot.apply_remote_update(update.clone()).unwrap();
4293        assert_eq!(
4294            snapshot.to_vec(true),
4295            new_snapshot.to_vec(true),
4296            "incorrect snapshot after update {:?}",
4297            update
4298        );
4299    }
4300
4301    #[gpui::test(iterations = 100)]
4302    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4303        let operations = env::var("OPERATIONS")
4304            .map(|o| o.parse().unwrap())
4305            .unwrap_or(40);
4306        let initial_entries = env::var("INITIAL_ENTRIES")
4307            .map(|o| o.parse().unwrap())
4308            .unwrap_or(20);
4309
4310        let root_dir = Path::new("/test");
4311        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4312        fs.as_fake().insert_tree(root_dir, json!({})).await;
4313        for _ in 0..initial_entries {
4314            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4315        }
4316        log::info!("generated initial tree");
4317
4318        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4319        let worktree = Worktree::local(
4320            client.clone(),
4321            root_dir,
4322            true,
4323            fs.clone(),
4324            Default::default(),
4325            &mut cx.to_async(),
4326        )
4327        .await
4328        .unwrap();
4329
4330        // The worktree's `UpdatedEntries` event can be used to follow along with
4331        // all changes to the worktree's snapshot.
4332        worktree.update(cx, |tree, cx| {
4333            let mut paths = tree
4334                .entries(true)
4335                .map(|e| (e.path.clone(), e.mtime))
4336                .collect::<Vec<_>>();
4337
4338            cx.subscribe(&worktree, move |tree, _, event, _| {
4339                if let Event::UpdatedEntries(changes) = event {
4340                    for ((path, _), change_type) in changes.iter() {
4341                        let mtime = tree.entry_for_path(&path).map(|e| e.mtime);
4342                        let path = path.clone();
4343                        let ix = match paths.binary_search_by_key(&&path, |e| &e.0) {
4344                            Ok(ix) | Err(ix) => ix,
4345                        };
4346                        match change_type {
4347                            PathChange::Loaded => {
4348                                paths.insert(ix, (path, mtime.unwrap()));
4349                            }
4350                            PathChange::Added => {
4351                                paths.insert(ix, (path, mtime.unwrap()));
4352                            }
4353                            PathChange::Removed => {
4354                                paths.remove(ix);
4355                            }
4356                            PathChange::Updated => {
4357                                let entry = paths.get_mut(ix).unwrap();
4358                                assert_eq!(entry.0, path);
4359                                entry.1 = mtime.unwrap();
4360                            }
4361                            PathChange::AddedOrUpdated => {
4362                                if paths.get(ix).map(|e| &e.0) == Some(&path) {
4363                                    paths.get_mut(ix).unwrap().1 = mtime.unwrap();
4364                                } else {
4365                                    paths.insert(ix, (path, mtime.unwrap()));
4366                                }
4367                            }
4368                        }
4369                    }
4370
4371                    let new_paths = tree
4372                        .entries(true)
4373                        .map(|e| (e.path.clone(), e.mtime))
4374                        .collect::<Vec<_>>();
4375                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
4376                }
4377            })
4378            .detach();
4379        });
4380
4381        worktree
4382            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4383            .await;
4384
4385        fs.as_fake().pause_events();
4386        let mut snapshots = Vec::new();
4387        let mut mutations_len = operations;
4388        while mutations_len > 1 {
4389            if rng.gen_bool(0.2) {
4390                worktree
4391                    .update(cx, |worktree, cx| {
4392                        randomly_mutate_worktree(worktree, &mut rng, cx)
4393                    })
4394                    .await
4395                    .log_err();
4396            } else {
4397                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4398            }
4399
4400            let buffered_event_count = fs.as_fake().buffered_event_count();
4401            if buffered_event_count > 0 && rng.gen_bool(0.3) {
4402                let len = rng.gen_range(0..=buffered_event_count);
4403                log::info!("flushing {} events", len);
4404                fs.as_fake().flush_events(len);
4405            } else {
4406                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4407                mutations_len -= 1;
4408            }
4409
4410            cx.foreground().run_until_parked();
4411            if rng.gen_bool(0.2) {
4412                log::info!("storing snapshot {}", snapshots.len());
4413                let snapshot =
4414                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4415                snapshots.push(snapshot);
4416            }
4417        }
4418
4419        log::info!("quiescing");
4420        fs.as_fake().flush_events(usize::MAX);
4421        cx.foreground().run_until_parked();
4422        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4423        snapshot.check_invariants();
4424
4425        {
4426            let new_worktree = Worktree::local(
4427                client.clone(),
4428                root_dir,
4429                true,
4430                fs.clone(),
4431                Default::default(),
4432                &mut cx.to_async(),
4433            )
4434            .await
4435            .unwrap();
4436            new_worktree
4437                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4438                .await;
4439            let new_snapshot =
4440                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4441            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4442        }
4443
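        // Verify that each earlier snapshot can be caught up to the final state by
        // applying an update built against it, with ignored entries optionally
        // excluded from both the baseline and the update.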
4444        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4445            let include_ignored = rng.gen::<bool>();
4446            if !include_ignored {
4447                let mut entries_by_path_edits = Vec::new();
4448                let mut entries_by_id_edits = Vec::new();
4449                for entry in prev_snapshot
4450                    .entries_by_id
4451                    .cursor::<()>()
4452                    .filter(|e| e.is_ignored)
4453                {
4454                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4455                    entries_by_id_edits.push(Edit::Remove(entry.id));
4456                }
4457
4458                prev_snapshot
4459                    .entries_by_path
4460                    .edit(entries_by_path_edits, &());
4461                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4462            }
4463
4464            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4465            prev_snapshot.apply_remote_update(update.clone()).unwrap();
4466            assert_eq!(
4467                prev_snapshot.to_vec(include_ignored),
4468                snapshot.to_vec(include_ignored),
4469                "wrong update for snapshot {i}. update: {:?}",
4470                update
4471            );
4472        }
4473    }
4474
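    // Applies one random mutation through the worktree API itself: deleting an
    // entry, renaming it to a freshly generated path, or creating/overwriting a
    // file or directory beneath an existing entry.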
4475    fn randomly_mutate_worktree(
4476        worktree: &mut Worktree,
4477        rng: &mut impl Rng,
4478        cx: &mut ModelContext<Worktree>,
4479    ) -> Task<Result<()>> {
4480        log::info!("mutating worktree");
4481        let worktree = worktree.as_local_mut().unwrap();
4482        let snapshot = worktree.snapshot();
4483        let entry = snapshot.entries(false).choose(rng).unwrap();
4484
4485        match rng.gen_range(0_u32..100) {
4486            0..=33 if entry.path.as_ref() != Path::new("") => {
4487                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4488                worktree.delete_entry(entry.id, cx).unwrap()
4489            }
4490            ..=66 if entry.path.as_ref() != Path::new("") => {
4491                let other_entry = snapshot.entries(false).choose(rng).unwrap();
4492                let new_parent_path = if other_entry.is_dir() {
4493                    other_entry.path.clone()
4494                } else {
4495                    other_entry.path.parent().unwrap().into()
4496                };
4497                let mut new_path = new_parent_path.join(gen_name(rng));
4498                if new_path.starts_with(&entry.path) {
4499                    new_path = gen_name(rng).into();
4500                }
4501
4502                log::info!(
4503                    "renaming entry {:?} ({}) to {:?}",
4504                    entry.path,
4505                    entry.id.0,
4506                    new_path
4507                );
4508                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4509                cx.foreground().spawn(async move {
4510                    task.await?;
4511                    Ok(())
4512                })
4513            }
4514            _ => {
4515                let task = if entry.is_dir() {
4516                    let child_path = entry.path.join(gen_name(rng));
4517                    let is_dir = rng.gen_bool(0.3);
4518                    log::info!(
4519                        "creating {} at {:?}",
4520                        if is_dir { "dir" } else { "file" },
4521                        child_path,
4522                    );
4523                    worktree.create_entry(child_path, is_dir, cx)
4524                } else {
4525                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4526                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4527                };
4528                cx.foreground().spawn(async move {
4529                    task.await?;
4530                    Ok(())
4531                })
4532            }
4533        }
4534    }
4535
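    // Mutates the fake filesystem directly, bypassing the worktree API: inserts new
    // files or directories, occasionally writes a .gitignore covering random
    // subpaths, and otherwise renames or deletes an existing path.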
4536    async fn randomly_mutate_fs(
4537        fs: &Arc<dyn Fs>,
4538        root_path: &Path,
4539        insertion_probability: f64,
4540        rng: &mut impl Rng,
4541    ) {
4542        log::info!("mutating fs");
4543        let mut files = Vec::new();
4544        let mut dirs = Vec::new();
4545        for path in fs.as_fake().paths() {
4546            if path.starts_with(root_path) {
4547                if fs.is_file(&path).await {
4548                    files.push(path);
4549                } else {
4550                    dirs.push(path);
4551                }
4552            }
4553        }
4554
4555        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4556            let path = dirs.choose(rng).unwrap();
4557            let new_path = path.join(gen_name(rng));
4558
4559            if rng.gen() {
4560                log::info!(
4561                    "creating dir {:?}",
4562                    new_path.strip_prefix(root_path).unwrap()
4563                );
4564                fs.create_dir(&new_path).await.unwrap();
4565            } else {
4566                log::info!(
4567                    "creating file {:?}",
4568                    new_path.strip_prefix(root_path).unwrap()
4569                );
4570                fs.create_file(&new_path, Default::default()).await.unwrap();
4571            }
4572        } else if rng.gen_bool(0.05) {
4573            let ignore_dir_path = dirs.choose(rng).unwrap();
4574            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4575
4576            let subdirs = dirs
4577                .iter()
4578                .filter(|d| d.starts_with(&ignore_dir_path))
4579                .cloned()
4580                .collect::<Vec<_>>();
4581            let subfiles = files
4582                .iter()
4583                .filter(|d| d.starts_with(&ignore_dir_path))
4584                .cloned()
4585                .collect::<Vec<_>>();
4586            let files_to_ignore = {
4587                let len = rng.gen_range(0..=subfiles.len());
4588                subfiles.choose_multiple(rng, len)
4589            };
4590            let dirs_to_ignore = {
4591                let len = rng.gen_range(0..subdirs.len());
4592                subdirs.choose_multiple(rng, len)
4593            };
4594
4595            let mut ignore_contents = String::new();
4596            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4597                writeln!(
4598                    ignore_contents,
4599                    "{}",
4600                    path_to_ignore
4601                        .strip_prefix(&ignore_dir_path)
4602                        .unwrap()
4603                        .to_str()
4604                        .unwrap()
4605                )
4606                .unwrap();
4607            }
4608            log::info!(
4609                "creating gitignore {:?} with contents:\n{}",
4610                ignore_path.strip_prefix(&root_path).unwrap(),
4611                ignore_contents
4612            );
4613            fs.save(
4614                &ignore_path,
4615                &ignore_contents.as_str().into(),
4616                Default::default(),
4617            )
4618            .await
4619            .unwrap();
4620        } else {
4621            let old_path = {
4622                let file_path = files.choose(rng);
4623                let dir_path = dirs[1..].choose(rng);
4624                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4625            };
4626
4627            let is_rename = rng.gen();
4628            if is_rename {
4629                let new_path_parent = dirs
4630                    .iter()
4631                    .filter(|d| !d.starts_with(old_path))
4632                    .choose(rng)
4633                    .unwrap();
4634
4635                let overwrite_existing_dir =
4636                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4637                let new_path = if overwrite_existing_dir {
4638                    fs.remove_dir(
4639                        &new_path_parent,
4640                        RemoveOptions {
4641                            recursive: true,
4642                            ignore_if_not_exists: true,
4643                        },
4644                    )
4645                    .await
4646                    .unwrap();
4647                    new_path_parent.to_path_buf()
4648                } else {
4649                    new_path_parent.join(gen_name(rng))
4650                };
4651
4652                log::info!(
4653                    "renaming {:?} to {}{:?}",
4654                    old_path.strip_prefix(&root_path).unwrap(),
4655                    if overwrite_existing_dir {
4656                        "overwrite "
4657                    } else {
4658                        ""
4659                    },
4660                    new_path.strip_prefix(&root_path).unwrap()
4661                );
4662                fs.rename(
4663                    &old_path,
4664                    &new_path,
4665                    fs::RenameOptions {
4666                        overwrite: true,
4667                        ignore_if_exists: true,
4668                    },
4669                )
4670                .await
4671                .unwrap();
4672            } else if fs.is_file(&old_path).await {
4673                log::info!(
4674                    "deleting file {:?}",
4675                    old_path.strip_prefix(&root_path).unwrap()
4676                );
4677                fs.remove_file(old_path, Default::default()).await.unwrap();
4678            } else {
4679                log::info!(
4680                    "deleting dir {:?}",
4681                    old_path.strip_prefix(&root_path).unwrap()
4682                );
4683                fs.remove_dir(
4684                    &old_path,
4685                    RemoveOptions {
4686                        recursive: true,
4687                        ignore_if_not_exists: true,
4688                    },
4689                )
4690                .await
4691                .unwrap();
4692            }
4693        }
4694    }
4695
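    // Generates a random six-character alphanumeric name for new files and directories.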
4696    fn gen_name(rng: &mut impl Rng) -> String {
4697        (0..6)
4698            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4699            .map(char::from)
4700            .collect()
4701    }
4702
4703    impl LocalSnapshot {
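        // Verifies that the snapshot's internal indexes agree with each other:
        // entries_by_path and entries_by_id contain the same set of entries, the file
        // iterators visit every file entry in order, a manual walk via child_entries
        // matches the sorted entry order, and every tracked .gitignore still has a
        // corresponding entry in the tree.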
4704        fn check_invariants(&self) {
4705            assert_eq!(
4706                self.entries_by_path
4707                    .cursor::<()>()
4708                    .map(|e| (&e.path, e.id))
4709                    .collect::<Vec<_>>(),
4710                self.entries_by_id
4711                    .cursor::<()>()
4712                    .map(|e| (&e.path, e.id))
4713                    .collect::<collections::BTreeSet<_>>()
4714                    .into_iter()
4715                    .collect::<Vec<_>>(),
4716                "entries_by_path and entries_by_id are inconsistent"
4717            );
4718
4719            let mut files = self.files(true, 0);
4720            let mut visible_files = self.files(false, 0);
4721            for entry in self.entries_by_path.cursor::<()>() {
4722                if entry.is_file() {
4723                    assert_eq!(files.next().unwrap().inode, entry.inode);
4724                    if !entry.is_ignored {
4725                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4726                    }
4727                }
4728            }
4729
4730            assert!(files.next().is_none());
4731            assert!(visible_files.next().is_none());
4732
4733            let mut dfs_paths_via_child_entries = Vec::new();
4734            let mut stack = vec![Path::new("")];
4735            while let Some(path) = stack.pop() {
4736                dfs_paths_via_child_entries.push(path);
4737                let ix = stack.len();
4738                for child_entry in self.child_entries(path) {
4739                    stack.insert(ix, &child_entry.path);
4740                }
4741            }
4742
4743            let dfs_paths_via_iter = self
4744                .entries_by_path
4745                .cursor::<()>()
4746                .map(|e| e.path.as_ref())
4747                .collect::<Vec<_>>();
4748            assert_eq!(dfs_paths_via_child_entries, dfs_paths_via_iter);
4749
4750            let dfs_paths_via_traversal = self
4751                .entries(true)
4752                .map(|e| e.path.as_ref())
4753                .collect::<Vec<_>>();
4754            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4755
4756            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4757                let ignore_parent_path =
4758                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4759                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4760                assert!(self
4761                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4762                    .is_some());
4763            }
4764        }
4765
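        // Flattens the snapshot into (path, inode, is_ignored) tuples, sorted by path,
        // for direct comparison between snapshots in assertions.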
4766        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4767            let mut paths = Vec::new();
4768            for entry in self.entries_by_path.cursor::<()>() {
4769                if include_ignored || !entry.is_ignored {
4770                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4771                }
4772            }
4773            paths.sort_by(|a, b| a.0.cmp(b.0));
4774            paths
4775        }
4776    }
4777
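    // Tests covering how the worktree tracks git repositories: work-directory
    // detection, status propagation, and reactions to .git changes on disk.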
4778    mod git_tests {
4779        use super::*;
4780        use pretty_assertions::assert_eq;
4781
4782        #[gpui::test]
4783        async fn test_rename_work_directory(cx: &mut TestAppContext) {
4784            let root = temp_tree(json!({
4785                "projects": {
4786                    "project1": {
4787                        "a": "",
4788                        "b": "",
4789                    }
4790                },
4791
4792            }));
4793            let root_path = root.path();
4794
4795            let http_client = FakeHttpClient::with_404_response();
4796            let client = cx.read(|cx| Client::new(http_client, cx));
4797            let tree = Worktree::local(
4798                client,
4799                root_path,
4800                true,
4801                Arc::new(RealFs),
4802                Default::default(),
4803                &mut cx.to_async(),
4804            )
4805            .await
4806            .unwrap();
4807
4808            let repo = git_init(&root_path.join("projects/project1"));
4809            git_add("a", &repo);
4810            git_commit("init", &repo);
4811            std::fs::write(root_path.join("projects/project1/a"), "aa").ok();
4812
4813            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4814                .await;
4815
4816            tree.flush_fs_events(cx).await;
4817
4818            cx.read(|cx| {
4819                let tree = tree.read(cx);
4820                let (work_dir, repo) = tree.repositories().next().unwrap();
4821                assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
4822                assert_eq!(
4823                    repo.status_for_file(tree, Path::new("projects/project1/a")),
4824                    Some(GitFileStatus::Modified)
4825                );
4826                assert_eq!(
4827                    repo.status_for_file(tree, Path::new("projects/project1/b")),
4828                    Some(GitFileStatus::Added)
4829                );
4830            });
4831
4832            std::fs::rename(
4833                root_path.join("projects/project1"),
4834                root_path.join("projects/project2"),
4835            )
4836            .ok();
4837            tree.flush_fs_events(cx).await;
4838
4839            cx.read(|cx| {
4840                let tree = tree.read(cx);
4841                let (work_dir, repo) = tree.repositories().next().unwrap();
4842                assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
4843                assert_eq!(
4844                    repo.status_for_file(tree, Path::new("projects/project2/a")),
4845                    Some(GitFileStatus::Modified)
4846                );
4847                assert_eq!(
4848                    repo.status_for_file(tree, Path::new("projects/project2/b")),
4849                    Some(GitFileStatus::Added)
4850                );
4851            });
4852        }
4853
4854        #[gpui::test]
4855        async fn test_git_repository_for_path(cx: &mut TestAppContext) {
4856            let root = temp_tree(json!({
4857                "c.txt": "",
4858                "dir1": {
4859                    ".git": {},
4860                    "deps": {
4861                        "dep1": {
4862                            ".git": {},
4863                            "src": {
4864                                "a.txt": ""
4865                            }
4866                        }
4867                    },
4868                    "src": {
4869                        "b.txt": ""
4870                    }
4871                },
4872            }));
4873
4874            let http_client = FakeHttpClient::with_404_response();
4875            let client = cx.read(|cx| Client::new(http_client, cx));
4876            let tree = Worktree::local(
4877                client,
4878                root.path(),
4879                true,
4880                Arc::new(RealFs),
4881                Default::default(),
4882                &mut cx.to_async(),
4883            )
4884            .await
4885            .unwrap();
4886
4887            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4888                .await;
4889            tree.flush_fs_events(cx).await;
4890
4891            tree.read_with(cx, |tree, _cx| {
4892                let tree = tree.as_local().unwrap();
4893
4894                assert!(tree.repository_for_path("c.txt".as_ref()).is_none());
4895
4896                let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
4897                assert_eq!(
4898                    entry
4899                        .work_directory(tree)
4900                        .map(|directory| directory.as_ref().to_owned()),
4901                    Some(Path::new("dir1").to_owned())
4902                );
4903
4904                let entry = tree
4905                    .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
4906                    .unwrap();
4907                assert_eq!(
4908                    entry
4909                        .work_directory(tree)
4910                        .map(|directory| directory.as_ref().to_owned()),
4911                    Some(Path::new("dir1/deps/dep1").to_owned())
4912                );
4913
4914                let entries = tree.files(false, 0);
4915
4916                let paths_with_repos = tree
4917                    .entries_with_repositories(entries)
4918                    .map(|(entry, repo)| {
4919                        (
4920                            entry.path.as_ref(),
4921                            repo.and_then(|repo| {
4922                                repo.work_directory(&tree)
4923                                    .map(|work_directory| work_directory.0.to_path_buf())
4924                            }),
4925                        )
4926                    })
4927                    .collect::<Vec<_>>();
4928
4929                assert_eq!(
4930                    paths_with_repos,
4931                    &[
4932                        (Path::new("c.txt"), None),
4933                        (
4934                            Path::new("dir1/deps/dep1/src/a.txt"),
4935                            Some(Path::new("dir1/deps/dep1").into())
4936                        ),
4937                        (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
4938                    ]
4939                );
4940            });
4941
4942            let repo_update_events = Arc::new(Mutex::new(vec![]));
4943            tree.update(cx, |_, cx| {
4944                let repo_update_events = repo_update_events.clone();
4945                cx.subscribe(&tree, move |_, _, event, _| {
4946                    if let Event::UpdatedGitRepositories(update) = event {
4947                        repo_update_events.lock().push(update.clone());
4948                    }
4949                })
4950                .detach();
4951            });
4952
4953            std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
4954            tree.flush_fs_events(cx).await;
4955
4956            assert_eq!(
4957                repo_update_events.lock()[0]
4958                    .keys()
4959                    .cloned()
4960                    .collect::<Vec<Arc<Path>>>(),
4961                vec![Path::new("dir1").into()]
4962            );
4963
4964            std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
4965            tree.flush_fs_events(cx).await;
4966
4967            tree.read_with(cx, |tree, _cx| {
4968                let tree = tree.as_local().unwrap();
4969
4970                assert!(tree
4971                    .repository_for_path("dir1/src/b.txt".as_ref())
4972                    .is_none());
4973            });
4974        }
4975
4976        #[gpui::test]
4977        async fn test_git_status(cx: &mut TestAppContext) {
4978            const IGNORE_RULE: &'static str = "**/target";
4979
4980            let root = temp_tree(json!({
4981                "project": {
4982                    "a.txt": "a",
4983                    "b.txt": "bb",
4984                    "c": {
4985                        "d": {
4986                            "e.txt": "eee"
4987                        }
4988                    },
4989                    "f.txt": "ffff",
4990                    "target": {
4991                        "build_file": "???"
4992                    },
4993                    ".gitignore": IGNORE_RULE
4994                },
4995
4996            }));
4997
4998            let http_client = FakeHttpClient::with_404_response();
4999            let client = cx.read(|cx| Client::new(http_client, cx));
5000            let tree = Worktree::local(
5001                client,
5002                root.path(),
5003                true,
5004                Arc::new(RealFs),
5005                Default::default(),
5006                &mut cx.to_async(),
5007            )
5008            .await
5009            .unwrap();
5010
5011            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
5012                .await;
5013
5014            const A_TXT: &'static str = "a.txt";
5015            const B_TXT: &'static str = "b.txt";
5016            const E_TXT: &'static str = "c/d/e.txt";
5017            const F_TXT: &'static str = "f.txt";
5018            const DOTGITIGNORE: &'static str = ".gitignore";
5019            const BUILD_FILE: &'static str = "target/build_file";
5020
5021            let work_dir = root.path().join("project");
5022            let mut repo = git_init(work_dir.as_path());
5023            repo.add_ignore_rule(IGNORE_RULE).unwrap();
5024            git_add(Path::new(A_TXT), &repo);
5025            git_add(Path::new(E_TXT), &repo);
5026            git_add(Path::new(DOTGITIGNORE), &repo);
5027            git_commit("Initial commit", &repo);
5028
5029            std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
5030
5031            tree.flush_fs_events(cx).await;
5032
5033            // Check that the right git state is observed on startup
5034            tree.read_with(cx, |tree, _cx| {
5035                let snapshot = tree.snapshot();
5036                assert_eq!(snapshot.repository_entries.iter().count(), 1);
5037                let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
5038                assert_eq!(dir.0.as_ref(), Path::new("project"));
5039
5040                assert_eq!(repo.statuses.iter().count(), 3);
5041                assert_eq!(
5042                    repo.statuses.get(&Path::new(A_TXT).into()),
5043                    Some(&GitFileStatus::Modified)
5044                );
5045                assert_eq!(
5046                    repo.statuses.get(&Path::new(B_TXT).into()),
5047                    Some(&GitFileStatus::Added)
5048                );
5049                assert_eq!(
5050                    repo.statuses.get(&Path::new(F_TXT).into()),
5051                    Some(&GitFileStatus::Added)
5052                );
5053            });
5054
5055            git_add(Path::new(A_TXT), &repo);
5056            git_add(Path::new(B_TXT), &repo);
5057            git_commit("Committing modified and added", &repo);
5058            tree.flush_fs_events(cx).await;
5059
5060            // Check that repo only changes are tracked
5061            tree.read_with(cx, |tree, _cx| {
5062                let snapshot = tree.snapshot();
5063                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5064
5065                assert_eq!(repo.statuses.iter().count(), 1);
5066                assert_eq!(
5067                    repo.statuses.get(&Path::new(F_TXT).into()),
5068                    Some(&GitFileStatus::Added)
5069                );
5070            });
5071
5072            git_reset(0, &repo);
5073            git_remove_index(Path::new(B_TXT), &repo);
5074            git_stash(&mut repo);
5075            std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
5076            std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
5077            tree.flush_fs_events(cx).await;
5078
5079            // Check that more complex repo changes are tracked
5080            tree.read_with(cx, |tree, _cx| {
5081                let snapshot = tree.snapshot();
5082                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5083
5084                assert_eq!(repo.statuses.iter().count(), 3);
5085                assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
5086                assert_eq!(
5087                    repo.statuses.get(&Path::new(B_TXT).into()),
5088                    Some(&GitFileStatus::Added)
5089                );
5090                assert_eq!(
5091                    repo.statuses.get(&Path::new(E_TXT).into()),
5092                    Some(&GitFileStatus::Modified)
5093                );
5094                assert_eq!(
5095                    repo.statuses.get(&Path::new(F_TXT).into()),
5096                    Some(&GitFileStatus::Added)
5097                );
5098            });
5099
5100            std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
5101            std::fs::remove_dir_all(work_dir.join("c")).unwrap();
5102            std::fs::write(
5103                work_dir.join(DOTGITIGNORE),
5104                [IGNORE_RULE, "f.txt"].join("\n"),
5105            )
5106            .unwrap();
5107
5108            git_add(Path::new(DOTGITIGNORE), &repo);
5109            git_commit("Committing modified git ignore", &repo);
5110
5111            tree.flush_fs_events(cx).await;
5112
5113            // Check that non-repo behavior is tracked
5114            tree.read_with(cx, |tree, _cx| {
5115                let snapshot = tree.snapshot();
5116                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5117
5118                assert_eq!(repo.statuses.iter().count(), 0);
5119            });
5120
5121            let mut renamed_dir_name = "first_directory/second_directory";
5122            const RENAMED_FILE: &'static str = "rf.txt";
5123
5124            std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
5125            std::fs::write(
5126                work_dir.join(renamed_dir_name).join(RENAMED_FILE),
5127                "new-contents",
5128            )
5129            .unwrap();
5130
5131            tree.flush_fs_events(cx).await;
5132
5133            tree.read_with(cx, |tree, _cx| {
5134                let snapshot = tree.snapshot();
5135                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5136
5137                assert_eq!(repo.statuses.iter().count(), 1);
5138                assert_eq!(
5139                    repo.statuses
5140                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5141                    Some(&GitFileStatus::Added)
5142                );
5143            });
5144
5145            renamed_dir_name = "new_first_directory/second_directory";
5146
5147            std::fs::rename(
5148                work_dir.join("first_directory"),
5149                work_dir.join("new_first_directory"),
5150            )
5151            .unwrap();
5152
5153            tree.flush_fs_events(cx).await;
5154
5155            tree.read_with(cx, |tree, _cx| {
5156                let snapshot = tree.snapshot();
5157                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5158
5159                assert_eq!(repo.statuses.iter().count(), 1);
5160                assert_eq!(
5161                    repo.statuses
5162                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5163                    Some(&GitFileStatus::Added)
5164                );
5165            });
5166        }
5167
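        // The helpers below drive a real git2 repository on disk so the tests above
        // can assert on the worktree's view of its status.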
5168        #[track_caller]
5169        fn git_init(path: &Path) -> git2::Repository {
5170            git2::Repository::init(path).expect("Failed to initialize git repository")
5171        }
5172
5173        #[track_caller]
5174        fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5175            let path = path.as_ref();
5176            let mut index = repo.index().expect("Failed to get index");
5177            index.add_path(path).expect("Failed to add file to index");
5178            index.write().expect("Failed to write index");
5179        }
5180
5181        #[track_caller]
5182        fn git_remove_index(path: &Path, repo: &git2::Repository) {
5183            let mut index = repo.index().expect("Failed to get index");
5184            index.remove_path(path).expect("Failed to remove file from index");
5185            index.write().expect("Failed to write index");
5186        }
5187
5188        #[track_caller]
5189        fn git_commit(msg: &'static str, repo: &git2::Repository) {
5190            use git2::Signature;
5191
5192            let signature = Signature::now("test", "test@zed.dev").unwrap();
5193            let oid = repo.index().unwrap().write_tree().unwrap();
5194            let tree = repo.find_tree(oid).unwrap();
5195            if let Ok(head) = repo.head() {
5196                let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5197
5198                let parent_commit = parent_obj.as_commit().unwrap();
5199
5200                repo.commit(
5201                    Some("HEAD"),
5202                    &signature,
5203                    &signature,
5204                    msg,
5205                    &tree,
5206                    &[parent_commit],
5207                )
5208                .expect("Failed to commit with parent");
5209            } else {
5210                repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5211                    .expect("Failed to commit");
5212            }
5213        }
5214
5215        #[track_caller]
5216        fn git_stash(repo: &mut git2::Repository) {
5217            use git2::Signature;
5218
5219            let signature = Signature::now("test", "test@zed.dev").unwrap();
5220            repo.stash_save(&signature, "N/A", None)
5221                .expect("Failed to stash");
5222        }
5223
5224        #[track_caller]
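        // Soft-resets HEAD to one of the current commit's direct parents (selected by
        // `offset`), leaving the index and working tree untouched.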
5225        fn git_reset(offset: usize, repo: &git2::Repository) {
5226            let head = repo.head().expect("Couldn't get repo head");
5227            let object = head.peel(git2::ObjectType::Commit).unwrap();
5228            let commit = object.as_commit().unwrap();
5229            let new_head = commit
5230                .parents()
5231                .inspect(|parent| {
5232                    parent.message();
5233                })
5234                .skip(offset)
5235                .next()
5236                .expect("Not enough history");
5237            repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5238                .expect("Could not reset");
5239        }
5240
5241        #[allow(dead_code)]
5242        #[track_caller]
5243        fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5244            repo.statuses(None)
5245                .unwrap()
5246                .iter()
5247                .map(|status| (status.path().unwrap().to_string(), status.status()))
5248                .collect()
5249        }
5250    }
5251}