worktree.rs

mod ignore;
mod worktree_settings;
#[cfg(test)]
mod worktree_tests;

use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context as _, Result};
use clock::ReplicaId;
use collections::{HashMap, HashSet, VecDeque};
use fs::{copy_recursive, Fs, RemoveOptions, Watcher};
use futures::{
    channel::{
        mpsc::{self, UnboundedSender},
        oneshot,
    },
    select_biased,
    stream::select,
    task::Poll,
    FutureExt as _, Stream, StreamExt,
};
use fuzzy::CharBag;
use git::{
    repository::{GitFileStatus, GitRepository, RepoPath},
    status::GitStatus,
    DOT_GIT, GITIGNORE,
};
use gpui::{
    AppContext, AsyncAppContext, BackgroundExecutor, Context, EventEmitter, Model, ModelContext,
    Task,
};
use ignore::IgnoreStack;
use parking_lot::Mutex;
use paths::local_settings_folder_relative_path;
use postage::{
    barrier,
    prelude::{Sink as _, Stream as _},
    watch,
};
use rpc::proto::{self, AnyProtoClient};
use settings::{Settings, SettingsLocation, SettingsStore};
use smol::channel::{self, Sender};
use std::{
    any::Any,
    cmp::{self, Ordering},
    convert::TryFrom,
    ffi::OsStr,
    fmt,
    future::Future,
    mem,
    ops::{AddAssign, Deref, DerefMut, Sub},
    path::{Path, PathBuf},
    pin::Pin,
    sync::{
        atomic::{AtomicUsize, Ordering::SeqCst},
        Arc,
    },
    time::{Duration, Instant, SystemTime},
};
use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
use text::{LineEnding, Rope};
use util::{paths::home_dir, ResultExt};
pub use worktree_settings::WorktreeSettings;

#[cfg(feature = "test-support")]
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
#[cfg(not(feature = "test-support"))]
pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);

#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);

impl From<WorktreeId> for usize {
    fn from(value: WorktreeId) -> Self {
        value.0
    }
}

/// A set of local or remote files that are being opened as part of a project.
/// Responsible for tracking related FS (for local) or collab (for remote) events and the corresponding updates.
/// Stores git repository data and the diagnostics for the file(s).
///
/// Has an absolute path, and may be set to be visible in the Zed UI or not.
/// May correspond to a directory or a single file.
/// Possible examples:
/// * a drag-and-dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
/// * a directory opened in Zed — may be added as a visible entry to the current worktree
///
/// Uses [`Entry`] to track the state of each file/directory, and can look up absolute paths for entries.
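///
/// A hedged sketch of how callers typically branch on the two variants (the
/// `worktree` model handle and `cx` are assumed to be in scope; not a doctest):
///
/// ```ignore
/// if let Worktree::Local(local) = worktree.read(cx) {
///     // Direct filesystem access; `local.snapshot()` is authoritative.
/// } else {
///     // A remote worktree mirrors the host's state via collab updates.
/// }
/// ```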
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}

/// An entry, created in the worktree.
#[derive(Debug)]
pub enum CreatedEntry {
    /// Got created and indexed by the worktree, receiving a corresponding entry.
    Included(Entry),
    /// Got created, but not indexed due to falling under exclusion filters.
    Excluded { abs_path: PathBuf },
}

pub struct LoadedFile {
    pub file: Arc<File>,
    pub text: String,
    pub diff_base: Option<String>,
}

pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    scan_requests_tx: channel::Sender<ScanRequest>,
    path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_tasks: Vec<Task<()>>,
    update_observer: Option<UpdateObservationState>,
    fs: Arc<dyn Fs>,
    fs_case_sensitive: bool,
    visible: bool,
    next_entry_id: Arc<AtomicUsize>,
    settings: WorktreeSettings,
    share_private_files: bool,
}

struct ScanRequest {
    relative_paths: Vec<Arc<Path>>,
    done: barrier::Sender,
}

pub struct RemoteWorktree {
    snapshot: Snapshot,
    background_snapshot: Arc<Mutex<(Snapshot, Vec<proto::UpdateWorktree>)>>,
    project_id: u64,
    client: AnyProtoClient,
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    update_observer: Option<mpsc::UnboundedSender<proto::UpdateWorktree>>,
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    visible: bool,
    disconnected: bool,
}

#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
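    /// Invariant: `completed_scan_id <= scan_id`.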
    completed_scan_id: usize,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,

    /// If location_in_repo is set, it means the .git folder is external
    /// and in a parent folder of the project root.
    /// In that case, the work_directory field will point to the
    /// project-root and location_in_repo contains the location of the
    /// project-root in the repository.
    ///
    /// Example:
    ///
    ///     my_root_folder/          <-- repository root
    ///       .git
    ///       my_sub_folder_1/
    ///         project_root/        <-- Project root, Zed opened here
    ///           ...
    ///
    /// For this setup, the attributes will have the following values:
    ///
    ///     work_directory: pointing to "" entry
    ///     location_in_repo: Some("my_sub_folder_1/project_root")
    pub(crate) location_in_repo: Option<Arc<Path>>,
}

impl RepositoryEntry {
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
        self.into()
    }

    /// Returns the given project path relative to the root folder of the
    /// repository.
    /// If the root of the repository (and its .git folder) is located in a parent folder
    /// of the project root folder, then the returned RepoPath is relative to the root
    /// of the repository and is not a valid path inside the project.
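    ///
    /// A hedged sketch reusing the layout from the `location_in_repo` docs above
    /// (paths and bindings are illustrative; not a doctest):
    ///
    /// ```ignore
    /// // Zed opened "my_root_folder/my_sub_folder_1/project_root", so
    /// // location_in_repo is Some("my_sub_folder_1/project_root").
    /// let repo_path = repo_entry.relativize(&snapshot, Path::new("src/lib.rs"))?;
    /// // repo_path is "my_sub_folder_1/project_root/src/lib.rs", i.e. relative to
    /// // the repository root rather than the project root.
    /// ```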
    pub fn relativize(&self, worktree: &Snapshot, path: &Path) -> Result<RepoPath> {
        let relativize_path = |path: &Path| {
            let entry = worktree
                .entry_for_id(self.work_directory.0)
                .ok_or_else(|| anyhow!("entry not found"))?;

            let relativized_path = path
                .strip_prefix(&entry.path)
                .map_err(|_| anyhow!("could not relativize {:?} against {:?}", path, entry.path))?;

            Ok(relativized_path.into())
        };

        if let Some(location_in_repo) = &self.location_in_repo {
            relativize_path(&location_in_repo.join(path))
        } else {
            relativize_path(path)
        }
    }
}

impl From<&RepositoryEntry> for proto::RepositoryEntry {
    fn from(value: &RepositoryEntry) -> Self {
        proto::RepositoryEntry {
            work_directory_id: value.work_directory.to_proto(),
            branch: value.branch.as_ref().map(|str| str.to_string()),
        }
    }
}

/// This path corresponds to the 'content path' of a repository in relation
/// to Zed's project root.
/// In the majority of cases, this is the folder that contains the .git folder.
/// But if a sub-folder of a git repository is opened, this corresponds to the
/// project root and the .git folder is located in a parent directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(pub(crate) Arc<Path>);

impl Default for RepositoryWorkDirectory {
    fn default() -> Self {
        RepositoryWorkDirectory(Arc::from(Path::new("")))
    }
}

impl AsRef<Path> for RepositoryWorkDirectory {
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}

#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);

impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl From<ProjectEntryId> for WorkDirectoryEntry {
    fn from(value: ProjectEntryId) -> Self {
        WorkDirectoryEntry(value)
    }
}

#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by the absolute path of
    /// the directory that contains them.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
}

struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    scanned_dirs: HashSet<ProjectEntryId>,
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<(u64, SystemTime), ProjectEntryId>,
    changed_paths: Vec<Arc<Path>>,
    prev_snapshot: Snapshot,
}

#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) git_dir_scan_id: usize,
    pub(crate) repo_ptr: Arc<dyn GitRepository>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}

impl LocalRepositoryEntry {
    pub fn repo(&self) -> &Arc<dyn GitRepository> {
        &self.repo_ptr
    }
}

impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}

enum ScanState {
    Started,
    Updated {
        snapshot: LocalSnapshot,
        changes: UpdatedEntriesSet,
        barrier: Option<barrier::Sender>,
        scanning: bool,
    },
}

struct UpdateObservationState {
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}

#[derive(Clone)]
pub enum Event {
    UpdatedEntries(UpdatedEntriesSet),
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
    DeletedEntry(ProjectEntryId),
}

static EMPTY_PATH: &str = "";

impl EventEmitter<Event> for Worktree {}

impl Worktree {
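    /// Creates a local worktree rooted at `path` and starts scanning it in the
    /// background.
    ///
    /// A hedged usage sketch (assumes an `Arc<dyn Fs>` and an `AsyncAppContext`
    /// are already in hand; not a doctest):
    ///
    /// ```ignore
    /// let worktree = Worktree::local(
    ///     Path::new("/path/to/project"),
    ///     true,                          // visible in the UI
    ///     fs.clone(),
    ///     Arc::new(AtomicUsize::new(0)), // next_entry_id
    ///     &mut cx,
    /// )
    /// .await?;
    /// ```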
    pub async fn local(
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<Model<Self>> {
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        cx.new_model(move |cx: &mut ModelContext<Worktree>| {
            let worktree_id = cx.handle().entity_id().as_u64();
            let settings_location = Some(SettingsLocation {
                worktree_id: worktree_id as usize,
                path: Path::new(EMPTY_PATH),
            });

            let settings = WorktreeSettings::get(settings_location, cx).clone();
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    let settings = WorktreeSettings::get(settings_location, cx).clone();
                    if settings != this.settings {
                        this.settings = settings;
                        this.restart_background_scanners(cx);
                    }
                }
            })
            .detach();

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot::new(
                    cx.entity_id().as_u64(),
                    abs_path
                        .file_name()
                        .map_or(String::new(), |f| f.to_string_lossy().to_string()),
                    abs_path,
                ),
            };

            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                        None,
                    ),
                    fs.as_ref(),
                );
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let mut worktree = LocalWorktree {
                share_private_files: false,
                next_entry_id,
                snapshot,
                is_scanning: watch::channel_with(true),
                update_observer: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: Vec::new(),
                fs,
                fs_case_sensitive,
                visible,
                settings,
            };
            worktree.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
            Worktree::Local(worktree)
        })
    }

    pub fn remote(
        project_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: AnyProtoClient,
        cx: &mut AppContext,
    ) -> Model<Self> {
        cx.new_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot::new(
                worktree.id,
                worktree.root_name,
                Arc::from(PathBuf::from(worktree.abs_path)),
            );

            let background_snapshot = Arc::new(Mutex::new((snapshot.clone(), Vec::new())));
            let (background_updates_tx, mut background_updates_rx) = mpsc::unbounded();
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            let worktree = RemoteWorktree {
                client,
                project_id,
                replica_id,
                snapshot,
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(background_updates_tx),
                update_observer: None,
                snapshot_subscriptions: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            };

            // Apply updates to a separate snapshot in a background task, then
            // send them to a foreground task which updates the model.
            cx.background_executor()
                .spawn(async move {
                    while let Some(update) = background_updates_rx.next().await {
                        {
                            let mut lock = background_snapshot.lock();
                            if let Err(error) = lock.0.apply_remote_update(update.clone()) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            lock.1.push(update);
                        }
                        snapshot_updated_tx.send(()).await.ok();
                    }
                })
                .detach();

            // On the foreground task, update to the latest snapshot and notify
            // any update observer of all updates that led to that snapshot.
            cx.spawn(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_remote_mut().unwrap();
                        {
                            let mut lock = this.background_snapshot.lock();
                            this.snapshot = lock.0.clone();
                            if let Some(tx) = &this.update_observer {
                                for update in lock.1.drain(..) {
                                    tx.unbounded_send(update).ok();
                                }
                            }
                        };
                        cx.emit(Event::UpdatedEntries(Arc::default()));
                        cx.notify();
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(worktree)
        })
    }

    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.snapshot.clone(),
            Worktree::Remote(worktree) => worktree.snapshot.clone(),
        }
    }

    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.is_visible(),
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }

    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }

    pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<Arc<File>> {
        let entry = self.root_entry()?;
        Some(File::for_entry(entry.clone(), cx.handle()))
    }

    pub fn observe_updates<F, Fut>(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
        callback: F,
    ) where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: 'static + Send + Future<Output = bool>,
    {
        match self {
            Worktree::Local(this) => this.observe_updates(project_id, cx, callback),
            Worktree::Remote(this) => this.observe_updates(project_id, cx, callback),
        }
    }

    pub fn stop_observing_updates(&mut self) {
        match self {
            Worktree::Local(this) => {
                this.update_observer.take();
            }
            Worktree::Remote(this) => {
                this.update_observer.take();
            }
        }
    }

    #[cfg(any(test, feature = "test-support"))]
    pub fn has_update_observer(&self) -> bool {
        match self {
            Worktree::Local(this) => this.update_observer.is_some(),
            Worktree::Remote(this) => this.update_observer.is_some(),
        }
    }

    pub fn load_file(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<LoadedFile>> {
        match self {
            Worktree::Local(this) => this.load_file(path, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktrees can't yet load files")))
            }
        }
    }

    pub fn write_file(
        &self,
        path: &Path,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Arc<File>>> {
        match self {
            Worktree::Local(this) => this.write_file(path, text, line_ending, cx),
            Worktree::Remote(_) => {
                Task::ready(Err(anyhow!("remote worktree can't yet write files")))
            }
        }
    }

    pub fn create_entry(
        &mut self,
        path: impl Into<Arc<Path>>,
        is_directory: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path = path.into();
        let worktree_id = self.id();
        match self {
            Worktree::Local(this) => this.create_entry(path, is_directory, cx),
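            // For remote worktrees, the entry is created on the host: send an RPC
            // request and wait for it to report the created entry. If the host
            // excluded the path from its snapshot, fall back to the absolutized path.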
            Worktree::Remote(this) => {
                let project_id = this.project_id;
                let request = this.client.request(proto::CreateProjectEntry {
                    worktree_id: worktree_id.to_proto(),
                    project_id,
                    path: path.to_string_lossy().into(),
                    is_directory,
                });
                cx.spawn(move |this, mut cx| async move {
                    let response = request.await?;
                    match response.entry {
                        Some(entry) => this
                            .update(&mut cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(CreatedEntry::Included),
                        None => {
                            let abs_path = this.update(&mut cx, |worktree, _| {
                                worktree
                                    .absolutize(&path)
                                    .with_context(|| format!("absolutizing {path:?}"))
                            })??;
                            Ok(CreatedEntry::Excluded { abs_path })
                        }
                    }
                })
            }
        }
    }

    pub fn delete_entry(
        &mut self,
        entry_id: ProjectEntryId,
        trash: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let task = match self {
            Worktree::Local(this) => this.delete_entry(entry_id, trash, cx),
            Worktree::Remote(this) => this.delete_entry(entry_id, trash, cx),
        }?;
        cx.emit(Event::DeletedEntry(entry_id));
        Some(task)
    }

    pub fn rename_entry(
        &mut self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<CreatedEntry>> {
        let new_path = new_path.into();
        match self {
            Worktree::Local(this) => this.rename_entry(entry_id, new_path, cx),
            Worktree::Remote(this) => this.rename_entry(entry_id, new_path, cx),
        }
    }

    pub fn copy_entry(
        &mut self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<Option<Entry>>> {
        let new_path = new_path.into();
        match self {
            Worktree::Local(this) => this.copy_entry(entry_id, new_path, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::CopyProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                    new_path: new_path.to_string_lossy().into(),
                });
                cx.spawn(move |this, mut cx| async move {
                    let response = response.await?;
                    match response.entry {
                        Some(entry) => this
                            .update(&mut cx, |worktree, cx| {
                                worktree.as_remote_mut().unwrap().insert_entry(
                                    entry,
                                    response.worktree_scan_id as usize,
                                    cx,
                                )
                            })?
                            .await
                            .map(Some),
                        None => Ok(None),
                    }
                })
            }
        }
    }

    pub fn copy_external_entries(
        &mut self,
        target_directory: PathBuf,
        paths: Vec<Arc<Path>>,
        overwrite_existing_files: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Vec<ProjectEntryId>>> {
        match self {
            Worktree::Local(this) => {
                this.copy_external_entries(target_directory, paths, overwrite_existing_files, cx)
            }
            _ => Task::ready(Err(anyhow!(
                "Copying external entries is not supported for remote worktrees"
            ))),
        }
    }

    pub fn expand_entry(
        &mut self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        match self {
            Worktree::Local(this) => this.expand_entry(entry_id, cx),
            Worktree::Remote(this) => {
                let response = this.client.request(proto::ExpandProjectEntry {
                    project_id: this.project_id,
                    entry_id: entry_id.to_proto(),
                });
                Some(cx.spawn(move |this, mut cx| async move {
                    let response = response.await?;
                    this.update(&mut cx, |this, _| {
                        this.as_remote_mut()
                            .unwrap()
                            .wait_for_snapshot(response.worktree_scan_id as usize)
                    })?
                    .await?;
                    Ok(())
                }))
            }
        }
    }

    pub async fn handle_create_entry(
        this: Model<Self>,
        request: proto::CreateProjectEntry,
        mut cx: AsyncAppContext,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, entry) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.create_entry(PathBuf::from(request.path), request.is_directory, cx),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: match &entry.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }

    pub async fn handle_delete_entry(
        this: Model<Self>,
        request: proto::DeleteProjectEntry,
        mut cx: AsyncAppContext,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.delete_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    request.use_trash,
                    cx,
                ),
            )
        })?;
        task.ok_or_else(|| anyhow!("invalid entry"))?.await?;
        Ok(proto::ProjectEntryResponse {
            entry: None,
            worktree_scan_id: scan_id as u64,
        })
    }

    pub async fn handle_expand_entry(
        this: Model<Self>,
        request: proto::ExpandProjectEntry,
        mut cx: AsyncAppContext,
    ) -> Result<proto::ExpandProjectEntryResponse> {
        let task = this.update(&mut cx, |this, cx| {
            this.expand_entry(ProjectEntryId::from_proto(request.entry_id), cx)
        })?;
        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
        Ok(proto::ExpandProjectEntryResponse {
            worktree_scan_id: scan_id as u64,
        })
    }

    pub async fn handle_rename_entry(
        this: Model<Self>,
        request: proto::RenameProjectEntry,
        mut cx: AsyncAppContext,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.rename_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    PathBuf::from(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: match &task.await? {
                CreatedEntry::Included(entry) => Some(entry.into()),
                CreatedEntry::Excluded { .. } => None,
            },
            worktree_scan_id: scan_id as u64,
        })
    }

    pub async fn handle_copy_entry(
        this: Model<Self>,
        request: proto::CopyProjectEntry,
        mut cx: AsyncAppContext,
    ) -> Result<proto::ProjectEntryResponse> {
        let (scan_id, task) = this.update(&mut cx, |this, cx| {
            (
                this.scan_id(),
                this.copy_entry(
                    ProjectEntryId::from_proto(request.entry_id),
                    PathBuf::from(request.new_path),
                    cx,
                ),
            )
        })?;
        Ok(proto::ProjectEntryResponse {
            entry: task.await?.as_ref().map(|e| e.into()),
            worktree_scan_id: scan_id as u64,
        })
    }
}

impl LocalWorktree {
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }

    pub fn is_path_private(&self, path: &Path) -> bool {
        !self.share_private_files && self.settings.is_path_private(path)
    }

    fn restart_background_scanners(&mut self, cx: &mut ModelContext<Worktree>) {
        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
        self.scan_requests_tx = scan_requests_tx;
        self.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;
        self.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
    }

    fn start_background_scanner(
        &mut self,
        scan_requests_rx: channel::Receiver<ScanRequest>,
        path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) {
        let snapshot = self.snapshot();
        let share_private_files = self.share_private_files;
        let next_entry_id = self.next_entry_id.clone();
        let fs = self.fs.clone();
        let settings = self.settings.clone();
        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
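        // Two tasks drive scanning: the scanner itself runs on the background
        // executor, while a second task (spawned below) forwards its `ScanState`
        // messages back to this model on the main thread.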
        let background_scanner = cx.background_executor().spawn({
            let abs_path = &snapshot.abs_path;
            let abs_path = if cfg!(target_os = "windows") {
                abs_path
                    .canonicalize()
                    .unwrap_or_else(|_| abs_path.to_path_buf())
            } else {
                abs_path.to_path_buf()
            };
            let background = cx.background_executor().clone();
            async move {
                let (events, watcher) = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
                let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                    log::error!("Failed to determine whether filesystem is case sensitive: {e:#}");
                    true
                });

                let mut scanner = BackgroundScanner {
                    fs,
                    fs_case_sensitive,
                    status_updates_tx: scan_states_tx,
                    executor: background,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    next_entry_id,
                    state: Mutex::new(BackgroundScannerState {
                        prev_snapshot: snapshot.snapshot.clone(),
                        snapshot,
                        scanned_dirs: Default::default(),
                        path_prefixes_to_scan: Default::default(),
                        paths_to_scan: Default::default(),
                        removed_entry_ids: Default::default(),
                        changed_paths: Default::default(),
                    }),
                    phase: BackgroundScannerPhase::InitialScan,
                    share_private_files,
                    settings,
                    watcher,
                };

                scanner.run(events).await;
            }
        });
        let scan_state_updater = cx.spawn(|this, mut cx| async move {
            while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
                this.update(&mut cx, |this, cx| {
                    let this = this.as_local_mut().unwrap();
                    match state {
                        ScanState::Started => {
                            *this.is_scanning.0.borrow_mut() = true;
                        }
                        ScanState::Updated {
                            snapshot,
                            changes,
                            barrier,
                            scanning,
                        } => {
                            *this.is_scanning.0.borrow_mut() = scanning;
                            this.set_snapshot(snapshot, changes, cx);
                            drop(barrier);
                        }
                    }
                    cx.notify();
                })
                .ok();
            }
        });
        self._background_scanner_tasks = vec![background_scanner, scan_state_updater];
        self.is_scanning = watch::channel_with(true);
    }

    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut ModelContext<Worktree>,
    ) {
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
        self.snapshot = new_snapshot;

        if let Some(share) = self.update_observer.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }

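    /// Diffs the git repositories of two snapshots by walking their
    /// `git_repositories` maps (both ordered by entry id) in lockstep, like a
    /// sorted merge join: repositories present only in `new_snapshot` are
    /// additions, repositories present only in `old_snapshot` are removals, and
    /// repositories present in both are reported only when their
    /// `git_dir_scan_id` has changed.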
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        Ordering::Less => {
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        Ordering::Equal => {
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        Ordering::Greater => {
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repository_entries
                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repository_entries
                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }

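    /// Returns a future that resolves once the background scanner reports that it
    /// is no longer scanning.
    ///
    /// A hedged sketch of typical use from an async context, mirroring the
    /// `read_with` pattern used elsewhere in this file (not a doctest):
    ///
    /// ```ignore
    /// worktree
    ///     .read_with(&cx, |tree, _| tree.as_local().unwrap().scan_complete())?
    ///     .await;
    /// ```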
    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut is_scanning_rx = self.is_scanning.1.clone();
        async move {
            let mut is_scanning = *is_scanning_rx.borrow();
            while is_scanning {
                if let Some(value) = is_scanning_rx.recv().await {
                    is_scanning = value;
                } else {
                    break;
                }
            }
        }
    }

    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }

    pub fn settings(&self) -> WorktreeSettings {
        self.settings.clone()
    }

    pub fn local_git_repo(&self, path: &Path) -> Option<Arc<dyn GitRepository>> {
        self.repo_for_path(path)
            .map(|(_, entry)| entry.repo_ptr.clone())
    }

    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }

    fn load_file(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<LoadedFile>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let entry = self.refresh_entry(path.clone(), None, cx);
        let is_private = self.is_path_private(path.as_ref());

        cx.spawn(|this, mut cx| async move {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;
            let mut index_task = None;
            let snapshot = this.update(&mut cx, |this, _| this.as_local().unwrap().snapshot())?;
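            // If the file lives inside a git repository, read its index text on the
            // background executor so the caller gets a diff base alongside the file.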
            if let Some(repo) = snapshot.repository_for_path(&path) {
                if let Some(repo_path) = repo.relativize(&snapshot, &path).log_err() {
                    if let Some(git_repo) = snapshot.git_repositories.get(&*repo.work_directory) {
                        let git_repo = git_repo.repo_ptr.clone();
                        index_task = Some(
                            cx.background_executor()
                                .spawn(async move { git_repo.load_index_text(&repo_path) }),
                        );
                    }
                }
            }

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            let file = match entry.await? {
                Some(entry) => File::for_entry(entry, worktree),
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    Arc::new(File {
                        entry_id: None,
                        worktree,
                        path,
                        mtime: Some(metadata.mtime),
                        is_local: true,
                        is_deleted: false,
                        is_private,
                    })
                }
            };

            Ok(LoadedFile {
                file,
                text,
                diff_base,
            })
        })
    }

    /// Find the lowest path in the worktree's data structures that is an ancestor
    /// of the given path, falling back to the worktree root when none of the
    /// path's ancestors are tracked. For example, given `a/b/c/d.txt` when only
    /// `a/b` exists in the worktree, this returns `a/b`.
    fn lowest_ancestor(&self, path: &Path) -> PathBuf {
        let mut lowest_ancestor = None;
        for path in path.ancestors() {
            if self.entry_for_path(path).is_some() {
                lowest_ancestor = Some(path.to_path_buf());
                break;
            }
        }

        lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
    }

    fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<CreatedEntry>> {
        let path = path.into();
        let abs_path = match self.absolutize(&path) {
            Ok(path) => path,
            Err(e) => return Task::ready(Err(e.context(format!("absolutizing path {path:?}")))),
        };
        let path_excluded = self.settings.is_path_excluded(&abs_path);
        let fs = self.fs.clone();
        let task_abs_path = abs_path.clone();
        let write = cx.background_executor().spawn(async move {
            if is_dir {
                fs.create_dir(&task_abs_path)
                    .await
                    .with_context(|| format!("creating directory {task_abs_path:?}"))
            } else {
                fs.save(&task_abs_path, &Rope::default(), LineEnding::default())
                    .await
                    .with_context(|| format!("creating file {task_abs_path:?}"))
            }
        });

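        // Creating the entry may also create intermediate directories between the
        // deepest pre-existing ancestor and the new path, so each of those
        // ancestors is refreshed below once the write completes.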
1302        let lowest_ancestor = self.lowest_ancestor(&path);
1303        cx.spawn(|this, mut cx| async move {
1304            write.await?;
1305            if path_excluded {
1306                return Ok(CreatedEntry::Excluded { abs_path });
1307            }
1308
1309            let (result, refreshes) = this.update(&mut cx, |this, cx| {
1310                let mut refreshes = Vec::new();
1311                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
1312                for refresh_path in refresh_paths.ancestors() {
1313                    if refresh_path == Path::new("") {
1314                        continue;
1315                    }
1316                    let refresh_full_path = lowest_ancestor.join(refresh_path);
1317
1318                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
1319                        refresh_full_path.into(),
1320                        None,
1321                        cx,
1322                    ));
1323                }
1324                (
1325                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
1326                    refreshes,
1327                )
1328            })?;
1329            for refresh in refreshes {
1330                refresh.await.log_err();
1331            }
1332
1333            Ok(result
1334                .await?
1335                .map(CreatedEntry::Included)
1336                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
1337        })
1338    }
1339
1340    fn write_file(
1341        &self,
1342        path: impl Into<Arc<Path>>,
1343        text: Rope,
1344        line_ending: LineEnding,
1345        cx: &mut ModelContext<Worktree>,
1346    ) -> Task<Result<Arc<File>>> {
1347        let path = path.into();
1348        let fs = self.fs.clone();
1349        let is_private = self.is_path_private(&path);
1350        let Ok(abs_path) = self.absolutize(&path) else {
1351            return Task::ready(Err(anyhow!("invalid path {path:?}")));
1352        };
1353
1354        let write = cx.background_executor().spawn({
1355            let fs = fs.clone();
1356            let abs_path = abs_path.clone();
1357            async move { fs.save(&abs_path, &text, line_ending).await }
1358        });
1359
1360        cx.spawn(move |this, mut cx| async move {
1361            write.await?;
1362            let entry = this
1363                .update(&mut cx, |this, cx| {
1364                    this.as_local_mut()
1365                        .unwrap()
1366                        .refresh_entry(path.clone(), None, cx)
1367                })?
1368                .await?;
1369            let worktree = this.upgrade().ok_or_else(|| anyhow!("worktree dropped"))?;
1370            if let Some(entry) = entry {
1371                Ok(File::for_entry(entry, worktree))
1372            } else {
1373                let metadata = fs
1374                    .metadata(&abs_path)
1375                    .await
1376                    .with_context(|| {
1377                        format!("Fetching metadata after saving the excluded buffer {abs_path:?}")
1378                    })?
1379                    .with_context(|| {
1380                        format!("Excluded buffer {path:?} got removed during saving")
1381                    })?;
1382                Ok(Arc::new(File {
1383                    worktree,
1384                    path,
1385                    mtime: Some(metadata.mtime),
1386                    entry_id: None,
1387                    is_local: true,
1388                    is_deleted: false,
1389                    is_private,
1390                }))
1391            }
1392        })
1393    }
1394
1395    fn delete_entry(
1396        &self,
1397        entry_id: ProjectEntryId,
1398        trash: bool,
1399        cx: &mut ModelContext<Worktree>,
1400    ) -> Option<Task<Result<()>>> {
1401        let entry = self.entry_for_id(entry_id)?.clone();
1402        let abs_path = self.absolutize(&entry.path);
1403        let fs = self.fs.clone();
1404
1405        let delete = cx.background_executor().spawn(async move {
1406            if entry.is_file() {
1407                if trash {
1408                    fs.trash_file(&abs_path?, Default::default()).await?;
1409                } else {
1410                    fs.remove_file(&abs_path?, Default::default()).await?;
1411                }
1412            } else {
1413                if trash {
1414                    fs.trash_dir(
1415                        &abs_path?,
1416                        RemoveOptions {
1417                            recursive: true,
1418                            ignore_if_not_exists: false,
1419                        },
1420                    )
1421                    .await?;
1422                } else {
1423                    fs.remove_dir(
1424                        &abs_path?,
1425                        RemoveOptions {
1426                            recursive: true,
1427                            ignore_if_not_exists: false,
1428                        },
1429                    )
1430                    .await?;
1431                }
1432            }
1433            anyhow::Ok(entry.path)
1434        });
1435
1436        Some(cx.spawn(|this, mut cx| async move {
1437            let path = delete.await?;
1438            this.update(&mut cx, |this, _| {
1439                this.as_local_mut()
1440                    .unwrap()
1441                    .refresh_entries_for_paths(vec![path])
1442            })?
1443            .recv()
1444            .await;
1445            Ok(())
1446        }))
1447    }
1448
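        /// Renames the entry on disk and refreshes it under its new path. On a
        /// case-insensitive filesystem, a case-only rename is performed with `overwrite`
        /// enabled so the destination does not appear to already exist. Returns
        /// `CreatedEntry::Excluded` when the new path falls under the exclusion filters.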
1449    fn rename_entry(
1450        &self,
1451        entry_id: ProjectEntryId,
1452        new_path: impl Into<Arc<Path>>,
1453        cx: &mut ModelContext<Worktree>,
1454    ) -> Task<Result<CreatedEntry>> {
1455        let old_path = match self.entry_for_id(entry_id) {
1456            Some(entry) => entry.path.clone(),
1457            None => return Task::ready(Err(anyhow!("no entry to rename for id {entry_id:?}"))),
1458        };
1459        let new_path = new_path.into();
1460        let abs_old_path = self.absolutize(&old_path);
1461        let Ok(abs_new_path) = self.absolutize(&new_path) else {
1462            return Task::ready(Err(anyhow!("absolutizing path {new_path:?}")));
1463        };
1464        let abs_path = abs_new_path.clone();
1465        let fs = self.fs.clone();
1466        let case_sensitive = self.fs_case_sensitive;
1467        let rename = cx.background_executor().spawn(async move {
1468            let abs_old_path = abs_old_path?;
1469            let abs_new_path = abs_new_path;
1470
1471            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
1472            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());
1473
1474            // If we're on a case-insensitive FS and we're doing a case-only rename (e.g. `foobar` to `FOOBAR`),
1475            // we want to overwrite, because otherwise we'd run into a file-already-exists error.
1476            let overwrite = !case_sensitive
1477                && abs_old_path != abs_new_path
1478                && abs_old_path_lower == abs_new_path_lower;
1479
1480            fs.rename(
1481                &abs_old_path,
1482                &abs_new_path,
1483                fs::RenameOptions {
1484                    overwrite,
1485                    ..Default::default()
1486                },
1487            )
1488            .await
1489            .with_context(|| format!("Renaming {abs_old_path:?} to {abs_new_path:?}"))
1490        });
1491
1492        cx.spawn(|this, mut cx| async move {
1493            rename.await?;
1494            Ok(this
1495                .update(&mut cx, |this, cx| {
1496                    this.as_local_mut()
1497                        .unwrap()
1498                        .refresh_entry(new_path.clone(), Some(old_path), cx)
1499                })?
1500                .await?
1501                .map(CreatedEntry::Included)
1502                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
1503        })
1504    }
1505
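        /// Copies the entry (recursively, for directories) to `new_path` on disk and
        /// refreshes the destination. Resolves to `None` if the source entry does not
        /// exist or the new path is excluded from the worktree.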
1506    fn copy_entry(
1507        &self,
1508        entry_id: ProjectEntryId,
1509        new_path: impl Into<Arc<Path>>,
1510        cx: &mut ModelContext<Worktree>,
1511    ) -> Task<Result<Option<Entry>>> {
1512        let old_path = match self.entry_for_id(entry_id) {
1513            Some(entry) => entry.path.clone(),
1514            None => return Task::ready(Ok(None)),
1515        };
1516        let new_path = new_path.into();
1517        let abs_old_path = self.absolutize(&old_path);
1518        let abs_new_path = self.absolutize(&new_path);
1519        let fs = self.fs.clone();
1520        let copy = cx.background_executor().spawn(async move {
1521            copy_recursive(
1522                fs.as_ref(),
1523                &abs_old_path?,
1524                &abs_new_path?,
1525                Default::default(),
1526            )
1527            .await
1528        });
1529
1530        cx.spawn(|this, mut cx| async move {
1531            copy.await?;
1532            this.update(&mut cx, |this, cx| {
1533                this.as_local_mut()
1534                    .unwrap()
1535                    .refresh_entry(new_path.clone(), None, cx)
1536            })?
1537            .await
1538        })
1539    }
1540
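        /// Copies files from outside the worktree into `target_directory`, skipping any
        /// source that would be copied onto itself, then rescans the copied paths and
        /// returns the ids of the entries found at those paths.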
1541    pub fn copy_external_entries(
1542        &mut self,
1543        target_directory: PathBuf,
1544        paths: Vec<Arc<Path>>,
1545        overwrite_existing_files: bool,
1546        cx: &mut ModelContext<Worktree>,
1547    ) -> Task<Result<Vec<ProjectEntryId>>> {
1548        let worktree_path = self.abs_path().clone();
1549        let fs = self.fs.clone();
1550        let paths = paths
1551            .into_iter()
1552            .filter_map(|source| {
1553                let file_name = source.file_name()?;
1554                let mut target = target_directory.clone();
1555                target.push(file_name);
1556
1557                // Do not allow copying a file onto itself.
1558                if source.as_ref() != target.as_path() {
1559                    Some((source, target))
1560                } else {
1561                    None
1562                }
1563            })
1564            .collect::<Vec<_>>();
1565
1566        let paths_to_refresh = paths
1567            .iter()
1568            .filter_map(|(_, target)| Some(target.strip_prefix(&worktree_path).ok()?.into()))
1569            .collect::<Vec<_>>();
1570
1571        cx.spawn(|this, cx| async move {
1572            cx.background_executor()
1573                .spawn(async move {
1574                    for (source, target) in paths {
1575                        copy_recursive(
1576                            fs.as_ref(),
1577                            &source,
1578                            &target,
1579                            fs::CopyOptions {
1580                                overwrite: overwrite_existing_files,
1581                                ..Default::default()
1582                            },
1583                        )
1584                        .await
1585                        .with_context(|| {
1586                            format!("Failed to copy file from {source:?} to {target:?}")
1587                        })?;
1588                    }
1589                    Ok::<(), anyhow::Error>(())
1590                })
1591                .await
1592                .log_err();
1593            let mut refresh = cx.read_model(
1594                &this.upgrade().with_context(|| "Dropped worktree")?,
1595                |this, _| {
1596                    Ok::<postage::barrier::Receiver, anyhow::Error>(
1597                        this.as_local()
1598                            .with_context(|| "Worktree is not local")?
1599                            .refresh_entries_for_paths(paths_to_refresh.clone()),
1600                    )
1601                },
1602            )??;
1603
1604            cx.background_executor()
1605                .spawn(async move {
1606                    refresh.next().await;
1607                    Ok::<(), anyhow::Error>(())
1608                })
1609                .await
1610                .log_err();
1611
1612            let this = this.upgrade().with_context(|| "Dropped worktree")?;
1613            cx.read_model(&this, |this, _| {
1614                paths_to_refresh
1615                    .iter()
1616                    .filter_map(|path| Some(this.entry_for_path(path)?.id))
1617                    .collect()
1618            })
1619        })
1620    }
1621
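        /// Queues a rescan of the entry's path and resolves once that scan completes.
        /// Returns `None` if the entry does not exist.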
1622    fn expand_entry(
1623        &mut self,
1624        entry_id: ProjectEntryId,
1625        cx: &mut ModelContext<Worktree>,
1626    ) -> Option<Task<Result<()>>> {
1627        let path = self.entry_for_id(entry_id)?.path.clone();
1628        let mut refresh = self.refresh_entries_for_paths(vec![path]);
1629        Some(cx.background_executor().spawn(async move {
1630            refresh.next().await;
1631            Ok(())
1632        }))
1633    }
1634
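        /// Sends a scan request for the given worktree-relative paths to the background
        /// scanner and returns a barrier that is released once the scan completes.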
1635    fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
1636        let (tx, rx) = barrier::channel();
1637        self.scan_requests_tx
1638            .try_send(ScanRequest {
1639                relative_paths: paths,
1640                done: tx,
1641            })
1642            .ok();
1643        rx
1644    }
1645
1646    pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
1647        self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
1648    }
1649
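        /// Rescans `path` (and `old_path`, if provided) and returns the refreshed entry,
        /// or `None` if the path is excluded from the worktree.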
1650    fn refresh_entry(
1651        &self,
1652        path: Arc<Path>,
1653        old_path: Option<Arc<Path>>,
1654        cx: &mut ModelContext<Worktree>,
1655    ) -> Task<Result<Option<Entry>>> {
1656        if self.settings.is_path_excluded(&path) {
1657            return Task::ready(Ok(None));
1658        }
1659        let paths = if let Some(old_path) = old_path.as_ref() {
1660            vec![old_path.clone(), path.clone()]
1661        } else {
1662            vec![path.clone()]
1663        };
1664        let t0 = Instant::now();
1665        let mut refresh = self.refresh_entries_for_paths(paths);
1666        cx.spawn(move |this, mut cx| async move {
1667            refresh.recv().await;
1668            log::trace!("refreshed entry {path:?} in {:?}", t0.elapsed());
1669            let new_entry = this.update(&mut cx, |this, _| {
1670                this.entry_for_path(path)
1671                    .cloned()
1672                    .ok_or_else(|| anyhow!("failed to read path after update"))
1673            })??;
1674            Ok(Some(new_entry))
1675        })
1676    }
1677
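        /// Starts streaming worktree updates to the given callback, beginning with an
        /// initial update for the current snapshot. Updates are split into chunks of at
        /// most `MAX_CHUNK_SIZE` entries, and sending pauses while the callback returns
        /// `false`. If an observer is already registered, updates are simply resumed.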
1678    fn observe_updates<F, Fut>(
1679        &mut self,
1680        project_id: u64,
1681        cx: &mut ModelContext<Worktree>,
1682        callback: F,
1683    ) where
1684        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1685        Fut: Send + Future<Output = bool>,
1686    {
1687        #[cfg(any(test, feature = "test-support"))]
1688        const MAX_CHUNK_SIZE: usize = 2;
1689        #[cfg(not(any(test, feature = "test-support")))]
1690        const MAX_CHUNK_SIZE: usize = 256;
1691
1692        if let Some(observer) = self.update_observer.as_mut() {
1693            *observer.resume_updates.borrow_mut() = ();
1694            return;
1695        }
1696
1697        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
1698        let (snapshots_tx, mut snapshots_rx) =
1699            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
1700        snapshots_tx
1701            .unbounded_send((self.snapshot(), Arc::default(), Arc::default()))
1702            .ok();
1703
1704        let worktree_id = cx.entity_id().as_u64();
1705        let _maintain_remote_snapshot = cx.background_executor().spawn(async move {
1706            let mut is_first = true;
1707            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
1708                let update = if is_first {
1709                    is_first = false;
1710                    snapshot.build_initial_update(project_id, worktree_id)
1711                } else {
1712                    snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes)
1713                };
1716
1717                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1718                    let _ = resume_updates_rx.try_recv();
1719                    loop {
1720                        let result = callback(update.clone());
1721                        if result.await {
1722                            break;
1723                        } else {
1724                            log::info!("waiting to resume updates");
1725                            if resume_updates_rx.next().await.is_none() {
1726                                return Some(());
1727                            }
1728                        }
1729                    }
1730                }
1731            }
1732            Some(())
1733        });
1734
1735        self.update_observer = Some(UpdateObservationState {
1736            snapshots_tx,
1737            resume_updates: resume_updates_tx,
1738            _maintain_remote_snapshot,
1739        });
1740    }
1741
1742    pub fn share_private_files(&mut self, cx: &mut ModelContext<Worktree>) {
1743        self.share_private_files = true;
1744        self.restart_background_scanners(cx);
1745    }
1746}
1747
1748impl RemoteWorktree {
1749    pub fn project_id(&self) -> u64 {
1750        self.project_id
1751    }
1752
1753    pub fn client(&self) -> AnyProtoClient {
1754        self.client.clone()
1755    }
1756
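        /// Marks this remote worktree as disconnected from the host, dropping the update
        /// channel and any pending snapshot subscriptions.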
1757    pub fn disconnected_from_host(&mut self) {
1758        self.updates_tx.take();
1759        self.snapshot_subscriptions.clear();
1760        self.disconnected = true;
1761    }
1762
1763    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1764        if let Some(updates_tx) = &self.updates_tx {
1765            updates_tx
1766                .unbounded_send(update)
1767                .expect("consumer runs to completion");
1768        }
1769    }
1770
1771    fn observe_updates<F, Fut>(
1772        &mut self,
1773        project_id: u64,
1774        cx: &mut ModelContext<Worktree>,
1775        callback: F,
1776    ) where
1777        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1778        Fut: 'static + Send + Future<Output = bool>,
1779    {
1780        let (tx, mut rx) = mpsc::unbounded();
1781        let initial_update = self
1782            .snapshot
1783            .build_initial_update(project_id, self.id().to_proto());
1784        self.updates_tx = Some(tx);
1785        cx.spawn(|this, mut cx| async move {
1786            let mut update = initial_update;
1787            loop {
1788                if !callback(update).await {
1789                    break;
1790                }
1791                if let Some(next_update) = rx.next().await {
1792                    update = next_update;
1793                } else {
1794                    break;
1795                }
1796            }
1797            this.update(&mut cx, |this, _| {
1798                let this = this.as_remote_mut().unwrap();
1799                this.updates_tx.take();
1800            })
1801        })
1802        .detach();
1803    }
1804
1805    fn observed_snapshot(&self, scan_id: usize) -> bool {
1806        self.completed_scan_id >= scan_id
1807    }
1808
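        /// Returns a future that resolves once this worktree has observed a snapshot with
        /// at least the given scan id, failing if the worktree is (or becomes) disconnected.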
1809    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1810        let (tx, rx) = oneshot::channel();
1811        if self.observed_snapshot(scan_id) {
1812            let _ = tx.send(());
1813        } else if self.disconnected {
1814            drop(tx);
1815        } else {
1816            match self
1817                .snapshot_subscriptions
1818                .binary_search_by_key(&scan_id, |probe| probe.0)
1819            {
1820                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1821            }
1822        }
1823
1824        async move {
1825            rx.await?;
1826            Ok(())
1827        }
1828    }
1829
1830    fn insert_entry(
1831        &mut self,
1832        entry: proto::Entry,
1833        scan_id: usize,
1834        cx: &mut ModelContext<Worktree>,
1835    ) -> Task<Result<Entry>> {
1836        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1837        cx.spawn(|this, mut cx| async move {
1838            wait_for_snapshot.await?;
1839            this.update(&mut cx, |worktree, _| {
1840                let worktree = worktree.as_remote_mut().unwrap();
1841                let snapshot = &mut worktree.background_snapshot.lock().0;
1842                let entry = snapshot.insert_entry(entry);
1843                worktree.snapshot = snapshot.clone();
1844                entry
1845            })?
1846        })
1847    }
1848
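        /// Asks the host to delete the entry, waits for the corresponding scan to be
        /// observed, and then removes the entry from the local copy of the snapshot.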
1849    fn delete_entry(
1850        &mut self,
1851        entry_id: ProjectEntryId,
1852        trash: bool,
1853        cx: &mut ModelContext<Worktree>,
1854    ) -> Option<Task<Result<()>>> {
1855        let response = self.client.request(proto::DeleteProjectEntry {
1856            project_id: self.project_id,
1857            entry_id: entry_id.to_proto(),
1858            use_trash: trash,
1859        });
1860        Some(cx.spawn(move |this, mut cx| async move {
1861            let response = response.await?;
1862            let scan_id = response.worktree_scan_id as usize;
1863
1864            this.update(&mut cx, move |this, _| {
1865                this.as_remote_mut().unwrap().wait_for_snapshot(scan_id)
1866            })?
1867            .await?;
1868
1869            this.update(&mut cx, |this, _| {
1870                let this = this.as_remote_mut().unwrap();
1871                let snapshot = &mut this.background_snapshot.lock().0;
1872                snapshot.delete_entry(entry_id);
1873                this.snapshot = snapshot.clone();
1874            })
1875        }))
1876    }
1877
1878    fn rename_entry(
1879        &mut self,
1880        entry_id: ProjectEntryId,
1881        new_path: impl Into<Arc<Path>>,
1882        cx: &mut ModelContext<Worktree>,
1883    ) -> Task<Result<CreatedEntry>> {
1884        let new_path = new_path.into();
1885        let response = self.client.request(proto::RenameProjectEntry {
1886            project_id: self.project_id,
1887            entry_id: entry_id.to_proto(),
1888            new_path: new_path.to_string_lossy().into(),
1889        });
1890        cx.spawn(move |this, mut cx| async move {
1891            let response = response.await?;
1892            match response.entry {
1893                Some(entry) => this
1894                    .update(&mut cx, |this, cx| {
1895                        this.as_remote_mut().unwrap().insert_entry(
1896                            entry,
1897                            response.worktree_scan_id as usize,
1898                            cx,
1899                        )
1900                    })?
1901                    .await
1902                    .map(CreatedEntry::Included),
1903                None => {
1904                    let abs_path = this.update(&mut cx, |worktree, _| {
1905                        worktree
1906                            .absolutize(&new_path)
1907                            .with_context(|| format!("absolutizing {new_path:?}"))
1908                    })??;
1909                    Ok(CreatedEntry::Excluded { abs_path })
1910                }
1911            }
1912        })
1913    }
1914}
1915
1916impl Snapshot {
1917    pub fn new(id: u64, root_name: String, abs_path: Arc<Path>) -> Self {
1918        Snapshot {
1919            id: WorktreeId::from_usize(id as usize),
1920            abs_path,
1921            root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
1922            root_name,
1923            entries_by_path: Default::default(),
1924            entries_by_id: Default::default(),
1925            repository_entries: Default::default(),
1926            scan_id: 1,
1927            completed_scan_id: 0,
1928        }
1929    }
1930
1931    pub fn id(&self) -> WorktreeId {
1932        self.id
1933    }
1934
1935    pub fn abs_path(&self) -> &Arc<Path> {
1936        &self.abs_path
1937    }
1938
1939    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
1940        let mut updated_entries = self
1941            .entries_by_path
1942            .iter()
1943            .map(proto::Entry::from)
1944            .collect::<Vec<_>>();
1945        updated_entries.sort_unstable_by_key(|e| e.id);
1946
1947        let mut updated_repositories = self
1948            .repository_entries
1949            .values()
1950            .map(proto::RepositoryEntry::from)
1951            .collect::<Vec<_>>();
1952        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1953
1954        proto::UpdateWorktree {
1955            project_id,
1956            worktree_id,
1957            abs_path: self.abs_path().to_string_lossy().into(),
1958            root_name: self.root_name().to_string(),
1959            updated_entries,
1960            removed_entries: Vec::new(),
1961            scan_id: self.scan_id as u64,
1962            is_last_update: self.completed_scan_id == self.scan_id,
1963            updated_repositories,
1964            removed_repositories: Vec::new(),
1965        }
1966    }
1967
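        /// Converts a worktree-relative path into an absolute path, rejecting paths that
        /// contain non-normal components such as `..`. An empty path resolves to the
        /// worktree's root.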
1968    pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
1969        if path
1970            .components()
1971            .any(|component| !matches!(component, std::path::Component::Normal(_)))
1972        {
1973            return Err(anyhow!("invalid path"));
1974        }
1975        if path.file_name().is_some() {
1976            Ok(self.abs_path.join(path))
1977        } else {
1978            Ok(self.abs_path.to_path_buf())
1979        }
1980    }
1981
1982    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1983        self.entries_by_id.get(&entry_id, &()).is_some()
1984    }
1985
1986    fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1987        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1988        let old_entry = self.entries_by_id.insert_or_replace(
1989            PathEntry {
1990                id: entry.id,
1991                path: entry.path.clone(),
1992                is_ignored: entry.is_ignored,
1993                scan_id: 0,
1994            },
1995            &(),
1996        );
1997        if let Some(old_entry) = old_entry {
1998            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1999        }
2000        self.entries_by_path.insert_or_replace(entry.clone(), &());
2001        Ok(entry)
2002    }
2003
2004    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
2005        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
2006        self.entries_by_path = {
2007            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2008            let mut new_entries_by_path =
2009                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
2010            while let Some(entry) = cursor.item() {
2011                if entry.path.starts_with(&removed_entry.path) {
2012                    self.entries_by_id.remove(&entry.id, &());
2013                    cursor.next(&());
2014                } else {
2015                    break;
2016                }
2017            }
2018            new_entries_by_path.append(cursor.suffix(&()), &());
2019            new_entries_by_path
2020        };
2021
2022        Some(removed_entry.path)
2023    }
2024
2025    #[cfg(any(test, feature = "test-support"))]
2026    pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
2027        let path = path.into();
2028        self.entries_by_path
2029            .get(&PathKey(Arc::from(path)), &())
2030            .and_then(|entry| entry.git_status)
2031    }
2032
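        /// Applies an update received from the host to this snapshot, inserting and
        /// removing entries and repository entries and advancing the scan id.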
2033    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
2034        log::trace!(
2035            "applying remote worktree update. {} entries updated, {} removed",
2036            update.updated_entries.len(),
2037            update.removed_entries.len()
2038        );
2039
2040        let mut entries_by_path_edits = Vec::new();
2041        let mut entries_by_id_edits = Vec::new();
2042
2043        for entry_id in update.removed_entries {
2044            let entry_id = ProjectEntryId::from_proto(entry_id);
2045            entries_by_id_edits.push(Edit::Remove(entry_id));
2046            if let Some(entry) = self.entry_for_id(entry_id) {
2047                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
2048            }
2049        }
2050
2051        for entry in update.updated_entries {
2052            let entry = Entry::try_from((&self.root_char_bag, entry))?;
2053            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
2054                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
2055            }
2056            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
2057                if old_entry.id != entry.id {
2058                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
2059                }
2060            }
2061            entries_by_id_edits.push(Edit::Insert(PathEntry {
2062                id: entry.id,
2063                path: entry.path.clone(),
2064                is_ignored: entry.is_ignored,
2065                scan_id: 0,
2066            }));
2067            entries_by_path_edits.push(Edit::Insert(entry));
2068        }
2069
2070        self.entries_by_path.edit(entries_by_path_edits, &());
2071        self.entries_by_id.edit(entries_by_id_edits, &());
2072
2073        update.removed_repositories.sort_unstable();
2074        self.repository_entries.retain(|_, entry| {
2075            update
2076                .removed_repositories
2077                .binary_search(&entry.work_directory.to_proto())
2078                .is_err()
2079        });
2084
2085        for repository in update.updated_repositories {
2086            let work_directory_entry: WorkDirectoryEntry =
2087                ProjectEntryId::from_proto(repository.work_directory_id).into();
2088
2089            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
2090                let work_directory = RepositoryWorkDirectory(entry.path.clone());
2091                if self.repository_entries.get(&work_directory).is_some() {
2092                    self.repository_entries.update(&work_directory, |repo| {
2093                        repo.branch = repository.branch.map(Into::into);
2094                    });
2095                } else {
2096                    self.repository_entries.insert(
2097                        work_directory,
2098                        RepositoryEntry {
2099                            work_directory: work_directory_entry,
2100                            branch: repository.branch.map(Into::into),
2101                            // When syncing repository entries from a peer, we don't need
2102                            // the location_in_repo field, since git operations don't happen locally
2103                            // anyway.
2104                            location_in_repo: None,
2105                        },
2106                    )
2107                }
2108            } else {
2109                log::error!("no work directory entry for repository {:?}", repository)
2110            }
2111        }
2112
2113        self.scan_id = update.scan_id as usize;
2114        if update.is_last_update {
2115            self.completed_scan_id = update.scan_id as usize;
2116        }
2117
2118        Ok(())
2119    }
2120
2121    pub fn entry_count(&self) -> usize {
2122        self.entries_by_path.summary().count
2123    }
2124
2125    pub fn visible_entry_count(&self) -> usize {
2126        self.entries_by_path.summary().non_ignored_count
2127    }
2128
2129    pub fn dir_count(&self) -> usize {
2130        let summary = self.entries_by_path.summary();
2131        summary.count - summary.file_count
2132    }
2133
2134    pub fn visible_dir_count(&self) -> usize {
2135        let summary = self.entries_by_path.summary();
2136        summary.non_ignored_count - summary.non_ignored_file_count
2137    }
2138
2139    pub fn file_count(&self) -> usize {
2140        self.entries_by_path.summary().file_count
2141    }
2142
2143    pub fn visible_file_count(&self) -> usize {
2144        self.entries_by_path.summary().non_ignored_file_count
2145    }
2146
2147    fn traverse_from_offset(
2148        &self,
2149        include_files: bool,
2150        include_dirs: bool,
2151        include_ignored: bool,
2152        start_offset: usize,
2153    ) -> Traversal {
2154        let mut cursor = self.entries_by_path.cursor();
2155        cursor.seek(
2156            &TraversalTarget::Count {
2157                count: start_offset,
2158                include_files,
2159                include_dirs,
2160                include_ignored,
2161            },
2162            Bias::Right,
2163            &(),
2164        );
2165        Traversal {
2166            cursor,
2167            include_files,
2168            include_dirs,
2169            include_ignored,
2170        }
2171    }
2172
2173    pub fn traverse_from_path(
2174        &self,
2175        include_files: bool,
2176        include_dirs: bool,
2177        include_ignored: bool,
2178        path: &Path,
2179    ) -> Traversal {
2180        Traversal::new(
2181            &self.entries_by_path,
2182            include_files,
2183            include_dirs,
2184            include_ignored,
2185            path,
2186        )
2187    }
2188
2189    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
2190        self.traverse_from_offset(true, false, include_ignored, start)
2191    }
2192
2193    pub fn directories(&self, include_ignored: bool, start: usize) -> Traversal {
2194        self.traverse_from_offset(false, true, include_ignored, start)
2195    }
2196
2197    pub fn entries(&self, include_ignored: bool, start: usize) -> Traversal {
2198        self.traverse_from_offset(true, true, include_ignored, start)
2199    }
2200
2201    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
2202        self.repository_entries
2203            .iter()
2204            .map(|(path, entry)| (&path.0, entry))
2205    }
2206
2207    /// Get the repository whose work directory is the given path.
2208    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
2209        self.repository_entries
2210            .get(&RepositoryWorkDirectory(path.into()))
2211            .cloned()
2212    }
2213
2214    /// Get the repository whose work directory contains the given path.
2215    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
2216        self.repository_and_work_directory_for_path(path)
2217            .map(|e| e.1)
2218    }
2219
2220    pub fn repository_and_work_directory_for_path(
2221        &self,
2222        path: &Path,
2223    ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
2224        self.repository_entries
2225            .iter()
2226            .filter(|(workdir_path, _)| path.starts_with(workdir_path))
2227            .last()
2228            .map(|(path, repo)| (path.clone(), repo.clone()))
2229    }
2230
2231    /// Given an ordered iterator of entries, returns an iterator of those entries,
2232    /// along with their containing git repository.
2233    pub fn entries_with_repositories<'a>(
2234        &'a self,
2235        entries: impl 'a + Iterator<Item = &'a Entry>,
2236    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
2237        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
2238        let mut repositories = self.repositories().peekable();
2239        entries.map(move |entry| {
2240            while let Some((repo_path, _)) = containing_repos.last() {
2241                if entry.path.starts_with(repo_path) {
2242                    break;
2243                } else {
2244                    containing_repos.pop();
2245                }
2246            }
2247            while let Some((repo_path, _)) = repositories.peek() {
2248                if entry.path.starts_with(repo_path) {
2249                    containing_repos.push(repositories.next().unwrap());
2250                } else {
2251                    break;
2252                }
2253            }
2254            let repo = containing_repos.last().map(|(_, repo)| *repo);
2255            (entry, repo)
2256        })
2257    }
2258
2259    /// Updates the `git_status` of the given entries such that files'
2260    /// statuses bubble up to their ancestor directories.
2261    pub fn propagate_git_statuses(&self, result: &mut [Entry]) {
2262        let mut cursor = self
2263            .entries_by_path
2264            .cursor::<(TraversalProgress, GitStatuses)>();
2265        let mut entry_stack = Vec::<(usize, GitStatuses)>::new();
2266
2267        let mut result_ix = 0;
2268        loop {
2269            let next_entry = result.get(result_ix);
2270            let containing_entry = entry_stack.last().map(|(ix, _)| &result[*ix]);
2271
2272            let entry_to_finish = match (containing_entry, next_entry) {
2273                (Some(_), None) => entry_stack.pop(),
2274                (Some(containing_entry), Some(next_path)) => {
2275                    if next_path.path.starts_with(&containing_entry.path) {
2276                        None
2277                    } else {
2278                        entry_stack.pop()
2279                    }
2280                }
2281                (None, Some(_)) => None,
2282                (None, None) => break,
2283            };
2284
2285            if let Some((entry_ix, prev_statuses)) = entry_to_finish {
2286                cursor.seek_forward(
2287                    &TraversalTarget::PathSuccessor(&result[entry_ix].path),
2288                    Bias::Left,
2289                    &(),
2290                );
2291
2292                let statuses = cursor.start().1 - prev_statuses;
2293
2294                result[entry_ix].git_status = if statuses.conflict > 0 {
2295                    Some(GitFileStatus::Conflict)
2296                } else if statuses.modified > 0 {
2297                    Some(GitFileStatus::Modified)
2298                } else if statuses.added > 0 {
2299                    Some(GitFileStatus::Added)
2300                } else {
2301                    None
2302                };
2303            } else {
2304                if result[result_ix].is_dir() {
2305                    cursor.seek_forward(
2306                        &TraversalTarget::Path(&result[result_ix].path),
2307                        Bias::Left,
2308                        &(),
2309                    );
2310                    entry_stack.push((result_ix, cursor.start().1));
2311                }
2312                result_ix += 1;
2313            }
2314        }
2315    }
2316
2317    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
2318        let empty_path = Path::new("");
2319        self.entries_by_path
2320            .cursor::<()>()
2321            .filter(move |entry| entry.path.as_ref() != empty_path)
2322            .map(|entry| &entry.path)
2323    }
2324
2325    pub fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
2326        let mut cursor = self.entries_by_path.cursor();
2327        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
2328        let traversal = Traversal {
2329            cursor,
2330            include_files: true,
2331            include_dirs: true,
2332            include_ignored: true,
2333        };
2334        ChildEntriesIter {
2335            traversal,
2336            parent_path,
2337        }
2338    }
2339
2340    pub fn root_entry(&self) -> Option<&Entry> {
2341        self.entry_for_path("")
2342    }
2343
2344    pub fn root_name(&self) -> &str {
2345        &self.root_name
2346    }
2347
2348    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2349        self.repository_entries
2350            .get(&RepositoryWorkDirectory(Path::new("").into()))
2351            .map(|entry| entry.to_owned())
2352    }
2353
2354    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
2355        self.repository_entries.values()
2356    }
2357
2358    pub fn scan_id(&self) -> usize {
2359        self.scan_id
2360    }
2361
2362    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2363        let path = path.as_ref();
2364        self.traverse_from_path(true, true, true, path)
2365            .entry()
2366            .and_then(|entry| {
2367                if entry.path.as_ref() == path {
2368                    Some(entry)
2369                } else {
2370                    None
2371                }
2372            })
2373    }
2374
2375    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
2376        let entry = self.entries_by_id.get(&id, &())?;
2377        self.entry_for_path(&entry.path)
2378    }
2379
2380    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
2381        self.entry_for_path(path.as_ref()).map(|e| e.inode)
2382    }
2383}
2384
2385impl LocalSnapshot {
2386    pub fn repo_for_path(&self, path: &Path) -> Option<(RepositoryEntry, &LocalRepositoryEntry)> {
2387        let (_, repo_entry) = self.repository_and_work_directory_for_path(path)?;
2388        let work_directory_id = repo_entry.work_directory_id();
2389        Some((repo_entry, self.git_repositories.get(&work_directory_id)?))
2390    }
2391
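        /// Builds a protobuf worktree update describing the given entry and repository
        /// changes, so it can be sent to the remote side of the project.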
2392    fn build_update(
2393        &self,
2394        project_id: u64,
2395        worktree_id: u64,
2396        entry_changes: UpdatedEntriesSet,
2397        repo_changes: UpdatedGitRepositoriesSet,
2398    ) -> proto::UpdateWorktree {
2399        let mut updated_entries = Vec::new();
2400        let mut removed_entries = Vec::new();
2401        let mut updated_repositories = Vec::new();
2402        let mut removed_repositories = Vec::new();
2403
2404        for (_, entry_id, path_change) in entry_changes.iter() {
2405            if let PathChange::Removed = path_change {
2406                removed_entries.push(entry_id.0 as u64);
2407            } else if let Some(entry) = self.entry_for_id(*entry_id) {
2408                updated_entries.push(proto::Entry::from(entry));
2409            }
2410        }
2411
2412        for (work_dir_path, change) in repo_changes.iter() {
2413            let new_repo = self
2414                .repository_entries
2415                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
2416            match (&change.old_repository, new_repo) {
2417                (Some(old_repo), Some(new_repo)) => {
2418                    updated_repositories.push(new_repo.build_update(old_repo));
2419                }
2420                (None, Some(new_repo)) => {
2421                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
2422                }
2423                (Some(old_repo), None) => {
2424                    removed_repositories.push(old_repo.work_directory.0.to_proto());
2425                }
2426                _ => {}
2427            }
2428        }
2429
2430        removed_entries.sort_unstable();
2431        updated_entries.sort_unstable_by_key(|e| e.id);
2432        removed_repositories.sort_unstable();
2433        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
2434
2435        // TODO: optimize, taking advantage of the fact that `removed_entries` is sorted.
2436        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());
2437
2438        proto::UpdateWorktree {
2439            project_id,
2440            worktree_id,
2441            abs_path: self.abs_path().to_string_lossy().into(),
2442            root_name: self.root_name().to_string(),
2443            updated_entries,
2444            removed_entries,
2445            scan_id: self.scan_id as u64,
2446            is_last_update: self.completed_scan_id == self.scan_id,
2447            updated_repositories,
2448            removed_repositories,
2449        }
2450    }
2451
2452    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2453        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
2454            let abs_path = self.abs_path.join(&entry.path);
2455            match smol::block_on(build_gitignore(&abs_path, fs)) {
2456                Ok(ignore) => {
2457                    self.ignores_by_parent_abs_path
2458                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
2459                }
2460                Err(error) => {
2461                    log::error!(
2462                        "error loading .gitignore file {:?} - {:?}",
2463                        &entry.path,
2464                        error
2465                    );
2466                }
2467            }
2468        }
2469
2470        if entry.kind == EntryKind::PendingDir {
2471            if let Some(existing_entry) =
2472                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
2473            {
2474                entry.kind = existing_entry.kind;
2475            }
2476        }
2477
2478        let scan_id = self.scan_id;
2479        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
2480        if let Some(removed) = removed {
2481            if removed.id != entry.id {
2482                self.entries_by_id.remove(&removed.id, &());
2483            }
2484        }
2485        self.entries_by_id.insert_or_replace(
2486            PathEntry {
2487                id: entry.id,
2488                path: entry.path.clone(),
2489                is_ignored: entry.is_ignored,
2490                scan_id,
2491            },
2492            &(),
2493        );
2494
2495        entry
2496    }
2497
2498    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2499        let mut inodes = TreeSet::default();
2500        for ancestor in path.ancestors().skip(1) {
2501            if let Some(entry) = self.entry_for_path(ancestor) {
2502                inodes.insert(entry.inode);
2503            }
2504        }
2505        inodes
2506    }
2507
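        /// Builds the stack of `.gitignore` rules that apply to the given absolute path,
        /// walking up its ancestors until the directory containing `.git` is reached.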
2508    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2509        let mut new_ignores = Vec::new();
2510        for (index, ancestor) in abs_path.ancestors().enumerate() {
2511            if index > 0 {
2512                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2513                    new_ignores.push((ancestor, Some(ignore.clone())));
2514                } else {
2515                    new_ignores.push((ancestor, None));
2516                }
2517            }
2518            if ancestor.join(&*DOT_GIT).is_dir() {
2519                break;
2520            }
2521        }
2522
2523        let mut ignore_stack = IgnoreStack::none();
2524        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2525            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2526                ignore_stack = IgnoreStack::all();
2527                break;
2528            } else if let Some(ignore) = ignore {
2529                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2530            }
2531        }
2532
2533        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2534            ignore_stack = IgnoreStack::all();
2535        }
2536
2537        ignore_stack
2538    }
2539
2540    #[cfg(test)]
2541    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
2542        self.entries_by_path
2543            .cursor::<()>()
2544            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
2545    }
2546
2547    #[cfg(test)]
2548    pub fn check_invariants(&self, git_state: bool) {
2549        use pretty_assertions::assert_eq;
2550
2551        assert_eq!(
2552            self.entries_by_path
2553                .cursor::<()>()
2554                .map(|e| (&e.path, e.id))
2555                .collect::<Vec<_>>(),
2556            self.entries_by_id
2557                .cursor::<()>()
2558                .map(|e| (&e.path, e.id))
2559                .collect::<collections::BTreeSet<_>>()
2560                .into_iter()
2561                .collect::<Vec<_>>(),
2562            "entries_by_path and entries_by_id are inconsistent"
2563        );
2564
2565        let mut files = self.files(true, 0);
2566        let mut visible_files = self.files(false, 0);
2567        for entry in self.entries_by_path.cursor::<()>() {
2568            if entry.is_file() {
2569                assert_eq!(files.next().unwrap().inode, entry.inode);
2570                if !entry.is_ignored && !entry.is_external {
2571                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
2572                }
2573            }
2574        }
2575
2576        assert!(files.next().is_none());
2577        assert!(visible_files.next().is_none());
2578
2579        let mut bfs_paths = Vec::new();
2580        let mut stack = self
2581            .root_entry()
2582            .map(|e| e.path.as_ref())
2583            .into_iter()
2584            .collect::<Vec<_>>();
2585        while let Some(path) = stack.pop() {
2586            bfs_paths.push(path);
2587            let ix = stack.len();
2588            for child_entry in self.child_entries(path) {
2589                stack.insert(ix, &child_entry.path);
2590            }
2591        }
2592
2593        let dfs_paths_via_iter = self
2594            .entries_by_path
2595            .cursor::<()>()
2596            .map(|e| e.path.as_ref())
2597            .collect::<Vec<_>>();
2598        assert_eq!(bfs_paths, dfs_paths_via_iter);
2599
2600        let dfs_paths_via_traversal = self
2601            .entries(true, 0)
2602            .map(|e| e.path.as_ref())
2603            .collect::<Vec<_>>();
2604        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
2605
2606        if git_state {
2607            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
2608                let ignore_parent_path =
2609                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
2610                assert!(self.entry_for_path(&ignore_parent_path).is_some());
2611                assert!(self
2612                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
2613                    .is_some());
2614            }
2615        }
2616    }
2617
2618    #[cfg(test)]
2619    pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
2620        let mut paths = Vec::new();
2621        for entry in self.entries_by_path.cursor::<()>() {
2622            if include_ignored || !entry.is_ignored {
2623                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
2624            }
2625        }
2626        paths.sort_by(|a, b| a.0.cmp(b.0));
2627        paths
2628    }
2629}
2630
2631impl BackgroundScannerState {
2632    fn should_scan_directory(&self, entry: &Entry) -> bool {
2633        (!entry.is_external && !entry.is_ignored)
2634            || entry.path.file_name() == Some(*DOT_GIT)
2635            || entry.path.file_name() == Some(local_settings_folder_relative_path().as_os_str())
2636            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
2637            || self
2638                .paths_to_scan
2639                .iter()
2640                .any(|p| p.starts_with(&entry.path))
2641            || self
2642                .path_prefixes_to_scan
2643                .iter()
2644                .any(|p| entry.path.starts_with(p))
2645    }
2646
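        /// Queues a background scan job for the given directory, capturing its ignore
        /// stack and, when it is not ignored, the git statuses of its containing
        /// repository. Directories whose inode already appears among their ancestors are
        /// skipped to guard against cycles.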
2647    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
2648        let path = entry.path.clone();
2649        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
2650        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
2651        let mut containing_repository = None;
2652        if !ignore_stack.is_abs_path_ignored(&abs_path, true) {
2653            if let Some((repo_entry, repo)) = self.snapshot.repo_for_path(&path) {
2654                if let Some(workdir_path) = repo_entry.work_directory(&self.snapshot) {
2655                    if let Ok(repo_path) = repo_entry.relativize(&self.snapshot, &path) {
2656                        containing_repository = Some(ScanJobContainingRepository {
2657                            work_directory: workdir_path,
2658                            statuses: repo
2659                                .repo_ptr
2660                                .statuses(&repo_path)
2661                                .log_err()
2662                                .unwrap_or_default(),
2663                        });
2664                    }
2665                }
2666            }
2667        }
2668        if !ancestor_inodes.contains(&entry.inode) {
2669            ancestor_inodes.insert(entry.inode);
2670            scan_job_tx
2671                .try_send(ScanJob {
2672                    abs_path,
2673                    path,
2674                    ignore_stack,
2675                    scan_queue: scan_job_tx.clone(),
2676                    ancestor_inodes,
2677                    is_external: entry.is_external,
2678                    containing_repository,
2679                })
2680                .unwrap();
2681        }
2682    }
2683
2684    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2685        if let Some(mtime) = entry.mtime {
2686            if let Some(removed_entry_id) = self.removed_entry_ids.remove(&(entry.inode, mtime)) {
2687                entry.id = removed_entry_id;
2688            } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
2689                entry.id = existing_entry.id;
2690            }
2691        }
2692    }
2693
2694    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2695        self.reuse_entry_id(&mut entry);
2696        let entry = self.snapshot.insert_entry(entry, fs);
2697        if entry.path.file_name() == Some(&DOT_GIT) {
2698            self.build_git_repository(entry.path.clone(), fs);
2699        }
2700
2701        #[cfg(test)]
2702        self.snapshot.check_invariants(false);
2703
2704        entry
2705    }
2706
2707    fn populate_dir(
2708        &mut self,
2709        parent_path: &Arc<Path>,
2710        entries: impl IntoIterator<Item = Entry>,
2711        ignore: Option<Arc<Gitignore>>,
2712    ) {
2713        let mut parent_entry = if let Some(parent_entry) = self
2714            .snapshot
2715            .entries_by_path
2716            .get(&PathKey(parent_path.clone()), &())
2717        {
2718            parent_entry.clone()
2719        } else {
2720            log::warn!(
2721                "populating a directory {:?} that has been removed",
2722                parent_path
2723            );
2724            return;
2725        };
2726
2727        match parent_entry.kind {
2728            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
2729            EntryKind::Dir => {}
2730            _ => return,
2731        }
2732
2733        if let Some(ignore) = ignore {
2734            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
2735            self.snapshot
2736                .ignores_by_parent_abs_path
2737                .insert(abs_parent_path, (ignore, false));
2738        }
2739
2740        let parent_entry_id = parent_entry.id;
2741        self.scanned_dirs.insert(parent_entry_id);
2742        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2743        let mut entries_by_id_edits = Vec::new();
2744
2745        for entry in entries {
2746            entries_by_id_edits.push(Edit::Insert(PathEntry {
2747                id: entry.id,
2748                path: entry.path.clone(),
2749                is_ignored: entry.is_ignored,
2750                scan_id: self.snapshot.scan_id,
2751            }));
2752            entries_by_path_edits.push(Edit::Insert(entry));
2753        }
2754
2755        self.snapshot
2756            .entries_by_path
2757            .edit(entries_by_path_edits, &());
2758        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2759
2760        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
2761            self.changed_paths.insert(ix, parent_path.clone());
2762        }
2763
2764        #[cfg(test)]
2765        self.snapshot.check_invariants(false);
2766    }
2767
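        /// Removes the entry at `path` and all of its descendants from the snapshot,
        /// remembering the removed entry ids so they can be reused if the same files
        /// reappear, and, when the removed path is a `.gitignore` file, flagging its
        /// parent's ignore data for re-evaluation.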
2768    fn remove_path(&mut self, path: &Path) {
2769        let mut new_entries;
2770        let removed_entries;
2771        {
2772            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
2773            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2774            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2775            new_entries.append(cursor.suffix(&()), &());
2776        }
2777        self.snapshot.entries_by_path = new_entries;
2778
2779        let mut entries_by_id_edits = Vec::new();
2780        for entry in removed_entries.cursor::<()>() {
2781            if let Some(mtime) = entry.mtime {
2782                let removed_entry_id = self
2783                    .removed_entry_ids
2784                    .entry((entry.inode, mtime))
2785                    .or_insert(entry.id);
2786                *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2787            }
2788            entries_by_id_edits.push(Edit::Remove(entry.id));
2789        }
2790        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2791
2792        if path.file_name() == Some(&GITIGNORE) {
2793            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
2794            if let Some((_, needs_update)) = self
2795                .snapshot
2796                .ignores_by_parent_abs_path
2797                .get_mut(abs_parent_path.as_path())
2798            {
2799                *needs_update = true;
2800            }
2801        }
2802
2803        #[cfg(test)]
2804        self.snapshot.check_invariants(false);
2805    }
2806
2807    fn build_git_repository(
2808        &mut self,
2809        dot_git_path: Arc<Path>,
2810        fs: &dyn Fs,
2811    ) -> Option<(RepositoryWorkDirectory, Arc<dyn GitRepository>)> {
2812        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
2813            Some(parent_dir) => {
2814                // Guard against repositories inside the repository metadata
2815                if parent_dir.iter().any(|component| component == *DOT_GIT) {
2816                    log::info!(
2817                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
2818                    );
2819                    return None;
2820                };
2821                log::info!(
2822                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
2823                );
2824
2825                parent_dir.into()
2826            }
2827            None => {
2828                // `dot_git_path.parent().is_none()` means the `.git` directory is the opened worktree itself;
2829                // no files inside it are tracked by git, so there is no need to build a repository around it.
2830                log::info!(
2831                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
2832                );
2833                return None;
2834            }
2835        };
2836
2837        self.build_git_repository_for_path(work_dir_path, dot_git_path, None, fs)
2838    }
2839
2840    fn build_git_repository_for_path(
2841        &mut self,
2842        work_dir_path: Arc<Path>,
2843        dot_git_path: Arc<Path>,
2844        location_in_repo: Option<Arc<Path>>,
2845        fs: &dyn Fs,
2846    ) -> Option<(RepositoryWorkDirectory, Arc<dyn GitRepository>)> {
2847        let work_dir_id = self
2848            .snapshot
2849            .entry_for_path(work_dir_path.clone())
2850            .map(|entry| entry.id)?;
2851
2852        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
2853            return None;
2854        }
2855
2856        let abs_path = self.snapshot.abs_path.join(&dot_git_path);
2857        let t0 = Instant::now();
2858        let repository = fs.open_repo(&abs_path)?;
2859        log::trace!("constructed libgit2 repo in {:?}", t0.elapsed());
2860        let work_directory = RepositoryWorkDirectory(work_dir_path.clone());
2861
2862        self.snapshot.repository_entries.insert(
2863            work_directory.clone(),
2864            RepositoryEntry {
2865                work_directory: work_dir_id.into(),
2866                branch: repository.branch_name().map(Into::into),
2867                location_in_repo,
2868            },
2869        );
2870        self.snapshot.git_repositories.insert(
2871            work_dir_id,
2872            LocalRepositoryEntry {
2873                git_dir_scan_id: 0,
2874                repo_ptr: repository.clone(),
2875                git_dir_path: dot_git_path.clone(),
2876            },
2877        );
2878
2879        Some((work_directory, repository))
2880    }
2881}
2882
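/// Builds a [`Gitignore`] matcher from the `.gitignore` file at `abs_path`,
/// rooted at that file's parent directory.
///
/// An illustrative sketch (hypothetical paths, assuming the file contains a
/// `target/` rule):
///
/// ```ignore
/// let ignore = build_gitignore(Path::new("/repo/.gitignore"), fs.as_ref()).await?;
/// assert!(ignore.matched("/repo/target", true).is_ignore());
/// ```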
2883async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2884    let contents = fs.load(abs_path).await?;
2885    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2886    let mut builder = GitignoreBuilder::new(parent);
2887    for line in contents.lines() {
2888        builder.add_line(Some(abs_path.into()), line)?;
2889    }
2890    Ok(builder.build()?)
2891}
2892
2893impl WorktreeId {
2894    pub fn from_usize(handle_id: usize) -> Self {
2895        Self(handle_id)
2896    }
2897
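    /// Converts a protobuf worktree id into a [`WorktreeId`].
    ///
    /// Round-trips with [`WorktreeId::to_proto`], e.g. (illustrative value):
    ///
    /// ```ignore
    /// assert_eq!(WorktreeId::from_proto(7).to_proto(), 7);
    /// ```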
2898    pub fn from_proto(id: u64) -> Self {
2899        Self(id as usize)
2900    }
2901
2902    pub fn to_proto(&self) -> u64 {
2903        self.0 as u64
2904    }
2905
2906    pub fn to_usize(&self) -> usize {
2907        self.0
2908    }
2909}
2910
2911impl fmt::Display for WorktreeId {
2912    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2913        self.0.fmt(f)
2914    }
2915}
2916
2917impl Deref for Worktree {
2918    type Target = Snapshot;
2919
2920    fn deref(&self) -> &Self::Target {
2921        match self {
2922            Worktree::Local(worktree) => &worktree.snapshot,
2923            Worktree::Remote(worktree) => &worktree.snapshot,
2924        }
2925    }
2926}
2927
2928impl Deref for LocalWorktree {
2929    type Target = LocalSnapshot;
2930
2931    fn deref(&self) -> &Self::Target {
2932        &self.snapshot
2933    }
2934}
2935
2936impl Deref for RemoteWorktree {
2937    type Target = Snapshot;
2938
2939    fn deref(&self) -> &Self::Target {
2940        &self.snapshot
2941    }
2942}
2943
2944impl fmt::Debug for LocalWorktree {
2945    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2946        self.snapshot.fmt(f)
2947    }
2948}
2949
2950impl fmt::Debug for Snapshot {
2951    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2952        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2953        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2954
2955        impl<'a> fmt::Debug for EntriesByPath<'a> {
2956            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2957                f.debug_map()
2958                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2959                    .finish()
2960            }
2961        }
2962
2963        impl<'a> fmt::Debug for EntriesById<'a> {
2964            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2965                f.debug_list().entries(self.0.iter()).finish()
2966            }
2967        }
2968
2969        f.debug_struct("Snapshot")
2970            .field("id", &self.id)
2971            .field("root_name", &self.root_name)
2972            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2973            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2974            .finish()
2975    }
2976}
2977
2978#[derive(Clone, PartialEq)]
2979pub struct File {
2980    pub worktree: Model<Worktree>,
2981    pub path: Arc<Path>,
2982    pub mtime: Option<SystemTime>,
2983    pub entry_id: Option<ProjectEntryId>,
2984    pub is_local: bool,
2985    pub is_deleted: bool,
2986    pub is_private: bool,
2987}
2988
2989impl language::File for File {
2990    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2991        if self.is_local {
2992            Some(self)
2993        } else {
2994            None
2995        }
2996    }
2997
2998    fn mtime(&self) -> Option<SystemTime> {
2999        self.mtime
3000    }
3001
3002    fn path(&self) -> &Arc<Path> {
3003        &self.path
3004    }
3005
3006    fn full_path(&self, cx: &AppContext) -> PathBuf {
3007        let mut full_path = PathBuf::new();
3008        let worktree = self.worktree.read(cx);
3009
3010        if worktree.is_visible() {
3011            full_path.push(worktree.root_name());
3012        } else {
3013            let path = worktree.abs_path();
3014
3015            if worktree.is_local() && path.starts_with(home_dir().as_path()) {
3016                full_path.push("~");
3017                full_path.push(path.strip_prefix(home_dir().as_path()).unwrap());
3018            } else {
3019                full_path.push(path)
3020            }
3021        }
3022
3023        if self.path.components().next().is_some() {
3024            full_path.push(&self.path);
3025        }
3026
3027        full_path
3028    }
3029
3030    /// Returns the last component of this handle's absolute path. If this handle refers to the root
3031    /// of its worktree, then this method will return the name of the worktree itself.
3032    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
3033        self.path
3034            .file_name()
3035            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
3036    }
3037
3038    fn worktree_id(&self) -> usize {
3039        self.worktree.entity_id().as_u64() as usize
3040    }
3041
3042    fn is_deleted(&self) -> bool {
3043        self.is_deleted
3044    }
3045
3046    fn as_any(&self) -> &dyn Any {
3047        self
3048    }
3049
3050    fn to_proto(&self, cx: &AppContext) -> rpc::proto::File {
3051        rpc::proto::File {
3052            worktree_id: self.worktree.read(cx).id().to_proto(),
3053            entry_id: self.entry_id.map(|id| id.to_proto()),
3054            path: self.path.to_string_lossy().into(),
3055            mtime: self.mtime.map(|time| time.into()),
3056            is_deleted: self.is_deleted,
3057        }
3058    }
3059
3060    fn is_private(&self) -> bool {
3061        self.is_private
3062    }
3063}
3064
3065impl language::LocalFile for File {
3066    fn abs_path(&self, cx: &AppContext) -> PathBuf {
3067        let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
3068        if self.path.as_ref() == Path::new("") {
3069            worktree_path.to_path_buf()
3070        } else {
3071            worktree_path.join(&self.path)
3072        }
3073    }
3074
3075    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
3076        let worktree = self.worktree.read(cx).as_local().unwrap();
3077        let abs_path = worktree.absolutize(&self.path);
3078        let fs = worktree.fs.clone();
3079        cx.background_executor()
3080            .spawn(async move { fs.load(&abs_path?).await })
3081    }
3082}
3083
3084impl File {
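    /// Creates a local, non-deleted [`File`] handle for an existing worktree
    /// [`Entry`], sharing the entry's path, mtime, and privacy flag.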
3085    pub fn for_entry(entry: Entry, worktree: Model<Worktree>) -> Arc<Self> {
3086        Arc::new(Self {
3087            worktree,
3088            path: entry.path.clone(),
3089            mtime: entry.mtime,
3090            entry_id: Some(entry.id),
3091            is_local: true,
3092            is_deleted: false,
3093            is_private: entry.is_private,
3094        })
3095    }
3096
3097    pub fn from_proto(
3098        proto: rpc::proto::File,
3099        worktree: Model<Worktree>,
3100        cx: &AppContext,
3101    ) -> Result<Self> {
3102        let worktree_id = worktree
3103            .read(cx)
3104            .as_remote()
3105            .ok_or_else(|| anyhow!("not remote"))?
3106            .id();
3107
3108        if worktree_id.to_proto() != proto.worktree_id {
3109            return Err(anyhow!("worktree id does not match file"));
3110        }
3111
3112        Ok(Self {
3113            worktree,
3114            path: Path::new(&proto.path).into(),
3115            mtime: proto.mtime.map(|time| time.into()),
3116            entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
3117            is_local: false,
3118            is_deleted: proto.is_deleted,
3119            is_private: false,
3120        })
3121    }
3122
3123    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
3124        file.and_then(|f| f.as_any().downcast_ref())
3125    }
3126
3127    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
3128        self.worktree.read(cx).id()
3129    }
3130
3131    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
3132        if self.is_deleted {
3133            None
3134        } else {
3135            self.entry_id
3136        }
3137    }
3138}
3139
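/// A single file or directory tracked by a worktree snapshot.
///
/// Entries are stored in a [`SumTree`] keyed by their worktree-relative path and
/// carry filesystem metadata (inode, mtime, symlink target) along with the git,
/// ignore, and privacy state used for filtering and display.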
3140#[derive(Clone, Debug, PartialEq, Eq, Hash)]
3141pub struct Entry {
3142    pub id: ProjectEntryId,
3143    pub kind: EntryKind,
3144    pub path: Arc<Path>,
3145    pub inode: u64,
3146    pub mtime: Option<SystemTime>,
3147
3148    pub canonical_path: Option<Box<Path>>,
3149    pub is_symlink: bool,
3150    /// Whether this entry is ignored by Git.
3151    ///
3152    /// We only scan ignored entries once the directory is expanded and
3153    /// exclude them from searches.
3154    pub is_ignored: bool,
3155
3156    /// Whether this entry's canonical path is outside of the worktree.
3157    /// This means the entry is only accessible from the worktree root via a
3158    /// symlink.
3159    ///
3160    /// We only scan entries outside of the worktree once the symlinked
3161    /// directory is expanded. External entries are treated like gitignored
3162    /// entries in that they are not included in searches.
3163    pub is_external: bool,
3164    pub git_status: Option<GitFileStatus>,
3165    /// Whether this entry is considered private (for example, a `.env` file).
3166    pub is_private: bool,
3167    pub char_bag: CharBag,
3168}
3169
3170#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
3171pub enum EntryKind {
3172    UnloadedDir,
3173    PendingDir,
3174    Dir,
3175    File,
3176}
3177
3178#[derive(Clone, Copy, Debug, PartialEq)]
3179pub enum PathChange {
3180    /// A filesystem entry was created.
3181    Added,
3182    /// A filesystem entry was removed.
3183    Removed,
3184    /// A filesystem entry was updated.
3185    Updated,
3186    /// A filesystem entry was either updated or added. We don't know
3187    /// whether or not it already existed, because the path had not
3188    /// been loaded before the event.
3189    AddedOrUpdated,
3190    /// A filesystem entry was found during the initial scan of the worktree.
3191    Loaded,
3192}
3193
3194pub struct GitRepositoryChange {
3195    /// The previous state of the repository, if it already existed.
3196    pub old_repository: Option<RepositoryEntry>,
3197}
3198
3199pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
3200pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3201
3202impl Entry {
3203    fn new(
3204        path: Arc<Path>,
3205        metadata: &fs::Metadata,
3206        next_entry_id: &AtomicUsize,
3207        root_char_bag: CharBag,
3208        canonical_path: Option<Box<Path>>,
3209    ) -> Self {
3210        let char_bag = char_bag_for_path(root_char_bag, &path);
3211        Self {
3212            id: ProjectEntryId::new(next_entry_id),
3213            kind: if metadata.is_dir {
3214                EntryKind::PendingDir
3215            } else {
3216                EntryKind::File
3217            },
3218            path,
3219            inode: metadata.inode,
3220            mtime: Some(metadata.mtime),
3221            canonical_path,
3222            is_symlink: metadata.is_symlink,
3223            is_ignored: false,
3224            is_external: false,
3225            is_private: false,
3226            git_status: None,
3227            char_bag,
3228        }
3229    }
3230
3231    pub fn is_created(&self) -> bool {
3232        self.mtime.is_some()
3233    }
3234
3235    pub fn is_dir(&self) -> bool {
3236        self.kind.is_dir()
3237    }
3238
3239    pub fn is_file(&self) -> bool {
3240        self.kind.is_file()
3241    }
3242
3243    pub fn git_status(&self) -> Option<GitFileStatus> {
3244        self.git_status
3245    }
3246}
3247
3248impl EntryKind {
3249    pub fn is_dir(&self) -> bool {
3250        matches!(
3251            self,
3252            EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
3253        )
3254    }
3255
3256    pub fn is_unloaded(&self) -> bool {
3257        matches!(self, EntryKind::UnloadedDir)
3258    }
3259
3260    pub fn is_file(&self) -> bool {
3261        matches!(self, EntryKind::File)
3262    }
3263}
3264
3265impl sum_tree::Item for Entry {
3266    type Summary = EntrySummary;
3267
3268    fn summary(&self) -> Self::Summary {
3269        let non_ignored_count = if self.is_ignored || self.is_external {
3270            0
3271        } else {
3272            1
3273        };
3274        let file_count;
3275        let non_ignored_file_count;
3276        if self.is_file() {
3277            file_count = 1;
3278            non_ignored_file_count = non_ignored_count;
3279        } else {
3280            file_count = 0;
3281            non_ignored_file_count = 0;
3282        }
3283
3284        let mut statuses = GitStatuses::default();
3285        match self.git_status {
3286            Some(status) => match status {
3287                GitFileStatus::Added => statuses.added = 1,
3288                GitFileStatus::Modified => statuses.modified = 1,
3289                GitFileStatus::Conflict => statuses.conflict = 1,
3290            },
3291            None => {}
3292        }
3293
3294        EntrySummary {
3295            max_path: self.path.clone(),
3296            count: 1,
3297            non_ignored_count,
3298            file_count,
3299            non_ignored_file_count,
3300            statuses,
3301        }
3302    }
3303}
3304
3305impl sum_tree::KeyedItem for Entry {
3306    type Key = PathKey;
3307
3308    fn key(&self) -> Self::Key {
3309        PathKey(self.path.clone())
3310    }
3311}
3312
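/// Aggregated statistics for a contiguous run of [`Entry`] values, used as the
/// [`sum_tree::Summary`] of the entries tree: total and non-ignored entry/file
/// counts, git status counts, and the right-most path in the run.
///
/// Summaries combine left-to-right; illustratively, adding a summary with
/// `count: 2, file_count: 1` to one with `count: 3, file_count: 2` yields
/// `count: 5, file_count: 3`, with `max_path` taken from the right-hand side.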
3313#[derive(Clone, Debug)]
3314pub struct EntrySummary {
3315    max_path: Arc<Path>,
3316    count: usize,
3317    non_ignored_count: usize,
3318    file_count: usize,
3319    non_ignored_file_count: usize,
3320    statuses: GitStatuses,
3321}
3322
3323impl Default for EntrySummary {
3324    fn default() -> Self {
3325        Self {
3326            max_path: Arc::from(Path::new("")),
3327            count: 0,
3328            non_ignored_count: 0,
3329            file_count: 0,
3330            non_ignored_file_count: 0,
3331            statuses: Default::default(),
3332        }
3333    }
3334}
3335
3336impl sum_tree::Summary for EntrySummary {
3337    type Context = ();
3338
3339    fn add_summary(&mut self, rhs: &Self, _: &()) {
3340        self.max_path = rhs.max_path.clone();
3341        self.count += rhs.count;
3342        self.non_ignored_count += rhs.non_ignored_count;
3343        self.file_count += rhs.file_count;
3344        self.non_ignored_file_count += rhs.non_ignored_file_count;
3345        self.statuses += rhs.statuses;
3346    }
3347}
3348
3349#[derive(Clone, Debug)]
3350struct PathEntry {
3351    id: ProjectEntryId,
3352    path: Arc<Path>,
3353    is_ignored: bool,
3354    scan_id: usize,
3355}
3356
3357impl sum_tree::Item for PathEntry {
3358    type Summary = PathEntrySummary;
3359
3360    fn summary(&self) -> Self::Summary {
3361        PathEntrySummary { max_id: self.id }
3362    }
3363}
3364
3365impl sum_tree::KeyedItem for PathEntry {
3366    type Key = ProjectEntryId;
3367
3368    fn key(&self) -> Self::Key {
3369        self.id
3370    }
3371}
3372
3373#[derive(Clone, Debug, Default)]
3374struct PathEntrySummary {
3375    max_id: ProjectEntryId,
3376}
3377
3378impl sum_tree::Summary for PathEntrySummary {
3379    type Context = ();
3380
3381    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
3382        self.max_id = summary.max_id;
3383    }
3384}
3385
3386impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
3387    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
3388        *self = summary.max_id;
3389    }
3390}
3391
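/// The key dimension of the entries tree: orders [`Entry`] values by their
/// worktree-relative path, derived from [`EntrySummary::max_path`].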
3392#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
3393pub struct PathKey(Arc<Path>);
3394
3395impl Default for PathKey {
3396    fn default() -> Self {
3397        Self(Path::new("").into())
3398    }
3399}
3400
3401impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
3402    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3403        self.0 = summary.max_path.clone();
3404    }
3405}
3406
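/// Keeps the worktree's state in sync with the filesystem from a background
/// executor: performs the initial recursive scan, then reacts to FS events,
/// explicit scan requests, and newly added path prefixes, publishing updated
/// snapshots through `status_updates_tx`.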
3407struct BackgroundScanner {
3408    state: Mutex<BackgroundScannerState>,
3409    fs: Arc<dyn Fs>,
3410    fs_case_sensitive: bool,
3411    status_updates_tx: UnboundedSender<ScanState>,
3412    executor: BackgroundExecutor,
3413    scan_requests_rx: channel::Receiver<ScanRequest>,
3414    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
3415    next_entry_id: Arc<AtomicUsize>,
3416    phase: BackgroundScannerPhase,
3417    watcher: Arc<dyn Watcher>,
3418    settings: WorktreeSettings,
3419    share_private_files: bool,
3420}
3421
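/// The stage the scanner is currently in; events received before the initial
/// scan completes cannot be reported as precisely as later ones.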
3422#[derive(PartialEq)]
3423enum BackgroundScannerPhase {
3424    InitialScan,
3425    EventsReceivedDuringInitialScan,
3426    Events,
3427}
3428
3429impl BackgroundScanner {
3430    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathBuf>>>>) {
3431        use futures::FutureExt as _;
3432
3433        // If the worktree root does not contain a git repository, then find
3434        // the git repository in an ancestor directory. Find any gitignore files
3435        // in ancestor directories.
3436        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
3437        for (index, ancestor) in root_abs_path.ancestors().enumerate() {
3438            if index != 0 {
3439                if let Ok(ignore) =
3440                    build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
3441                {
3442                    self.state
3443                        .lock()
3444                        .snapshot
3445                        .ignores_by_parent_abs_path
3446                        .insert(ancestor.into(), (ignore.into(), false));
3447                }
3448            }
3449
3450            let ancestor_dot_git = ancestor.join(&*DOT_GIT);
3451            if ancestor_dot_git.is_dir() {
3452                if index != 0 {
3453                    // We canonicalize, since the FS events use the canonicalized path.
3454                    if let Some(ancestor_dot_git) =
3455                        self.fs.canonicalize(&ancestor_dot_git).await.log_err()
3456                    {
3457                        let (ancestor_git_events, _) =
3458                            self.fs.watch(&ancestor_dot_git, FS_WATCH_LATENCY).await;
3459                        fs_events_rx = select(fs_events_rx, ancestor_git_events).boxed();
3460
3461                        // We associate the external git repo with our root folder and
3462                        // also mark where in the git repo the root folder is located.
3463                        self.state.lock().build_git_repository_for_path(
3464                            Path::new("").into(),
3465                            ancestor_dot_git.into(),
3466                            Some(root_abs_path.strip_prefix(ancestor).unwrap().into()),
3467                            self.fs.as_ref(),
3468                        );
3469                    };
3470                }
3471
3472                // Reached root of git repository.
3473                break;
3474            }
3475        }
3476
3477        let (scan_job_tx, scan_job_rx) = channel::unbounded();
3478        {
3479            let mut state = self.state.lock();
3480            state.snapshot.scan_id += 1;
3481            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
3482                let ignore_stack = state
3483                    .snapshot
3484                    .ignore_stack_for_abs_path(&root_abs_path, true);
3485                if ignore_stack.is_abs_path_ignored(&root_abs_path, true) {
3486                    root_entry.is_ignored = true;
3487                    state.insert_entry(root_entry.clone(), self.fs.as_ref());
3488                }
3489                state.enqueue_scan_dir(root_abs_path, &root_entry, &scan_job_tx);
3490            }
3491        };
3492
3493        // Perform an initial scan of the directory.
3494        drop(scan_job_tx);
3495        self.scan_dirs(true, scan_job_rx).await;
3496        {
3497            let mut state = self.state.lock();
3498            state.snapshot.completed_scan_id = state.snapshot.scan_id;
3499        }
3500
3501        self.send_status_update(false, None);
3502
3503        // Process any FS events that occurred while performing the initial scan.
3504        // For these events, the reported changes cannot be as precise, because we
3505        // didn't have the previous state loaded yet.
3506        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
3507        if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
3508            while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
3509                paths.extend(more_paths);
3510            }
3511            self.process_events(paths).await;
3512        }
3513
3514        // Continue processing events until the worktree is dropped.
3515        self.phase = BackgroundScannerPhase::Events;
3516
3517        loop {
3518            select_biased! {
3519                // Process any path refresh requests from the worktree. Prioritize
3520                // these before handling changes reported by the filesystem.
3521                request = self.scan_requests_rx.recv().fuse() => {
3522                    let Ok(request) = request else { break };
3523                    if !self.process_scan_request(request, false).await {
3524                        return;
3525                    }
3526                }
3527
3528                path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
3529                    let Ok(path_prefix) = path_prefix else { break };
3530                    log::trace!("adding path prefix {:?}", path_prefix);
3531
3532                    let did_scan = self.forcibly_load_paths(&[path_prefix.clone()]).await;
3533                    if did_scan {
3534                        let abs_path = {
3536                            let mut state = self.state.lock();
3537                            state.path_prefixes_to_scan.insert(path_prefix.clone());
3538                            state.snapshot.abs_path.join(&path_prefix)
3539                        };
3540
3541                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
3542                            self.process_events(vec![abs_path]).await;
3543                        }
3544                    }
3545                }
3546
3547                paths = fs_events_rx.next().fuse() => {
3548                    let Some(mut paths) = paths else { break };
3549                    while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
3550                        paths.extend(more_paths);
3551                    }
3552                    self.process_events(paths.clone()).await;
3553                }
3554            }
3555        }
3556    }
3557
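    /// Handles an explicit request to rescan a set of worktree-relative paths:
    /// forcibly loads them, bumps the scan id, reloads their entries, and then
    /// reports a status update. Returns `false` if the status update could not
    /// be delivered.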
3558    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
3559        log::debug!("rescanning paths {:?}", request.relative_paths);
3560
3561        request.relative_paths.sort_unstable();
3562        self.forcibly_load_paths(&request.relative_paths).await;
3563
3564        let root_path = self.state.lock().snapshot.abs_path.clone();
3565        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
3566            Ok(path) => path,
3567            Err(err) => {
3568                log::error!("failed to canonicalize root path: {}", err);
3569                return true;
3570            }
3571        };
3572        let abs_paths = request
3573            .relative_paths
3574            .iter()
3575            .map(|path| {
3576                if path.file_name().is_some() {
3577                    root_canonical_path.join(path)
3578                } else {
3579                    root_canonical_path.clone()
3580                }
3581            })
3582            .collect::<Vec<_>>();
3583
3584        {
3585            let mut state = self.state.lock();
3586            let is_idle = state.snapshot.completed_scan_id == state.snapshot.scan_id;
3587            state.snapshot.scan_id += 1;
3588            if is_idle {
3589                state.snapshot.completed_scan_id = state.snapshot.scan_id;
3590            }
3591        }
3592
3593        self.reload_entries_for_paths(
3594            root_path,
3595            root_canonical_path,
3596            &request.relative_paths,
3597            abs_paths,
3598            None,
3599        )
3600        .await;
3601
3602        self.send_status_update(scanning, Some(request.done))
3603    }
3604
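    /// Processes a batch of absolute paths reported by the FS watcher: filters
    /// out excluded, unloaded, and out-of-root paths, reloads entries for the
    /// rest, refreshes ignore statuses and git repositories, and publishes the
    /// resulting snapshot.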
3605    async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
3606        let root_path = self.state.lock().snapshot.abs_path.clone();
3607        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
3608            Ok(path) => path,
3609            Err(err) => {
3610                log::error!("failed to canonicalize root path: {}", err);
3611                return;
3612            }
3613        };
3614
3615        let mut relative_paths = Vec::with_capacity(abs_paths.len());
3616        let mut dot_git_paths = Vec::new();
3617        abs_paths.sort_unstable();
3618        abs_paths.dedup_by(|a, b| a.starts_with(&b));
3619        abs_paths.retain(|abs_path| {
3620            let snapshot = &self.state.lock().snapshot;
3621            {
3622                let mut is_git_related = false;
3623                if let Some(dot_git_dir) = abs_path
3624                    .ancestors()
3625                    .find(|ancestor| ancestor.file_name() == Some(*DOT_GIT))
3626                {
3627                    let dot_git_path = dot_git_dir
3628                        .strip_prefix(&root_canonical_path)
3629                        .unwrap_or(dot_git_dir)
3630                        .to_path_buf();
3631                    if !dot_git_paths.contains(&dot_git_path) {
3632                        dot_git_paths.push(dot_git_path);
3633                    }
3634                    is_git_related = true;
3635                }
3636
3637                let relative_path: Arc<Path> =
3638                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3639                        path.into()
3640                    } else {
3641                        if is_git_related {
3642                            log::debug!(
3643                              "ignoring event {abs_path:?}, since it's in git dir outside of root path {root_canonical_path:?}",
3644                            );
3645                        } else {
3646                            log::error!(
3647                              "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
3648                            );
3649                        }
3650                        return false;
3651                    };
3652
3653                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
3654                    snapshot
3655                        .entry_for_path(parent)
3656                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
3657                });
3658                if !parent_dir_is_loaded {
3659                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
3660                    return false;
3661                }
3662
3663                if self.settings.is_path_excluded(&relative_path) {
3664                    if !is_git_related {
3665                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
3666                    }
3667                    return false;
3668                }
3669
3670                relative_paths.push(relative_path);
3671                true
3672            }
3673        });
3674
3675        if relative_paths.is_empty() && dot_git_paths.is_empty() {
3676            return;
3677        }
3678
3679        self.state.lock().snapshot.scan_id += 1;
3680
3681        let (scan_job_tx, scan_job_rx) = channel::unbounded();
3682        log::debug!("received fs events {:?}", relative_paths);
3683        self.reload_entries_for_paths(
3684            root_path,
3685            root_canonical_path,
3686            &relative_paths,
3687            abs_paths,
3688            Some(scan_job_tx.clone()),
3689        )
3690        .await;
3691
3692        self.update_ignore_statuses(scan_job_tx).await;
3693        self.scan_dirs(false, scan_job_rx).await;
3694
3695        if !dot_git_paths.is_empty() {
3696            self.update_git_repositories(dot_git_paths).await;
3697        }
3698
3699        {
3700            let mut state = self.state.lock();
3701            state.snapshot.completed_scan_id = state.snapshot.scan_id;
3702            for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
3703                state.scanned_dirs.remove(&entry_id);
3704            }
3705        }
3706
3707        self.send_status_update(false, None);
3708    }
3709
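    /// Ensures the given paths are loaded by scanning their nearest unloaded
    /// ancestor directories. Returns whether any scanning was needed.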
3710    async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
3711        let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
3712        {
3713            let mut state = self.state.lock();
3714            let root_path = state.snapshot.abs_path.clone();
3715            for path in paths {
3716                for ancestor in path.ancestors() {
3717                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
3718                        if entry.kind == EntryKind::UnloadedDir {
3719                            let abs_path = root_path.join(ancestor);
3720                            state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
3721                            state.paths_to_scan.insert(path.clone());
3722                            break;
3723                        }
3724                    }
3725                }
3726            }
3727            drop(scan_job_tx);
3728        }
3729        while let Some(job) = scan_job_rx.next().await {
3730            self.scan_dir(&job).await.log_err();
3731        }
3732
3733        mem::take(&mut self.state.lock().paths_to_scan).len() > 0
3734    }
3735
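    /// Drains the scan queue with one worker per CPU, interleaving path refresh
    /// requests and, when enabled, periodic progress updates.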
3736    async fn scan_dirs(
3737        &self,
3738        enable_progress_updates: bool,
3739        scan_jobs_rx: channel::Receiver<ScanJob>,
3740    ) {
3741        use futures::FutureExt as _;
3742
3743        if self
3744            .status_updates_tx
3745            .unbounded_send(ScanState::Started)
3746            .is_err()
3747        {
3748            return;
3749        }
3750
3751        let progress_update_count = AtomicUsize::new(0);
3752        self.executor
3753            .scoped(|scope| {
3754                for _ in 0..self.executor.num_cpus() {
3755                    scope.spawn(async {
3756                        let mut last_progress_update_count = 0;
3757                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
3758                        futures::pin_mut!(progress_update_timer);
3759
3760                        loop {
3761                            select_biased! {
3762                                // Process any path refresh requests before moving on to process
3763                                // the scan queue, so that user operations are prioritized.
3764                                request = self.scan_requests_rx.recv().fuse() => {
3765                                    let Ok(request) = request else { break };
3766                                    if !self.process_scan_request(request, true).await {
3767                                        return;
3768                                    }
3769                                }
3770
3771                                // Send periodic progress updates to the worktree. Use an atomic counter
3772                                // to ensure that only one of the workers sends a progress update after
3773                                // the update interval elapses.
3774                                _ = progress_update_timer => {
3775                                    match progress_update_count.compare_exchange(
3776                                        last_progress_update_count,
3777                                        last_progress_update_count + 1,
3778                                        SeqCst,
3779                                        SeqCst
3780                                    ) {
3781                                        Ok(_) => {
3782                                            last_progress_update_count += 1;
3783                                            self.send_status_update(true, None);
3784                                        }
3785                                        Err(count) => {
3786                                            last_progress_update_count = count;
3787                                        }
3788                                    }
3789                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
3790                                }
3791
3792                                // Recursively load directories from the file system.
3793                                job = scan_jobs_rx.recv().fuse() => {
3794                                    let Ok(job) = job else { break };
3795                                    if let Err(err) = self.scan_dir(&job).await {
3796                                        if job.path.as_ref() != Path::new("") {
3797                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
3798                                        }
3799                                    }
3800                                }
3801                            }
3802                        }
3803                    })
3804                }
3805            })
3806            .await;
3807    }
3808
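    /// Publishes the current snapshot along with the set of paths changed since
    /// the previous update, unless nothing has changed mid-scan. Returns `false`
    /// if the receiving side has been dropped.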
3809    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
3810        let mut state = self.state.lock();
3811        if state.changed_paths.is_empty() && scanning {
3812            return true;
3813        }
3814
3815        let new_snapshot = state.snapshot.clone();
3816        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
3817        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
3818        state.changed_paths.clear();
3819
3820        self.status_updates_tx
3821            .unbounded_send(ScanState::Updated {
3822                snapshot: new_snapshot,
3823                changes,
3824                scanning,
3825                barrier,
3826            })
3827            .is_ok()
3828    }
3829
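    /// Scans a single directory: reads its children, loads any `.gitignore` and
    /// `.git` state it finds, creates entries for the children, and enqueues
    /// scan jobs for subdirectories that should be descended into.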
3830    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
3831        let root_abs_path;
3832        let root_char_bag;
3833        {
3834            let snapshot = &self.state.lock().snapshot;
3835            if self.settings.is_path_excluded(&job.path) {
3836                log::error!("skipping excluded directory {:?}", job.path);
3837                return Ok(());
3838            }
3839            log::debug!("scanning directory {:?}", job.path);
3840            root_abs_path = snapshot.abs_path().clone();
3841            root_char_bag = snapshot.root_char_bag;
3842        }
3843
3844        let next_entry_id = self.next_entry_id.clone();
3845        let mut ignore_stack = job.ignore_stack.clone();
3846        let mut containing_repository = job.containing_repository.clone();
3847        let mut new_ignore = None;
3848        let mut root_canonical_path = None;
3849        let mut new_entries: Vec<Entry> = Vec::new();
3850        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
3851        let mut child_paths = self
3852            .fs
3853            .read_dir(&job.abs_path)
3854            .await?
3855            .filter_map(|entry| async {
3856                match entry {
3857                    Ok(entry) => Some(entry),
3858                    Err(error) => {
3859                        log::error!("error processing entry {:?}", error);
3860                        None
3861                    }
3862                }
3863            })
3864            .collect::<Vec<_>>()
3865            .await;
3866
3867        // Ensure that .git and .gitignore are processed first.
3868        swap_to_front(&mut child_paths, *GITIGNORE);
3869        swap_to_front(&mut child_paths, *DOT_GIT);
3870
3871        for child_abs_path in child_paths {
3872            let child_abs_path: Arc<Path> = child_abs_path.into();
3873            let child_name = child_abs_path.file_name().unwrap();
3874            let child_path: Arc<Path> = job.path.join(child_name).into();
3875
3876            if child_name == *DOT_GIT {
3877                let repo = self
3878                    .state
3879                    .lock()
3880                    .build_git_repository(child_path.clone(), self.fs.as_ref());
3881                if let Some((work_directory, repository)) = repo {
3882                    let t0 = Instant::now();
3883                    let statuses = repository
3884                        .statuses(Path::new(""))
3885                        .log_err()
3886                        .unwrap_or_default();
3887                    log::trace!("computed git status in {:?}", t0.elapsed());
3888                    containing_repository = Some(ScanJobContainingRepository {
3889                        work_directory,
3890                        statuses,
3891                    });
3892                }
3893                self.watcher.add(child_abs_path.as_ref()).log_err();
3894            } else if child_name == *GITIGNORE {
3895                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
3896                    Ok(ignore) => {
3897                        let ignore = Arc::new(ignore);
3898                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3899                        new_ignore = Some(ignore);
3900                    }
3901                    Err(error) => {
3902                        log::error!(
3903                            "error loading .gitignore file {:?} - {:?}",
3904                            child_name,
3905                            error
3906                        );
3907                    }
3908                }
3909            }
3910
3911            if self.settings.is_path_excluded(&child_path) {
3912                log::debug!("skipping excluded child entry {child_path:?}");
3913                self.state.lock().remove_path(&child_path);
3914                continue;
3915            }
3916
3917            let child_metadata = match self.fs.metadata(&child_abs_path).await {
3918                Ok(Some(metadata)) => metadata,
3919                Ok(None) => continue,
3920                Err(err) => {
3921                    log::error!("error processing {child_abs_path:?}: {err:?}");
3922                    continue;
3923                }
3924            };
3925
3926            let mut child_entry = Entry::new(
3927                child_path.clone(),
3928                &child_metadata,
3929                &next_entry_id,
3930                root_char_bag,
3931                None,
3932            );
3933
3934            if job.is_external {
3935                child_entry.is_external = true;
3936            } else if child_metadata.is_symlink {
3937                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
3938                    Ok(path) => path,
3939                    Err(err) => {
3940                        log::error!(
3941                            "error reading target of symlink {:?}: {:?}",
3942                            child_abs_path,
3943                            err
3944                        );
3945                        continue;
3946                    }
3947                };
3948
3949                // lazily canonicalize the root path in order to determine if
3950                // symlinks point outside of the worktree.
3951                let root_canonical_path = match &root_canonical_path {
3952                    Some(path) => path,
3953                    None => match self.fs.canonicalize(&root_abs_path).await {
3954                        Ok(path) => root_canonical_path.insert(path),
3955                        Err(err) => {
3956                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
3957                            continue;
3958                        }
3959                    },
3960                };
3961
3962                if !canonical_path.starts_with(root_canonical_path) {
3963                    child_entry.is_external = true;
3964                }
3965
3966                child_entry.canonical_path = Some(canonical_path.into());
3967            }
3968
3969            if child_entry.is_dir() {
3970                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
3971
3972                // Avoid recursing endlessly (and eventually crashing) when following a recursive symlink
3973                if job.ancestor_inodes.contains(&child_entry.inode) {
3974                    new_jobs.push(None);
3975                } else {
3976                    let mut ancestor_inodes = job.ancestor_inodes.clone();
3977                    ancestor_inodes.insert(child_entry.inode);
3978
3979                    new_jobs.push(Some(ScanJob {
3980                        abs_path: child_abs_path.clone(),
3981                        path: child_path,
3982                        is_external: child_entry.is_external,
3983                        ignore_stack: if child_entry.is_ignored {
3984                            IgnoreStack::all()
3985                        } else {
3986                            ignore_stack.clone()
3987                        },
3988                        ancestor_inodes,
3989                        scan_queue: job.scan_queue.clone(),
3990                        containing_repository: containing_repository.clone(),
3991                    }));
3992                }
3993            } else {
3994                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
3995                if !child_entry.is_ignored {
3996                    if let Some(repo) = &containing_repository {
3997                        if let Ok(repo_path) = child_entry.path.strip_prefix(&repo.work_directory) {
3998                            let repo_path = RepoPath(repo_path.into());
3999                            child_entry.git_status = repo.statuses.get(&repo_path);
4000                        }
4001                    }
4002                }
4003            }
4004
4005            {
4006                let relative_path = job.path.join(child_name);
4007                if self.is_path_private(&relative_path) {
4008                    log::debug!("detected private file: {relative_path:?}");
4009                    child_entry.is_private = true;
4010                }
4011            }
4012
4013            new_entries.push(child_entry);
4014        }
4015
4016        let mut state = self.state.lock();
4017
4018        // Identify any subdirectories that should not be scanned.
4019        let mut job_ix = 0;
4020        for entry in &mut new_entries {
4021            state.reuse_entry_id(entry);
4022            if entry.is_dir() {
4023                if state.should_scan_directory(entry) {
4024                    job_ix += 1;
4025                } else {
4026                    log::debug!("defer scanning directory {:?}", entry.path);
4027                    entry.kind = EntryKind::UnloadedDir;
4028                    new_jobs.remove(job_ix);
4029                }
4030            }
4031        }
4032
4033        state.populate_dir(&job.path, new_entries, new_ignore);
4034        self.watcher.add(job.abs_path.as_ref()).log_err();
4035
4036        for new_job in new_jobs.into_iter().flatten() {
4037            job.scan_queue
4038                .try_send(new_job)
4039                .expect("channel is unbounded");
4040        }
4041
4042        Ok(())
4043    }
4044
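    /// Refreshes entries for an explicit set of paths: removes entries whose
    /// files no longer exist (or that are being rescanned recursively), then
    /// re-inserts fresh entries with up-to-date metadata, ignore state, and git
    /// status, optionally enqueueing recursive scans for directories.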
4045    async fn reload_entries_for_paths(
4046        &self,
4047        root_abs_path: Arc<Path>,
4048        root_canonical_path: PathBuf,
4049        relative_paths: &[Arc<Path>],
4050        abs_paths: Vec<PathBuf>,
4051        scan_queue_tx: Option<Sender<ScanJob>>,
4052    ) {
4053        let metadata = futures::future::join_all(
4054            abs_paths
4055                .iter()
4056                .map(|abs_path| async move {
4057                    let metadata = self.fs.metadata(abs_path).await?;
4058                    if let Some(metadata) = metadata {
4059                        let canonical_path = self.fs.canonicalize(abs_path).await?;
4060
4061                        // On a case-insensitive filesystem (the default on macOS), we only keep
4062                        // the metadata for non-symlink files if their absolute path matches the
4063                        // canonical path.
4064                        // If the two differ, this might be a case-only rename (`mv test.txt TEST.TXT`),
4065                        // and we want to discard the metadata for the old path (`test.txt`) so it's
4066                        // treated as removed.
4067                        if !self.fs_case_sensitive && !metadata.is_symlink {
4068                            let canonical_file_name = canonical_path.file_name();
4069                            let file_name = abs_path.file_name();
4070                            if canonical_file_name != file_name {
4071                                return Ok(None);
4072                            }
4073                        }
4074
4075                        anyhow::Ok(Some((metadata, canonical_path)))
4076                    } else {
4077                        Ok(None)
4078                    }
4079                })
4080                .collect::<Vec<_>>(),
4081        )
4082        .await;
4083
4084        let mut state = self.state.lock();
4085        let doing_recursive_update = scan_queue_tx.is_some();
4086
4087        // Remove any entries for paths that no longer exist or are being recursively
4088        // refreshed. Do this before adding any new entries, so that renames can be
4089        // detected regardless of the order of the paths.
4090        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
4091            if matches!(metadata, Ok(None)) || doing_recursive_update {
4092                log::trace!("remove path {:?}", path);
4093                state.remove_path(path);
4094            }
4095        }
4096
4097        for (path, metadata) in relative_paths.iter().zip(metadata.into_iter()) {
4098            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
4099            match metadata {
4100                Ok(Some((metadata, canonical_path))) => {
4101                    let ignore_stack = state
4102                        .snapshot
4103                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
4104                    let is_external = !canonical_path.starts_with(&root_canonical_path);
4105                    let mut fs_entry = Entry::new(
4106                        path.clone(),
4107                        &metadata,
4108                        self.next_entry_id.as_ref(),
4109                        state.snapshot.root_char_bag,
4110                        if metadata.is_symlink {
4111                            Some(canonical_path.into())
4112                        } else {
4113                            None
4114                        },
4115                    );
4116
4117                    let is_dir = fs_entry.is_dir();
4118                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
4119
4120                    fs_entry.is_external = is_external;
4121                    fs_entry.is_private = self.is_path_private(path);
4122
4123                    if !is_dir && !fs_entry.is_ignored && !fs_entry.is_external {
4124                        if let Some((repo_entry, repo)) = state.snapshot.repo_for_path(path) {
4125                            if let Ok(repo_path) = repo_entry.relativize(&state.snapshot, path) {
4126                                fs_entry.git_status = repo.repo_ptr.status(&repo_path);
4127                            }
4128                        }
4129                    }
4130
4131                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, fs_entry.is_dir()) {
4132                        if state.should_scan_directory(&fs_entry)
4133                            || (fs_entry.path.as_os_str().is_empty()
4134                                && abs_path.file_name() == Some(*DOT_GIT))
4135                        {
4136                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
4137                        } else {
4138                            fs_entry.kind = EntryKind::UnloadedDir;
4139                        }
4140                    }
4141
4142                    state.insert_entry(fs_entry, self.fs.as_ref());
4143                }
4144                Ok(None) => {
4145                    self.remove_repo_path(path, &mut state.snapshot);
4146                }
4147                Err(err) => {
4148                    // TODO - create a special 'error' entry in the entries tree to mark this
4149                    log::error!("error reading file {abs_path:?} on event: {err:#}");
4150                }
4151            }
4152        }
4153
4154        util::extend_sorted(
4155            &mut state.changed_paths,
4156            relative_paths.iter().cloned(),
4157            usize::MAX,
4158            Ord::cmp,
4159        );
4160    }
4161
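    /// If `path` was the work directory of a tracked git repository, removes
    /// that repository from the snapshot.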
4162    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
4163        if !path
4164            .components()
4165            .any(|component| component.as_os_str() == *DOT_GIT)
4166        {
4167            if let Some(repository) = snapshot.repository_for_work_directory(path) {
4168                let entry = repository.work_directory.0;
4169                snapshot.git_repositories.remove(&entry);
4170                snapshot
4171                    .snapshot
4172                    .repository_entries
4173                    .remove(&RepositoryWorkDirectory(path.into()));
4174                return Some(());
4175            }
4176        }
4177
4178        // TODO statuses
4179        // Track when a .git is removed and iterate over the file system there
4180
4181        Some(())
4182    }
4183
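    /// Recomputes ignore statuses after `.gitignore` changes: collects the
    /// directories whose ignore files were modified, then walks their subtrees
    /// in parallel, updating entries and enqueueing scans for directories that
    /// are no longer ignored.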
4184    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
4185        use futures::FutureExt as _;
4186
4187        let mut ignores_to_update = Vec::new();
4188        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
4189        let prev_snapshot;
4190        {
4191            let snapshot = &mut self.state.lock().snapshot;
4192            let abs_path = snapshot.abs_path.clone();
4193            snapshot
4194                .ignores_by_parent_abs_path
4195                .retain(|parent_abs_path, (_, needs_update)| {
4196                    if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
4197                        if *needs_update {
4198                            *needs_update = false;
4199                            if snapshot.snapshot.entry_for_path(parent_path).is_some() {
4200                                ignores_to_update.push(parent_abs_path.clone());
4201                            }
4202                        }
4203
4204                        let ignore_path = parent_path.join(&*GITIGNORE);
4205                        if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
4206                            return false;
4207                        }
4208                    }
4209                    true
4210                });
4211
4212            ignores_to_update.sort_unstable();
4213            let mut ignores_to_update = ignores_to_update.into_iter().peekable();
4214            while let Some(parent_abs_path) = ignores_to_update.next() {
4215                while ignores_to_update
4216                    .peek()
4217                    .map_or(false, |p| p.starts_with(&parent_abs_path))
4218                {
4219                    ignores_to_update.next().unwrap();
4220                }
4221
4222                let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
4223                ignore_queue_tx
4224                    .send_blocking(UpdateIgnoreStatusJob {
4225                        abs_path: parent_abs_path,
4226                        ignore_stack,
4227                        ignore_queue: ignore_queue_tx.clone(),
4228                        scan_queue: scan_job_tx.clone(),
4229                    })
4230                    .unwrap();
4231            }
4232
4233            prev_snapshot = snapshot.clone();
4234        }
4235        drop(ignore_queue_tx);
4236
4237        self.executor
4238            .scoped(|scope| {
4239                for _ in 0..self.executor.num_cpus() {
4240                    scope.spawn(async {
4241                        loop {
4242                            select_biased! {
4243                                // Process any path refresh requests before moving on to process
4244                                // the queue of ignore statuses.
4245                                request = self.scan_requests_rx.recv().fuse() => {
4246                                    let Ok(request) = request else { break };
4247                                    if !self.process_scan_request(request, true).await {
4248                                        return;
4249                                    }
4250                                }
4251
4252                                // Recursively process directories whose ignores have changed.
4253                                job = ignore_queue_rx.recv().fuse() => {
4254                                    let Ok(job) = job else { break };
4255                                    self.update_ignore_status(job, &prev_snapshot).await;
4256                                }
4257                            }
4258                        }
4259                    });
4260                }
4261            })
4262            .await;
4263    }
4264
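    /// Applies a single [`UpdateIgnoreStatusJob`]: re-evaluates the ignore state
    /// of the children of `job.abs_path`, enqueues follow-up jobs for
    /// subdirectories, and records edits for entries whose state changed.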
4265    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
4266        log::trace!("update ignore status {:?}", job.abs_path);
4267
4268        let mut ignore_stack = job.ignore_stack;
4269        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
4270            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
4271        }
4272
4273        let mut entries_by_id_edits = Vec::new();
4274        let mut entries_by_path_edits = Vec::new();
4275        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
4276        let repo = snapshot.repo_for_path(path);
4277        for mut entry in snapshot.child_entries(path).cloned() {
4278            let was_ignored = entry.is_ignored;
4279            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
4280            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
4281
4282            if entry.is_dir() {
4283                let child_ignore_stack = if entry.is_ignored {
4284                    IgnoreStack::all()
4285                } else {
4286                    ignore_stack.clone()
4287                };
4288
4289                // Scan any directories that were previously ignored and weren't previously scanned.
4290                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
4291                    let state = self.state.lock();
4292                    if state.should_scan_directory(&entry) {
4293                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
4294                    }
4295                }
4296
4297                job.ignore_queue
4298                    .send(UpdateIgnoreStatusJob {
4299                        abs_path: abs_path.clone(),
4300                        ignore_stack: child_ignore_stack,
4301                        ignore_queue: job.ignore_queue.clone(),
4302                        scan_queue: job.scan_queue.clone(),
4303                    })
4304                    .await
4305                    .unwrap();
4306            }
4307
4308            if entry.is_ignored != was_ignored {
4309                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
4310                path_entry.scan_id = snapshot.scan_id;
4311                path_entry.is_ignored = entry.is_ignored;
4312                if !entry.is_dir() && !entry.is_ignored && !entry.is_external {
4313                    if let Some((ref repo_entry, local_repo)) = repo {
4314                        if let Ok(repo_path) = repo_entry.relativize(&snapshot, &entry.path) {
4315                            entry.git_status = local_repo.repo_ptr.status(&repo_path);
4316                        }
4317                    }
4318                }
4319                entries_by_id_edits.push(Edit::Insert(path_entry));
4320                entries_by_path_edits.push(Edit::Insert(entry));
4321            }
4322        }
4323
4324        let state = &mut self.state.lock();
4325        for edit in &entries_by_path_edits {
4326            if let Edit::Insert(entry) = edit {
4327                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
4328                    state.changed_paths.insert(ix, entry.path.clone());
4329                }
4330            }
4331        }
4332
4333        state
4334            .snapshot
4335            .entries_by_path
4336            .edit(entries_by_path_edits, &());
4337        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
4338    }
4339
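    /// Reload the repositories whose `.git` directories are listed in `dot_git_paths`:
    /// register repositories that are new to the snapshot, refresh the branch and
    /// `git_dir_scan_id` of known ones, drop entries whose `.git` no longer exists,
    /// and then recompute file statuses for each affected work directory.
    ///
    /// A sketch of a call, assuming the worktree root itself is a repository (the
    /// path is hypothetical):
    ///
    /// ```ignore
    /// self.update_git_repositories(vec![PathBuf::from(".git")]).await;
    /// ```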
4340    async fn update_git_repositories(&self, dot_git_paths: Vec<PathBuf>) {
4341        log::debug!("reloading repositories: {dot_git_paths:?}");
4342
4343        let mut repo_updates = Vec::new();
4344        {
4345            let mut state = self.state.lock();
4346            let scan_id = state.snapshot.scan_id;
4347            for dot_git_dir in dot_git_paths {
4348                let existing_repository_entry =
4349                    state
4350                        .snapshot
4351                        .git_repositories
4352                        .iter()
4353                        .find_map(|(entry_id, repo)| {
4354                            (repo.git_dir_path.as_ref() == dot_git_dir)
4355                                .then(|| (*entry_id, repo.clone()))
4356                        });
4357
4358                let (work_directory, repository) = match existing_repository_entry {
4359                    None => {
4360                        match state.build_git_repository(dot_git_dir.into(), self.fs.as_ref()) {
4361                            Some(output) => output,
4362                            None => continue,
4363                        }
4364                    }
4365                    Some((entry_id, repository)) => {
4366                        if repository.git_dir_scan_id == scan_id {
4367                            continue;
4368                        }
4369                        let Some(work_dir) = state
4370                            .snapshot
4371                            .entry_for_id(entry_id)
4372                            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
4373                        else {
4374                            continue;
4375                        };
4376
4377                        let repo = &repository.repo_ptr;
4378                        let branch = repo.branch_name();
4379                        repo.reload_index();
4380
4381                        state
4382                            .snapshot
4383                            .git_repositories
4384                            .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
4385                        state
4386                            .snapshot
4387                            .snapshot
4388                            .repository_entries
4389                            .update(&work_dir, |entry| entry.branch = branch.map(Into::into));
4390                        (work_dir, repository.repo_ptr.clone())
4391                    }
4392                };
4393
4394                repo_updates.push(UpdateGitStatusesJob {
4395                    location_in_repo: state
4396                        .snapshot
4397                        .repository_entries
4398                        .get(&work_directory)
4399                        .and_then(|repo| repo.location_in_repo.clone())
4400                        .clone(),
4401                    work_directory,
4402                    repository,
4403                });
4404            }
4405
4406            // Remove any git repositories whose .git entry no longer exists.
4407            let snapshot = &mut state.snapshot;
4408            let mut ids_to_preserve = HashSet::default();
4409            for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
4410                let exists_in_snapshot = snapshot
4411                    .entry_for_id(work_directory_id)
4412                    .map_or(false, |entry| {
4413                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
4414                    });
4415                if exists_in_snapshot {
4416                    ids_to_preserve.insert(work_directory_id);
4417                } else {
4418                    let git_dir_abs_path = snapshot.abs_path().join(&entry.git_dir_path);
4419                    let git_dir_excluded = self.settings.is_path_excluded(&entry.git_dir_path);
4420                    if git_dir_excluded
4421                        && !matches!(
4422                            smol::block_on(self.fs.metadata(&git_dir_abs_path)),
4423                            Ok(None)
4424                        )
4425                    {
4426                        ids_to_preserve.insert(work_directory_id);
4427                    }
4428                }
4429            }
4430
4431            snapshot
4432                .git_repositories
4433                .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
4434            snapshot
4435                .repository_entries
4436                .retain(|_, entry| ids_to_preserve.contains(&entry.work_directory.0));
4437        }
4438
4439        let (mut updates_done_tx, mut updates_done_rx) = barrier::channel();
4440        self.executor
4441            .scoped(|scope| {
4442                scope.spawn(async {
4443                    for repo_update in repo_updates {
4444                        self.update_git_statuses(repo_update);
4445                    }
4446                    updates_done_tx.blocking_send(()).ok();
4447                });
4448
4449                scope.spawn(async {
4450                    loop {
4451                        select_biased! {
4452                            // Process any path refresh requests before moving on to process
4453                            // the queue of git statuses.
4454                            request = self.scan_requests_rx.recv().fuse() => {
4455                                let Ok(request) = request else { break };
4456                                if !self.process_scan_request(request, true).await {
4457                                    return;
4458                                }
4459                            }
4460                            _ = updates_done_rx.recv().fuse() => break,
4461                        }
4462                    }
4463                });
4464            })
4465            .await;
4466    }
4467
4468    /// Update the git statuses for the entries under the given repository's work directory.
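    ///
    /// When the worktree root lives inside a larger repository, `location_in_repo`
    /// holds the root's path within that repository, and statuses are looked up
    /// under that prefix. A sketch with hypothetical paths:
    ///
    /// ```ignore
    /// // worktree root = <repo>/crates/worktree, entry path = src/lib.rs
    /// let status = statuses.get(&Path::new("crates/worktree").join("src/lib.rs"));
    /// ```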
4469    fn update_git_statuses(&self, job: UpdateGitStatusesJob) {
4470        log::trace!("updating git statuses for repo {:?}", job.work_directory.0);
4471        let t0 = Instant::now();
4472        let Some(statuses) = job.repository.statuses(Path::new("")).log_err() else {
4473            return;
4474        };
4475        log::trace!(
4476            "computed git statuses for repo {:?} in {:?}",
4477            job.work_directory.0,
4478            t0.elapsed()
4479        );
4480
4481        let t0 = Instant::now();
4482        let mut changes = Vec::new();
4483        let snapshot = self.state.lock().snapshot.snapshot.clone();
4484        for file in snapshot.traverse_from_path(true, false, false, job.work_directory.0.as_ref()) {
4485            let Ok(repo_path) = file.path.strip_prefix(&job.work_directory.0) else {
4486                break;
4487            };
4488            let git_status = if let Some(location) = &job.location_in_repo {
4489                statuses.get(&location.join(repo_path))
4490            } else {
4491                statuses.get(&repo_path)
4492            };
4493            if file.git_status != git_status {
4494                let mut entry = file.clone();
4495                entry.git_status = git_status;
4496                changes.push((entry.path, git_status));
4497            }
4498        }
4499
4500        let mut state = self.state.lock();
4501        let edits = changes
4502            .iter()
4503            .filter_map(|(path, git_status)| {
4504                let entry = state.snapshot.entry_for_path(path)?.clone();
4505                Some(Edit::Insert(Entry {
4506                    git_status: *git_status,
4507                    ..entry.clone()
4508                }))
4509            })
4510            .collect();
4511
4512        // Apply the git status changes.
4513        util::extend_sorted(
4514            &mut state.changed_paths,
4515            changes.iter().map(|p| p.0.clone()),
4516            usize::MAX,
4517            Ord::cmp,
4518        );
4519        state.snapshot.entries_by_path.edit(edits, &());
4520        log::trace!(
4521            "applied git status updates for repo {:?} in {:?}",
4522            job.work_directory.0,
4523            t0.elapsed(),
4524        );
4525    }
4526
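    /// Diff `old_snapshot` against `new_snapshot`, restricted to `event_paths` and
    /// their descendants, producing the `(path, entry id, PathChange)` triples that
    /// are broadcast to observers.
    ///
    /// A rough illustration of the output (the ids and paths are hypothetical):
    ///
    /// ```ignore
    /// // `a/b.rs` was modified on disk and `a/c.rs` was created after
    /// // `old_snapshot` was taken; both live under the reported event path `a`.
    /// let changes = self.build_change_set(&old_snapshot, &new_snapshot, &[Arc::from(Path::new("a"))]);
    /// // changes ~ [("a/b.rs", id_b, Updated), ("a/c.rs", id_c, Added)]
    /// ```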
4527    fn build_change_set(
4528        &self,
4529        old_snapshot: &Snapshot,
4530        new_snapshot: &Snapshot,
4531        event_paths: &[Arc<Path>],
4532    ) -> UpdatedEntriesSet {
4533        use BackgroundScannerPhase::*;
4534        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};
4535
4536        // Identify which paths have changed. Use the known set of changed
4537        // parent paths to optimize the search.
4538        let mut changes = Vec::new();
4539        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
4540        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
4541        let mut last_newly_loaded_dir_path = None;
4542        old_paths.next(&());
4543        new_paths.next(&());
4544        for path in event_paths {
4545            let path = PathKey(path.clone());
4546            if old_paths.item().map_or(false, |e| e.path < path.0) {
4547                old_paths.seek_forward(&path, Bias::Left, &());
4548            }
4549            if new_paths.item().map_or(false, |e| e.path < path.0) {
4550                new_paths.seek_forward(&path, Bias::Left, &());
4551            }
4552            loop {
4553                match (old_paths.item(), new_paths.item()) {
4554                    (Some(old_entry), Some(new_entry)) => {
4555                        if old_entry.path > path.0
4556                            && new_entry.path > path.0
4557                            && !old_entry.path.starts_with(&path.0)
4558                            && !new_entry.path.starts_with(&path.0)
4559                        {
4560                            break;
4561                        }
4562
4563                        match Ord::cmp(&old_entry.path, &new_entry.path) {
4564                            Ordering::Less => {
4565                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
4566                                old_paths.next(&());
4567                            }
4568                            Ordering::Equal => {
4569                                if self.phase == EventsReceivedDuringInitialScan {
4570                                    if old_entry.id != new_entry.id {
4571                                        changes.push((
4572                                            old_entry.path.clone(),
4573                                            old_entry.id,
4574                                            Removed,
4575                                        ));
4576                                    }
4577                                    // If the worktree was not fully initialized when this event was generated,
4578                                    // we can't know whether this entry was added during the scan or whether
4579                                    // it was merely updated.
4580                                    changes.push((
4581                                        new_entry.path.clone(),
4582                                        new_entry.id,
4583                                        AddedOrUpdated,
4584                                    ));
4585                                } else if old_entry.id != new_entry.id {
4586                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
4587                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
4588                                } else if old_entry != new_entry {
4589                                    if old_entry.kind.is_unloaded() {
4590                                        last_newly_loaded_dir_path = Some(&new_entry.path);
4591                                        changes.push((
4592                                            new_entry.path.clone(),
4593                                            new_entry.id,
4594                                            Loaded,
4595                                        ));
4596                                    } else {
4597                                        changes.push((
4598                                            new_entry.path.clone(),
4599                                            new_entry.id,
4600                                            Updated,
4601                                        ));
4602                                    }
4603                                }
4604                                old_paths.next(&());
4605                                new_paths.next(&());
4606                            }
4607                            Ordering::Greater => {
4608                                let is_newly_loaded = self.phase == InitialScan
4609                                    || last_newly_loaded_dir_path
4610                                        .as_ref()
4611                                        .map_or(false, |dir| new_entry.path.starts_with(&dir));
4612                                changes.push((
4613                                    new_entry.path.clone(),
4614                                    new_entry.id,
4615                                    if is_newly_loaded { Loaded } else { Added },
4616                                ));
4617                                new_paths.next(&());
4618                            }
4619                        }
4620                    }
4621                    (Some(old_entry), None) => {
4622                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
4623                        old_paths.next(&());
4624                    }
4625                    (None, Some(new_entry)) => {
4626                        let is_newly_loaded = self.phase == InitialScan
4627                            || last_newly_loaded_dir_path
4628                                .as_ref()
4629                                .map_or(false, |dir| new_entry.path.starts_with(&dir));
4630                        changes.push((
4631                            new_entry.path.clone(),
4632                            new_entry.id,
4633                            if is_newly_loaded { Loaded } else { Added },
4634                        ));
4635                        new_paths.next(&());
4636                    }
4637                    (None, None) => break,
4638                }
4639            }
4640        }
4641
4642        changes.into()
4643    }
4644
4645    async fn progress_timer(&self, running: bool) {
4646        if !running {
4647            return futures::future::pending().await;
4648        }
4649
4650        #[cfg(any(test, feature = "test-support"))]
4651        if self.fs.is_fake() {
4652            return self.executor.simulate_random_delay().await;
4653        }
4654
4655        smol::Timer::after(FS_WATCH_LATENCY).await;
4656    }
4657
4658    fn is_path_private(&self, path: &Path) -> bool {
4659        !self.share_private_files && self.settings.is_path_private(path)
4660    }
4661}
4662
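/// Move the entry named `file` (if present) to the front of `child_paths`, so that
/// it is processed before its siblings.
///
/// A minimal sketch of the behavior:
///
/// ```ignore
/// let mut children = vec![
///     PathBuf::from("a.rs"),
///     PathBuf::from(".git"),
///     PathBuf::from("b.rs"),
/// ];
/// swap_to_front(&mut children, OsStr::new(".git"));
/// assert_eq!(children[0], PathBuf::from(".git"));
/// ```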
4663fn swap_to_front(child_paths: &mut Vec<PathBuf>, file: &OsStr) {
4664    let position = child_paths
4665        .iter()
4666        .position(|path| path.file_name().unwrap() == file);
4667    if let Some(position) = position {
4668        let temp = child_paths.remove(position);
4669        child_paths.insert(0, temp);
4670    }
4671}
4672
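/// Extend the worktree root's [`CharBag`] with the lower-cased characters of `path`,
/// for use in fuzzy matching.
///
/// A sketch of the intent (`root_bag` is assumed to be the root's precomputed bag):
///
/// ```ignore
/// let bag = char_bag_for_path(root_bag, Path::new("src/MAIN.rs"));
/// // `bag` now also contains 's', 'r', 'c', '/', 'm', 'a', 'i', 'n', and '.'.
/// ```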
4673fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
4674    let mut result = root_char_bag;
4675    result.extend(
4676        path.to_string_lossy()
4677            .chars()
4678            .map(|c| c.to_ascii_lowercase()),
4679    );
4680    result
4681}
4682
4683struct ScanJob {
4684    abs_path: Arc<Path>,
4685    path: Arc<Path>,
4686    ignore_stack: Arc<IgnoreStack>,
4687    scan_queue: Sender<ScanJob>,
4688    ancestor_inodes: TreeSet<u64>,
4689    is_external: bool,
4690    containing_repository: Option<ScanJobContainingRepository>,
4691}
4692
4693#[derive(Clone)]
4694struct ScanJobContainingRepository {
4695    work_directory: RepositoryWorkDirectory,
4696    statuses: GitStatus,
4697}
4698
4699struct UpdateIgnoreStatusJob {
4700    abs_path: Arc<Path>,
4701    ignore_stack: Arc<IgnoreStack>,
4702    ignore_queue: Sender<UpdateIgnoreStatusJob>,
4703    scan_queue: Sender<ScanJob>,
4704}
4705
4706struct UpdateGitStatusesJob {
4707    work_directory: RepositoryWorkDirectory,
4708    location_in_repo: Option<Arc<Path>>,
4709    repository: Arc<dyn GitRepository>,
4710}
4711
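/// Test-only helpers for a [`Model<Worktree>`] that let tests wait until pending
/// file-system events have been observed by the worktree before making assertions.
///
/// A sketch of how a test might use it (the surrounding test setup is omitted):
///
/// ```ignore
/// // After mutating the file system behind `tree`:
/// tree.flush_fs_events(cx).await;
/// // ...assertions about `tree` no longer race the FS watcher.
/// ```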
4712pub trait WorktreeModelHandle {
4713    #[cfg(any(test, feature = "test-support"))]
4714    fn flush_fs_events<'a>(
4715        &self,
4716        cx: &'a mut gpui::TestAppContext,
4717    ) -> futures::future::LocalBoxFuture<'a, ()>;
4718
4719    #[cfg(any(test, feature = "test-support"))]
4720    fn flush_fs_events_in_root_git_repository<'a>(
4721        &self,
4722        cx: &'a mut gpui::TestAppContext,
4723    ) -> futures::future::LocalBoxFuture<'a, ()>;
4724}
4725
4726impl WorktreeModelHandle for Model<Worktree> {
4727    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
4728    // occurred before the worktree was constructed. These events can cause the worktree to perform
4729    // extra directory scans, and emit extra scan-state notifications.
4730    //
4731    // This function mutates the worktree's directory and waits for those mutations to be picked up,
4732    // to ensure that all redundant FS events have already been processed.
4733    #[cfg(any(test, feature = "test-support"))]
4734    fn flush_fs_events<'a>(
4735        &self,
4736        cx: &'a mut gpui::TestAppContext,
4737    ) -> futures::future::LocalBoxFuture<'a, ()> {
4738        let file_name = "fs-event-sentinel";
4739
4740        let tree = self.clone();
4741        let (fs, root_path) = self.update(cx, |tree, _| {
4742            let tree = tree.as_local().unwrap();
4743            (tree.fs.clone(), tree.abs_path().clone())
4744        });
4745
4746        async move {
4747            fs.create_file(&root_path.join(file_name), Default::default())
4748                .await
4749                .unwrap();
4750
4751            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
4752                .await;
4753
4754            fs.remove_file(&root_path.join(file_name), Default::default())
4755                .await
4756                .unwrap();
4757            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
4758                .await;
4759
4760            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4761                .await;
4762        }
4763        .boxed_local()
4764    }
4765
4766    // This function is similar to flush_fs_events, except that it waits for events to be flushed in
4767    // the .git folder of the root repository.
4768    // The reason for its existence is that a repository's .git folder might live *outside* of the
4769    // worktree and thus its FS events might go through a different path.
4770    // In order to flush those, we need to create artificial events in the .git folder and wait
4771    // for the repository to be reloaded.
4772    #[cfg(any(test, feature = "test-support"))]
4773    fn flush_fs_events_in_root_git_repository<'a>(
4774        &self,
4775        cx: &'a mut gpui::TestAppContext,
4776    ) -> futures::future::LocalBoxFuture<'a, ()> {
4777        let file_name = "fs-event-sentinel";
4778
4779        let tree = self.clone();
4780        let (fs, root_path, mut git_dir_scan_id) = self.update(cx, |tree, _| {
4781            let tree = tree.as_local().unwrap();
4782            let root_entry = tree.root_git_entry().unwrap();
4783            let local_repo_entry = tree.get_local_repo(&root_entry).unwrap();
4784            (
4785                tree.fs.clone(),
4786                local_repo_entry.git_dir_path.clone(),
4787                local_repo_entry.git_dir_scan_id,
4788            )
4789        });
4790
4791        let scan_id_increased = |tree: &mut Worktree, git_dir_scan_id: &mut usize| {
4792            let root_entry = tree.root_git_entry().unwrap();
4793            let local_repo_entry = tree
4794                .as_local()
4795                .unwrap()
4796                .get_local_repo(&root_entry)
4797                .unwrap();
4798
4799            if local_repo_entry.git_dir_scan_id > *git_dir_scan_id {
4800                *git_dir_scan_id = local_repo_entry.git_dir_scan_id;
4801                true
4802            } else {
4803                false
4804            }
4805        };
4806
4807        async move {
4808            fs.create_file(&root_path.join(file_name), Default::default())
4809                .await
4810                .unwrap();
4811
4812            cx.condition(&tree, |tree, _| {
4813                scan_id_increased(tree, &mut git_dir_scan_id)
4814            })
4815            .await;
4816
4817            fs.remove_file(&root_path.join(file_name), Default::default())
4818                .await
4819                .unwrap();
4820
4821            cx.condition(&tree, |tree, _| {
4822                scan_id_increased(tree, &mut git_dir_scan_id)
4823            })
4824            .await;
4825
4826            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4827                .await;
4828        }
4829        .boxed_local()
4830    }
4831}
4832
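/// Running totals accumulated while seeking through the entry tree, up to and
/// including `max_path`. `count` and `file_count` include ignored entries, while
/// the `non_ignored_*` fields exclude them; `count(..)` selects the right total
/// for a given combination of include flags.
///
/// A sketch of reading it off a cursor position (the `progress` value is hypothetical):
///
/// ```ignore
/// // Number of non-ignored files at or before the cursor:
/// let files_so_far = progress.count(true, false, false);
/// ```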
4833#[derive(Clone, Debug)]
4834struct TraversalProgress<'a> {
4835    max_path: &'a Path,
4836    count: usize,
4837    non_ignored_count: usize,
4838    file_count: usize,
4839    non_ignored_file_count: usize,
4840}
4841
4842impl<'a> TraversalProgress<'a> {
4843    fn count(&self, include_files: bool, include_dirs: bool, include_ignored: bool) -> usize {
4844        match (include_files, include_dirs, include_ignored) {
4845            (true, true, true) => self.count,
4846            (true, true, false) => self.non_ignored_count,
4847            (true, false, true) => self.file_count,
4848            (true, false, false) => self.non_ignored_file_count,
4849            (false, true, true) => self.count - self.file_count,
4850            (false, true, false) => self.non_ignored_count - self.non_ignored_file_count,
4851            (false, false, _) => 0,
4852        }
4853    }
4854}
4855
4856impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
4857    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
4858        self.max_path = summary.max_path.as_ref();
4859        self.count += summary.count;
4860        self.non_ignored_count += summary.non_ignored_count;
4861        self.file_count += summary.file_count;
4862        self.non_ignored_file_count += summary.non_ignored_file_count;
4863    }
4864}
4865
4866impl<'a> Default for TraversalProgress<'a> {
4867    fn default() -> Self {
4868        Self {
4869            max_path: Path::new(""),
4870            count: 0,
4871            non_ignored_count: 0,
4872            file_count: 0,
4873            non_ignored_file_count: 0,
4874        }
4875    }
4876}
4877
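/// Counts of entries with each kind of git status, stored as a sum-tree dimension
/// so that the totals for any subtree of entries can be computed cheaply.
///
/// A sketch of the arithmetic below, with arbitrary counts:
///
/// ```ignore
/// let a = GitStatuses { added: 2, modified: 1, conflict: 0 };
/// let b = GitStatuses { added: 1, modified: 0, conflict: 1 };
/// let mut total = a;
/// total += b;            // GitStatuses { added: 3, modified: 1, conflict: 1 }
/// let delta = total - a; // back to `b`
/// ```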
4878#[derive(Clone, Debug, Default, Copy)]
4879struct GitStatuses {
4880    added: usize,
4881    modified: usize,
4882    conflict: usize,
4883}
4884
4885impl AddAssign for GitStatuses {
4886    fn add_assign(&mut self, rhs: Self) {
4887        self.added += rhs.added;
4888        self.modified += rhs.modified;
4889        self.conflict += rhs.conflict;
4890    }
4891}
4892
4893impl Sub for GitStatuses {
4894    type Output = GitStatuses;
4895
4896    fn sub(self, rhs: Self) -> Self::Output {
4897        GitStatuses {
4898            added: self.added - rhs.added,
4899            modified: self.modified - rhs.modified,
4900            conflict: self.conflict - rhs.conflict,
4901        }
4902    }
4903}
4904
4905impl<'a> sum_tree::Dimension<'a, EntrySummary> for GitStatuses {
4906    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
4907        *self += summary.statuses
4908    }
4909}
4910
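/// A filtered, in-order walk over the entries of a snapshot, backed by a sum-tree
/// cursor. The `include_*` flags control whether files, directories, and ignored
/// entries are yielded.
///
/// A sketch of iterating files only, assuming a `Snapshot::traverse_from_path`
/// helper with the `(include_files, include_dirs, include_ignored, path)` argument
/// order used elsewhere in this file:
///
/// ```ignore
/// for entry in snapshot.traverse_from_path(true, false, false, Path::new("src")) {
///     println!("{}", entry.path.display());
/// }
/// ```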
4911pub struct Traversal<'a> {
4912    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
4913    include_ignored: bool,
4914    include_files: bool,
4915    include_dirs: bool,
4916}
4917
4918impl<'a> Traversal<'a> {
4919    fn new(
4920        entries: &'a SumTree<Entry>,
4921        include_files: bool,
4922        include_dirs: bool,
4923        include_ignored: bool,
4924        start_path: &Path,
4925    ) -> Self {
4926        let mut cursor = entries.cursor();
4927        cursor.seek(&TraversalTarget::Path(start_path), Bias::Left, &());
4928        let mut traversal = Self {
4929            cursor,
4930            include_files,
4931            include_dirs,
4932            include_ignored,
4933        };
4934        if traversal.end_offset() == traversal.start_offset() {
4935            traversal.next();
4936        }
4937        traversal
4938    }

4939    pub fn advance(&mut self) -> bool {
4940        self.advance_by(1)
4941    }
4942
4943    pub fn advance_by(&mut self, count: usize) -> bool {
4944        self.cursor.seek_forward(
4945            &TraversalTarget::Count {
4946                count: self.end_offset() + count,
4947                include_dirs: self.include_dirs,
4948                include_files: self.include_files,
4949                include_ignored: self.include_ignored,
4950            },
4951            Bias::Left,
4952            &(),
4953        )
4954    }
4955
4956    pub fn advance_to_sibling(&mut self) -> bool {
4957        while let Some(entry) = self.cursor.item() {
4958            self.cursor.seek_forward(
4959                &TraversalTarget::PathSuccessor(&entry.path),
4960                Bias::Left,
4961                &(),
4962            );
4963            if let Some(entry) = self.cursor.item() {
4964                if (self.include_files || !entry.is_file())
4965                    && (self.include_dirs || !entry.is_dir())
4966                    && (self.include_ignored || !entry.is_ignored)
4967                {
4968                    return true;
4969                }
4970            }
4971        }
4972        false
4973    }
4974
4975    pub fn back_to_parent(&mut self) -> bool {
4976        let Some(parent_path) = self.cursor.item().and_then(|entry| entry.path.parent()) else {
4977            return false;
4978        };
4979        self.cursor
4980            .seek(&TraversalTarget::Path(parent_path), Bias::Left, &())
4981    }
4982
4983    pub fn entry(&self) -> Option<&'a Entry> {
4984        self.cursor.item()
4985    }
4986
4987    pub fn start_offset(&self) -> usize {
4988        self.cursor
4989            .start()
4990            .count(self.include_files, self.include_dirs, self.include_ignored)
4991    }
4992
4993    pub fn end_offset(&self) -> usize {
4994        self.cursor
4995            .end(&())
4996            .count(self.include_files, self.include_dirs, self.include_ignored)
4997    }
4998}
4999
5000impl<'a> Iterator for Traversal<'a> {
5001    type Item = &'a Entry;
5002
5003    fn next(&mut self) -> Option<Self::Item> {
5004        if let Some(item) = self.entry() {
5005            self.advance();
5006            Some(item)
5007        } else {
5008            None
5009        }
5010    }
5011}
5012
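/// A seek target for [`Traversal`]'s cursor: an exact path, the first path *after*
/// an entire subtree (`PathSuccessor`), or an absolute position expressed as a
/// count of matching entries.
///
/// `PathSuccessor` is what `advance_to_sibling` uses to skip over a subtree:
///
/// ```ignore
/// cursor.seek_forward(&TraversalTarget::PathSuccessor(&entry.path), Bias::Left, &());
/// ```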
5013#[derive(Debug)]
5014enum TraversalTarget<'a> {
5015    Path(&'a Path),
5016    PathSuccessor(&'a Path),
5017    Count {
5018        count: usize,
5019        include_files: bool,
5020        include_ignored: bool,
5021        include_dirs: bool,
5022    },
5023}
5024
5025impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
5026    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
5027        match self {
5028            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
5029            TraversalTarget::PathSuccessor(path) => {
5030                if cursor_location.max_path.starts_with(path) {
5031                    Ordering::Greater
5032                } else {
5033                    Ordering::Equal
5034                }
5035            }
5036            TraversalTarget::Count {
5037                count,
5038                include_files,
5039                include_dirs,
5040                include_ignored,
5041            } => Ord::cmp(
5042                count,
5043                &cursor_location.count(*include_files, *include_dirs, *include_ignored),
5044            ),
5045        }
5046    }
5047}
5048
5049impl<'a, 'b> SeekTarget<'a, EntrySummary, (TraversalProgress<'a>, GitStatuses)>
5050    for TraversalTarget<'b>
5051{
5052    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitStatuses), _: &()) -> Ordering {
5053        self.cmp(&cursor_location.0, &())
5054    }
5055}
5056
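/// Iterates the *direct* children of `parent_path`, skipping over each child's own
/// subtree via `advance_to_sibling`.
///
/// A sketch, assuming the `Snapshot::child_entries` helper used elsewhere in this
/// file returns this iterator:
///
/// ```ignore
/// for child in snapshot.child_entries(Path::new("src")) {
///     // yields `src/foo.rs` and `src/bar`, but not `src/bar/baz.rs`
/// }
/// ```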
5057pub struct ChildEntriesIter<'a> {
5058    parent_path: &'a Path,
5059    traversal: Traversal<'a>,
5060}
5061
5062impl<'a> Iterator for ChildEntriesIter<'a> {
5063    type Item = &'a Entry;
5064
5065    fn next(&mut self) -> Option<Self::Item> {
5066        if let Some(item) = self.traversal.entry() {
5067            if item.path.starts_with(&self.parent_path) {
5068                self.traversal.advance_to_sibling();
5069                return Some(item);
5070            }
5071        }
5072        None
5073    }
5074}
5075
5076impl<'a> From<&'a Entry> for proto::Entry {
5077    fn from(entry: &'a Entry) -> Self {
5078        Self {
5079            id: entry.id.to_proto(),
5080            is_dir: entry.is_dir(),
5081            path: entry.path.to_string_lossy().into(),
5082            inode: entry.inode,
5083            mtime: entry.mtime.map(|time| time.into()),
5084            is_symlink: entry.is_symlink,
5085            is_ignored: entry.is_ignored,
5086            is_external: entry.is_external,
5087            git_status: entry.git_status.map(git_status_to_proto),
5088        }
5089    }
5090}
5091
5092impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
5093    type Error = anyhow::Error;
5094
5095    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
5096        let kind = if entry.is_dir {
5097            EntryKind::Dir
5098        } else {
5099            EntryKind::File
5100        };
5101        let path: Arc<Path> = PathBuf::from(entry.path).into();
5102        let char_bag = char_bag_for_path(*root_char_bag, &path);
5103        Ok(Entry {
5104            id: ProjectEntryId::from_proto(entry.id),
5105            kind,
5106            path,
5107            inode: entry.inode,
5108            mtime: entry.mtime.map(|time| time.into()),
5109            canonical_path: None,
5110            is_ignored: entry.is_ignored,
5111            is_external: entry.is_external,
5112            git_status: git_status_from_proto(entry.git_status),
5113            is_private: false,
5114            is_symlink: entry.is_symlink,
5115            char_bag,
5116        })
5117    }
5118}
5119
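/// Decode a [`GitFileStatus`] from its protobuf representation; unknown or missing
/// values become `None`. The two functions below round-trip:
///
/// ```ignore
/// let raw = git_status_to_proto(GitFileStatus::Modified);
/// assert_eq!(git_status_from_proto(Some(raw)), Some(GitFileStatus::Modified));
/// ```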
5120fn git_status_from_proto(git_status: Option<i32>) -> Option<GitFileStatus> {
5121    git_status.and_then(|status| {
5122        proto::GitStatus::from_i32(status).map(|status| match status {
5123            proto::GitStatus::Added => GitFileStatus::Added,
5124            proto::GitStatus::Modified => GitFileStatus::Modified,
5125            proto::GitStatus::Conflict => GitFileStatus::Conflict,
5126        })
5127    })
5128}
5129
5130fn git_status_to_proto(status: GitFileStatus) -> i32 {
5131    match status {
5132        GitFileStatus::Added => proto::GitStatus::Added as i32,
5133        GitFileStatus::Modified => proto::GitStatus::Modified as i32,
5134        GitFileStatus::Conflict => proto::GitStatus::Conflict as i32,
5135    }
5136}
5137
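/// A numeric identifier for an [`Entry`], allocated from a shared atomic counter
/// and convertible to and from its protobuf form.
///
/// A minimal sketch (the counter name is hypothetical):
///
/// ```ignore
/// static NEXT_ENTRY_ID: AtomicUsize = AtomicUsize::new(0);
/// let id = ProjectEntryId::new(&NEXT_ENTRY_ID);
/// assert_eq!(ProjectEntryId::from_proto(id.to_proto()), id);
/// ```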
5138#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
5139pub struct ProjectEntryId(usize);
5140
5141impl ProjectEntryId {
5142    pub const MAX: Self = Self(usize::MAX);
5143    pub const MIN: Self = Self(usize::MIN);
5144
5145    pub fn new(counter: &AtomicUsize) -> Self {
5146        Self(counter.fetch_add(1, SeqCst))
5147    }
5148
5149    pub fn from_proto(id: u64) -> Self {
5150        Self(id as usize)
5151    }
5152
5153    pub fn to_proto(&self) -> u64 {
5154        self.0 as u64
5155    }
5156
5157    pub fn to_usize(&self) -> usize {
5158        self.0
5159    }
5160}
5161
5162#[cfg(any(test, feature = "test-support"))]
5163impl CreatedEntry {
5164    pub fn to_included(self) -> Option<Entry> {
5165        match self {
5166            CreatedEntry::Included(entry) => Some(entry),
5167            CreatedEntry::Excluded { .. } => None,
5168        }
5169    }
5170}