worktree.rs

   1mod ignore;
   2mod worktree_settings;
   3#[cfg(test)]
   4mod worktree_tests;
   5
   6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   7use anyhow::{anyhow, Context as _, Result};
   8use clock::ReplicaId;
   9use collections::{HashMap, HashSet, VecDeque};
  10use fs::{copy_recursive, Fs, RemoveOptions, Watcher};
  11use futures::{
  12    channel::{
  13        mpsc::{self, UnboundedSender},
  14        oneshot,
  15    },
  16    select_biased,
  17    stream::select,
  18    task::Poll,
  19    FutureExt as _, Stream, StreamExt,
  20};
  21use fuzzy::CharBag;
  22use git::{
  23    repository::{GitFileStatus, GitRepository, RepoPath},
  24    status::GitStatus,
  25    DOT_GIT, GITIGNORE,
  26};
  27use gpui::{
  28    AppContext, AsyncAppContext, BackgroundExecutor, Context, EventEmitter, Model, ModelContext,
  29    Task,
  30};
  31use ignore::IgnoreStack;
  32use parking_lot::Mutex;
  33use paths::local_settings_folder_relative_path;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use rpc::proto::{self, AnyProtoClient};
  40use settings::{Settings, SettingsLocation, SettingsStore};
  41use smol::channel::{self, Sender};
  42use std::{
  43    any::Any,
  44    cmp::{self, Ordering},
  45    convert::TryFrom,
  46    ffi::OsStr,
  47    fmt,
  48    future::Future,
  49    mem,
  50    ops::{AddAssign, Deref, DerefMut, Sub},
  51    path::{Path, PathBuf},
  52    pin::Pin,
  53    sync::{
  54        atomic::{AtomicUsize, Ordering::SeqCst},
  55        Arc,
  56    },
  57    time::{Duration, Instant, SystemTime},
  58};
  59use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  60use text::{LineEnding, Rope};
  61use util::{paths::home_dir, ResultExt};
  62pub use worktree_settings::WorktreeSettings;
  63
  64#[cfg(feature = "test-support")]
  65pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
  66#[cfg(not(feature = "test-support"))]
  67pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
  68
  69#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  70pub struct WorktreeId(usize);
  71
  72impl From<WorktreeId> for usize {
  73    fn from(value: WorktreeId) -> Self {
  74        value.0
  75    }
  76}
  77
  78/// A set of local or remote files that are being opened as part of a project.
  79/// Responsible for tracking the related FS events (for local worktrees) or collab events (for remote worktrees) and applying the corresponding updates.
  80/// Stores git repository data and the diagnostics for its files.
  81///
  82/// Has an absolute path, and may be set to be visible in Zed UI or not.
  83/// May correspond to a directory or a single file.
  84/// Possible examples:
  85/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
  86/// * a directory opened in Zed — may be added as a visible entry to the current worktree
  87///
  88/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
  89pub enum Worktree {
  90    Local(LocalWorktree),
  91    Remote(RemoteWorktree),
  92}
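    // An illustrative sketch (not part of this module) of how a caller might open a
    // local worktree and read a snapshot of it. The `fs` handle and the async gpui
    // context (`cx`) are assumed to already exist in the caller:
    //
    //     let worktree = Worktree::local(
    //         Path::new("/path/to/project"),    // worktree root
    //         true,                             // visible in the UI
    //         fs.clone(),                       // Arc<dyn Fs>
    //         Arc::new(AtomicUsize::new(0)),    // shared counter for new entry ids
    //         &mut cx,
    //     )
    //     .await?;
    //     let snapshot = worktree.update(&mut cx, |worktree, _| worktree.snapshot())?;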
  93
  94/// The outcome of creating an entry in the worktree.
  95#[derive(Debug)]
  96pub enum CreatedEntry {
  97    /// Created and indexed by the worktree, yielding the corresponding entry.
  98    Included(Entry),
  99    /// Created, but not indexed because it falls under the worktree's exclusion filters.
 100    Excluded { abs_path: PathBuf },
 101}
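    // A sketch of how a `CreatedEntry` returned by `create_entry` might be consumed;
    // the `created` binding and the logging are illustrative only:
    //
    //     match created {
    //         CreatedEntry::Included(entry) => {
    //             log::info!("created and indexed entry at {:?}", entry.path)
    //         }
    //         CreatedEntry::Excluded { abs_path } => {
    //             log::info!("created {abs_path:?}, but it is excluded from the worktree")
    //         }
    //     }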
 102
 103pub struct LoadedFile {
 104    pub file: Arc<File>,
 105    pub text: String,
 106    pub diff_base: Option<String>,
 107}
 108
 109pub struct LocalWorktree {
 110    snapshot: LocalSnapshot,
 111    scan_requests_tx: channel::Sender<ScanRequest>,
 112    path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
 113    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
 114    _background_scanner_tasks: Vec<Task<()>>,
 115    update_observer: Option<UpdateObservationState>,
 116    fs: Arc<dyn Fs>,
 117    fs_case_sensitive: bool,
 118    visible: bool,
 119    next_entry_id: Arc<AtomicUsize>,
 120    settings: WorktreeSettings,
 121    share_private_files: bool,
 122}
 123
 124struct ScanRequest {
 125    relative_paths: Vec<Arc<Path>>,
 126    done: barrier::Sender,
 127}
 128
 129pub struct RemoteWorktree {
 130    snapshot: Snapshot,
 131    background_snapshot: Arc<Mutex<(Snapshot, Vec<proto::UpdateWorktree>)>>,
 132    project_id: u64,
 133    client: AnyProtoClient,
 134    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
 135    update_observer: Option<mpsc::UnboundedSender<proto::UpdateWorktree>>,
 136    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
 137    replica_id: ReplicaId,
 138    visible: bool,
 139    disconnected: bool,
 140}
 141
 142#[derive(Clone)]
 143pub struct Snapshot {
 144    id: WorktreeId,
 145    abs_path: Arc<Path>,
 146    root_name: String,
 147    root_char_bag: CharBag,
 148    entries_by_path: SumTree<Entry>,
 149    entries_by_id: SumTree<PathEntry>,
 150    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 151
 152    /// A number that increases every time the worktree begins scanning
 153    /// a set of paths from the filesystem. This scanning could be caused
 154    /// by some operation performed on the worktree, such as reading or
 155    /// writing a file, or by an event reported by the filesystem.
 156    scan_id: usize,
 157
 158    /// The latest scan id that has completed, and whose preceding scans
 159    /// have all completed. The current `scan_id` could be more than one
 160    /// greater than the `completed_scan_id` if operations are performed
 161    /// on the worktree while it is processing a file-system event.
 162    completed_scan_id: usize,
 163}
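    // The comments above imply the invariant `completed_scan_id <= scan_id`; a snapshot
    // can be treated as settled once the two are equal, e.g. (illustrative only):
    //
    //     let settled = snapshot.completed_scan_id == snapshot.scan_id;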
 164
 165#[derive(Clone, Debug, PartialEq, Eq)]
 166pub struct RepositoryEntry {
 167    pub(crate) work_directory: WorkDirectoryEntry,
 168    pub(crate) branch: Option<Arc<str>>,
 169
 170    /// If location_in_repo is set, it means the .git folder is external
 171    /// and in a parent folder of the project root.
 172    /// In that case, the work_directory field will point to the
 173    /// project-root and location_in_repo contains the location of the
 174    /// project-root in the repository.
 175    ///
 176    /// Example:
 177    ///
 178    ///     my_root_folder/          <-- repository root
 179    ///       .git
 180    ///       my_sub_folder_1/
 181    ///         project_root/        <-- Project root, Zed opened here
 182    ///           ...
 183    ///
 184    /// For this setup, the attributes will have the following values:
 185    ///
 186    ///     work_directory: pointing to "" entry
 187    ///     location_in_repo: Some("my_sub_folder_1/project_root")
 188    pub(crate) location_in_repo: Option<Arc<Path>>,
 189}
 190
 191impl RepositoryEntry {
 192    pub fn branch(&self) -> Option<Arc<str>> {
 193        self.branch.clone()
 194    }
 195
 196    pub fn work_directory_id(&self) -> ProjectEntryId {
 197        *self.work_directory
 198    }
 199
 200    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 201        snapshot
 202            .entry_for_id(self.work_directory_id())
 203            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 204    }
 205
 206    pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
 207        self.into()
 208    }
 209
 210    /// Returns the given project path relative to the root folder of the
 211    /// repository.
 212    /// If the root of the repository (and its .git folder) is located in a parent folder
 213    /// of the project root folder, then the returned RepoPath is relative to the root
 214    /// of the repository and not a valid path inside the project.
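        ///
        /// Illustrative sketch (hypothetical paths), reusing the layout from the
        /// `location_in_repo` docs above, where `location_in_repo` is
        /// `Some("my_sub_folder_1/project_root")` and the work directory is the
        /// worktree root (""):
        ///
        ///     // The project path is joined onto `location_in_repo` before the
        ///     // (empty) work directory prefix is stripped:
        ///     let repo_path = repo.relativize(&snapshot, Path::new("src/lib.rs"))?;
        ///     // => RepoPath for "my_sub_folder_1/project_root/src/lib.rs"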
 215    pub fn relativize(&self, worktree: &Snapshot, path: &Path) -> Result<RepoPath> {
 216        let relativize_path = |path: &Path| {
 217            let entry = worktree
 218                .entry_for_id(self.work_directory.0)
 219                .ok_or_else(|| anyhow!("entry not found"))?;
 220
 221            let relativized_path = path
 222                .strip_prefix(&entry.path)
 223                .map_err(|_| anyhow!("could not relativize {:?} against {:?}", path, entry.path))?;
 224
 225            Ok(relativized_path.into())
 226        };
 227
 228        if let Some(location_in_repo) = &self.location_in_repo {
 229            relativize_path(&location_in_repo.join(path))
 230        } else {
 231            relativize_path(path)
 232        }
 233    }
 234}
 235
 236impl From<&RepositoryEntry> for proto::RepositoryEntry {
 237    fn from(value: &RepositoryEntry) -> Self {
 238        proto::RepositoryEntry {
 239            work_directory_id: value.work_directory.to_proto(),
 240            branch: value.branch.as_ref().map(|str| str.to_string()),
 241        }
 242    }
 243}
 244
 245/// This path corresponds to the 'content path' of a repository in relation
 246/// to Zed's project root.
 247/// In the majority of cases, this is the folder that contains the .git folder.
 248/// But if a sub-folder of a git repository is opened, this corresponds to the
 249/// project root and the .git folder is located in a parent directory.
 250#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 251pub struct RepositoryWorkDirectory(pub(crate) Arc<Path>);
 252
 253impl Default for RepositoryWorkDirectory {
 254    fn default() -> Self {
 255        RepositoryWorkDirectory(Arc::from(Path::new("")))
 256    }
 257}
 258
 259impl AsRef<Path> for RepositoryWorkDirectory {
 260    fn as_ref(&self) -> &Path {
 261        self.0.as_ref()
 262    }
 263}
 264
 265#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 266pub struct WorkDirectoryEntry(ProjectEntryId);
 267
 268impl Deref for WorkDirectoryEntry {
 269    type Target = ProjectEntryId;
 270
 271    fn deref(&self) -> &Self::Target {
 272        &self.0
 273    }
 274}
 275
 276impl From<ProjectEntryId> for WorkDirectoryEntry {
 277    fn from(value: ProjectEntryId) -> Self {
 278        WorkDirectoryEntry(value)
 279    }
 280}
 281
 282#[derive(Debug, Clone)]
 283pub struct LocalSnapshot {
 284    snapshot: Snapshot,
 285    /// All of the gitignore files in the worktree, indexed by the absolute path of their parent directory.
 286    /// The boolean indicates whether the gitignore needs to be updated.
 287    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
 288    /// All of the git repositories in the worktree, indexed by the project entry
 289    /// id of their work directory.
 290    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 291}
 292
 293struct BackgroundScannerState {
 294    snapshot: LocalSnapshot,
 295    scanned_dirs: HashSet<ProjectEntryId>,
 296    path_prefixes_to_scan: HashSet<Arc<Path>>,
 297    paths_to_scan: HashSet<Arc<Path>>,
 298    /// The ids of all of the entries that were removed from the snapshot
 299    /// as part of the current update. These entry ids may be re-used
 300    /// if the same inode is discovered at a new path, or if the given
 301    /// path is re-created after being deleted.
 302    removed_entry_ids: HashMap<(u64, SystemTime), ProjectEntryId>,
 303    changed_paths: Vec<Arc<Path>>,
 304    prev_snapshot: Snapshot,
 305}
 306
 307#[derive(Debug, Clone)]
 308pub struct LocalRepositoryEntry {
 309    pub(crate) git_dir_scan_id: usize,
 310    pub(crate) repo_ptr: Arc<dyn GitRepository>,
 311    /// Path to the actual .git folder.
 312    /// Note: if .git is a file, this points to the folder indicated by the .git file
 313    pub(crate) git_dir_path: Arc<Path>,
 314}
 315
 316impl LocalRepositoryEntry {
 317    pub fn repo(&self) -> &Arc<dyn GitRepository> {
 318        &self.repo_ptr
 319    }
 320}
 321
 322impl Deref for LocalSnapshot {
 323    type Target = Snapshot;
 324
 325    fn deref(&self) -> &Self::Target {
 326        &self.snapshot
 327    }
 328}
 329
 330impl DerefMut for LocalSnapshot {
 331    fn deref_mut(&mut self) -> &mut Self::Target {
 332        &mut self.snapshot
 333    }
 334}
 335
 336enum ScanState {
 337    Started,
 338    Updated {
 339        snapshot: LocalSnapshot,
 340        changes: UpdatedEntriesSet,
 341        barrier: Option<barrier::Sender>,
 342        scanning: bool,
 343    },
 344}
 345
 346struct UpdateObservationState {
 347    snapshots_tx:
 348        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
 349    resume_updates: watch::Sender<()>,
 350    _maintain_remote_snapshot: Task<Option<()>>,
 351}
 352
 353#[derive(Clone)]
 354pub enum Event {
 355    UpdatedEntries(UpdatedEntriesSet),
 356    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
 357    DeletedEntry(ProjectEntryId),
 358}
 359
 360static EMPTY_PATH: &str = "";
 361
 362impl EventEmitter<Event> for Worktree {}
 363
 364impl Worktree {
 365    pub async fn local(
 366        path: impl Into<Arc<Path>>,
 367        visible: bool,
 368        fs: Arc<dyn Fs>,
 369        next_entry_id: Arc<AtomicUsize>,
 370        cx: &mut AsyncAppContext,
 371    ) -> Result<Model<Self>> {
 372        let abs_path = path.into();
 373        let metadata = fs
 374            .metadata(&abs_path)
 375            .await
 376            .context("failed to stat worktree path")?;
 377
 378        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
 379            log::error!(
 380                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
 381            );
 382            true
 383        });
 384
 385        cx.new_model(move |cx: &mut ModelContext<Worktree>| {
 386            let worktree_id = cx.handle().entity_id().as_u64();
 387            let settings_location = Some(SettingsLocation {
 388                worktree_id: worktree_id as usize,
 389                path: Path::new(EMPTY_PATH),
 390            });
 391
 392            let settings = WorktreeSettings::get(settings_location, cx).clone();
 393            cx.observe_global::<SettingsStore>(move |this, cx| {
 394                if let Self::Local(this) = this {
 395                    let settings = WorktreeSettings::get(settings_location, cx).clone();
 396                    if settings != this.settings {
 397                        this.settings = settings;
 398                        this.restart_background_scanners(cx);
 399                    }
 400                }
 401            })
 402            .detach();
 403
 404            let mut snapshot = LocalSnapshot {
 405                ignores_by_parent_abs_path: Default::default(),
 406                git_repositories: Default::default(),
 407                snapshot: Snapshot::new(
 408                    cx.entity_id().as_u64(),
 409                    abs_path
 410                        .file_name()
 411                        .map_or(String::new(), |f| f.to_string_lossy().to_string()),
 412                    abs_path,
 413                ),
 414            };
 415
 416            if let Some(metadata) = metadata {
 417                snapshot.insert_entry(
 418                    Entry::new(
 419                        Arc::from(Path::new("")),
 420                        &metadata,
 421                        &next_entry_id,
 422                        snapshot.root_char_bag,
 423                        None,
 424                    ),
 425                    fs.as_ref(),
 426                );
 427            }
 428
 429            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
 430            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
 431            let mut worktree = LocalWorktree {
 432                share_private_files: false,
 433                next_entry_id,
 434                snapshot,
 435                is_scanning: watch::channel_with(true),
 436                update_observer: None,
 437                scan_requests_tx,
 438                path_prefixes_to_scan_tx,
 439                _background_scanner_tasks: Vec::new(),
 440                fs,
 441                fs_case_sensitive,
 442                visible,
 443                settings,
 444            };
 445            worktree.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
 446            Worktree::Local(worktree)
 447        })
 448    }
 449
 450    pub fn remote(
 451        project_id: u64,
 452        replica_id: ReplicaId,
 453        worktree: proto::WorktreeMetadata,
 454        client: AnyProtoClient,
 455        cx: &mut AppContext,
 456    ) -> Model<Self> {
 457        cx.new_model(|cx: &mut ModelContext<Self>| {
 458            let snapshot = Snapshot::new(
 459                worktree.id,
 460                worktree.root_name,
 461                Arc::from(PathBuf::from(worktree.abs_path)),
 462            );
 463
 464            let background_snapshot = Arc::new(Mutex::new((snapshot.clone(), Vec::new())));
 465            let (background_updates_tx, mut background_updates_rx) = mpsc::unbounded();
 466            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 467
 468            let worktree = RemoteWorktree {
 469                client,
 470                project_id,
 471                replica_id,
 472                snapshot,
 473                background_snapshot: background_snapshot.clone(),
 474                updates_tx: Some(background_updates_tx),
 475                update_observer: None,
 476                snapshot_subscriptions: Default::default(),
 477                visible: worktree.visible,
 478                disconnected: false,
 479            };
 480
  481            // Apply updates to a separate snapshot in a background task, then
 482            // send them to a foreground task which updates the model.
 483            cx.background_executor()
 484                .spawn(async move {
 485                    while let Some(update) = background_updates_rx.next().await {
 486                        {
 487                            let mut lock = background_snapshot.lock();
 488                            if let Err(error) = lock.0.apply_remote_update(update.clone()) {
 489                                log::error!("error applying worktree update: {}", error);
 490                            }
 491                            lock.1.push(update);
 492                        }
 493                        snapshot_updated_tx.send(()).await.ok();
 494                    }
 495                })
 496                .detach();
 497
 498            // On the foreground task, update to the latest snapshot and notify
 499            // any update observer of all updates that led to that snapshot.
 500            cx.spawn(|this, mut cx| async move {
 501                while (snapshot_updated_rx.recv().await).is_some() {
 502                    this.update(&mut cx, |this, cx| {
 503                        let this = this.as_remote_mut().unwrap();
 504                        {
 505                            let mut lock = this.background_snapshot.lock();
 506                            this.snapshot = lock.0.clone();
 507                            if let Some(tx) = &this.update_observer {
 508                                for update in lock.1.drain(..) {
 509                                    tx.unbounded_send(update).ok();
 510                                }
 511                            }
 512                        };
 513                        cx.emit(Event::UpdatedEntries(Arc::from([])));
 514                        cx.notify();
 515                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 516                            if this.observed_snapshot(*scan_id) {
 517                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 518                                let _ = tx.send(());
 519                            } else {
 520                                break;
 521                            }
 522                        }
 523                    })?;
 524                }
 525                anyhow::Ok(())
 526            })
 527            .detach();
 528
 529            Worktree::Remote(worktree)
 530        })
 531    }
 532
 533    pub fn as_local(&self) -> Option<&LocalWorktree> {
 534        if let Worktree::Local(worktree) = self {
 535            Some(worktree)
 536        } else {
 537            None
 538        }
 539    }
 540
 541    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 542        if let Worktree::Remote(worktree) = self {
 543            Some(worktree)
 544        } else {
 545            None
 546        }
 547    }
 548
 549    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 550        if let Worktree::Local(worktree) = self {
 551            Some(worktree)
 552        } else {
 553            None
 554        }
 555    }
 556
 557    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 558        if let Worktree::Remote(worktree) = self {
 559            Some(worktree)
 560        } else {
 561            None
 562        }
 563    }
 564
 565    pub fn is_local(&self) -> bool {
 566        matches!(self, Worktree::Local(_))
 567    }
 568
 569    pub fn is_remote(&self) -> bool {
 570        !self.is_local()
 571    }
 572
 573    pub fn snapshot(&self) -> Snapshot {
 574        match self {
 575            Worktree::Local(worktree) => worktree.snapshot.snapshot.clone(),
 576            Worktree::Remote(worktree) => worktree.snapshot.clone(),
 577        }
 578    }
 579
 580    pub fn scan_id(&self) -> usize {
 581        match self {
 582            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 583            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 584        }
 585    }
 586
 587    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 588        proto::WorktreeMetadata {
 589            id: self.id().to_proto(),
 590            root_name: self.root_name().to_string(),
 591            visible: self.is_visible(),
 592            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 593        }
 594    }
 595
 596    pub fn completed_scan_id(&self) -> usize {
 597        match self {
 598            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 599            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 600        }
 601    }
 602
 603    pub fn is_visible(&self) -> bool {
 604        match self {
 605            Worktree::Local(worktree) => worktree.visible,
 606            Worktree::Remote(worktree) => worktree.visible,
 607        }
 608    }
 609
 610    pub fn replica_id(&self) -> ReplicaId {
 611        match self {
 612            Worktree::Local(_) => 0,
 613            Worktree::Remote(worktree) => worktree.replica_id,
 614        }
 615    }
 616
 617    pub fn abs_path(&self) -> Arc<Path> {
 618        match self {
 619            Worktree::Local(worktree) => worktree.abs_path.clone(),
 620            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 621        }
 622    }
 623
 624    pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<Arc<File>> {
 625        let entry = self.root_entry()?;
 626        Some(File::for_entry(entry.clone(), cx.handle()))
 627    }
 628
 629    pub fn observe_updates<F, Fut>(
 630        &mut self,
 631        project_id: u64,
 632        cx: &mut ModelContext<Worktree>,
 633        callback: F,
 634    ) where
 635        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
 636        Fut: 'static + Send + Future<Output = bool>,
 637    {
 638        match self {
 639            Worktree::Local(this) => this.observe_updates(project_id, cx, callback),
 640            Worktree::Remote(this) => this.observe_updates(project_id, cx, callback),
 641        }
 642    }
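        // Illustrative sketch of forwarding a worktree's updates to a collaborator.
        // `client.send_update` is hypothetical, not an API defined here; the callback
        // only needs to return a future resolving to `bool`:
        //
        //     worktree.update(cx, |worktree, cx| {
        //         worktree.observe_updates(project_id, cx, move |update| {
        //             let client = client.clone();
        //             async move { client.send_update(update).await.is_ok() }
        //         });
        //     });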
 643
 644    pub fn stop_observing_updates(&mut self) {
 645        match self {
 646            Worktree::Local(this) => {
 647                this.update_observer.take();
 648            }
 649            Worktree::Remote(this) => {
 650                this.update_observer.take();
 651            }
 652        }
 653    }
 654
 655    #[cfg(any(test, feature = "test-support"))]
 656    pub fn has_update_observer(&self) -> bool {
 657        match self {
 658            Worktree::Local(this) => this.update_observer.is_some(),
 659            Worktree::Remote(this) => this.update_observer.is_some(),
 660        }
 661    }
 662
 663    pub fn load_file(
 664        &self,
 665        path: &Path,
 666        cx: &mut ModelContext<Worktree>,
 667    ) -> Task<Result<LoadedFile>> {
 668        match self {
 669            Worktree::Local(this) => this.load_file(path, cx),
 670            Worktree::Remote(_) => {
 671                Task::ready(Err(anyhow!("remote worktrees can't yet load files")))
 672            }
 673        }
 674    }
 675
 676    pub fn write_file(
 677        &self,
 678        path: &Path,
 679        text: Rope,
 680        line_ending: LineEnding,
 681        cx: &mut ModelContext<Worktree>,
 682    ) -> Task<Result<Arc<File>>> {
 683        match self {
 684            Worktree::Local(this) => this.write_file(path, text, line_ending, cx),
 685            Worktree::Remote(_) => {
 686                Task::ready(Err(anyhow!("remote worktree can't yet write files")))
 687            }
 688        }
 689    }
 690
 691    pub fn create_entry(
 692        &mut self,
 693        path: impl Into<Arc<Path>>,
 694        is_directory: bool,
 695        cx: &mut ModelContext<Worktree>,
 696    ) -> Task<Result<CreatedEntry>> {
 697        let path = path.into();
 698        let worktree_id = self.id();
 699        match self {
 700            Worktree::Local(this) => this.create_entry(path, is_directory, cx),
 701            Worktree::Remote(this) => {
 702                let project_id = this.project_id;
 703                let request = this.client.request(proto::CreateProjectEntry {
 704                    worktree_id: worktree_id.to_proto(),
 705                    project_id,
 706                    path: path.to_string_lossy().into(),
 707                    is_directory,
 708                });
 709                cx.spawn(move |this, mut cx| async move {
 710                    let response = request.await?;
 711                    match response.entry {
 712                        Some(entry) => this
 713                            .update(&mut cx, |worktree, cx| {
 714                                worktree.as_remote_mut().unwrap().insert_entry(
 715                                    entry,
 716                                    response.worktree_scan_id as usize,
 717                                    cx,
 718                                )
 719                            })?
 720                            .await
 721                            .map(CreatedEntry::Included),
 722                        None => {
 723                            let abs_path = this.update(&mut cx, |worktree, _| {
 724                                worktree
 725                                    .absolutize(&path)
 726                                    .with_context(|| format!("absolutizing {path:?}"))
 727                            })??;
 728                            Ok(CreatedEntry::Excluded { abs_path })
 729                        }
 730                    }
 731                })
 732            }
 733        }
 734    }
 735
 736    pub fn delete_entry(
 737        &mut self,
 738        entry_id: ProjectEntryId,
 739        trash: bool,
 740        cx: &mut ModelContext<Worktree>,
 741    ) -> Option<Task<Result<()>>> {
 742        let task = match self {
 743            Worktree::Local(this) => this.delete_entry(entry_id, trash, cx),
 744            Worktree::Remote(this) => this.delete_entry(entry_id, trash, cx),
 745        }?;
 746        cx.emit(Event::DeletedEntry(entry_id));
 747        Some(task)
 748    }
 749
 750    pub fn rename_entry(
 751        &mut self,
 752        entry_id: ProjectEntryId,
 753        new_path: impl Into<Arc<Path>>,
 754        cx: &mut ModelContext<Self>,
 755    ) -> Task<Result<CreatedEntry>> {
 756        let new_path = new_path.into();
 757        match self {
 758            Worktree::Local(this) => this.rename_entry(entry_id, new_path, cx),
 759            Worktree::Remote(this) => this.rename_entry(entry_id, new_path, cx),
 760        }
 761    }
 762
 763    pub fn copy_entry(
 764        &mut self,
 765        entry_id: ProjectEntryId,
 766        new_path: impl Into<Arc<Path>>,
 767        cx: &mut ModelContext<Self>,
 768    ) -> Task<Result<Option<Entry>>> {
 769        let new_path = new_path.into();
 770        match self {
 771            Worktree::Local(this) => this.copy_entry(entry_id, new_path, cx),
 772            Worktree::Remote(this) => {
 773                let response = this.client.request(proto::CopyProjectEntry {
 774                    project_id: this.project_id,
 775                    entry_id: entry_id.to_proto(),
 776                    new_path: new_path.to_string_lossy().into(),
 777                });
 778                cx.spawn(move |this, mut cx| async move {
 779                    let response = response.await?;
 780                    match response.entry {
 781                        Some(entry) => this
 782                            .update(&mut cx, |worktree, cx| {
 783                                worktree.as_remote_mut().unwrap().insert_entry(
 784                                    entry,
 785                                    response.worktree_scan_id as usize,
 786                                    cx,
 787                                )
 788                            })?
 789                            .await
 790                            .map(Some),
 791                        None => Ok(None),
 792                    }
 793                })
 794            }
 795        }
 796    }
 797
 798    pub fn copy_external_entries(
 799        &mut self,
 800        target_directory: PathBuf,
 801        paths: Vec<Arc<Path>>,
 802        overwrite_existing_files: bool,
 803        cx: &mut ModelContext<Worktree>,
 804    ) -> Task<Result<Vec<ProjectEntryId>>> {
 805        match self {
 806            Worktree::Local(this) => {
 807                this.copy_external_entries(target_directory, paths, overwrite_existing_files, cx)
 808            }
 809            _ => Task::ready(Err(anyhow!(
 810                "Copying external entries is not supported for remote worktrees"
 811            ))),
 812        }
 813    }
 814
 815    pub fn expand_entry(
 816        &mut self,
 817        entry_id: ProjectEntryId,
 818        cx: &mut ModelContext<Worktree>,
 819    ) -> Option<Task<Result<()>>> {
 820        match self {
 821            Worktree::Local(this) => this.expand_entry(entry_id, cx),
 822            Worktree::Remote(this) => {
 823                let response = this.client.request(proto::ExpandProjectEntry {
 824                    project_id: this.project_id,
 825                    entry_id: entry_id.to_proto(),
 826                });
 827                Some(cx.spawn(move |this, mut cx| async move {
 828                    let response = response.await?;
 829                    this.update(&mut cx, |this, _| {
 830                        this.as_remote_mut()
 831                            .unwrap()
 832                            .wait_for_snapshot(response.worktree_scan_id as usize)
 833                    })?
 834                    .await?;
 835                    Ok(())
 836                }))
 837            }
 838        }
 839    }
 840
 841    pub async fn handle_create_entry(
 842        this: Model<Self>,
 843        request: proto::CreateProjectEntry,
 844        mut cx: AsyncAppContext,
 845    ) -> Result<proto::ProjectEntryResponse> {
 846        let (scan_id, entry) = this.update(&mut cx, |this, cx| {
 847            (
 848                this.scan_id(),
 849                this.create_entry(PathBuf::from(request.path), request.is_directory, cx),
 850            )
 851        })?;
 852        Ok(proto::ProjectEntryResponse {
 853            entry: match &entry.await? {
 854                CreatedEntry::Included(entry) => Some(entry.into()),
 855                CreatedEntry::Excluded { .. } => None,
 856            },
 857            worktree_scan_id: scan_id as u64,
 858        })
 859    }
 860
 861    pub async fn handle_delete_entry(
 862        this: Model<Self>,
 863        request: proto::DeleteProjectEntry,
 864        mut cx: AsyncAppContext,
 865    ) -> Result<proto::ProjectEntryResponse> {
 866        let (scan_id, task) = this.update(&mut cx, |this, cx| {
 867            (
 868                this.scan_id(),
 869                this.delete_entry(
 870                    ProjectEntryId::from_proto(request.entry_id),
 871                    request.use_trash,
 872                    cx,
 873                ),
 874            )
 875        })?;
 876        task.ok_or_else(|| anyhow!("invalid entry"))?.await?;
 877        Ok(proto::ProjectEntryResponse {
 878            entry: None,
 879            worktree_scan_id: scan_id as u64,
 880        })
 881    }
 882
 883    pub async fn handle_expand_entry(
 884        this: Model<Self>,
 885        request: proto::ExpandProjectEntry,
 886        mut cx: AsyncAppContext,
 887    ) -> Result<proto::ExpandProjectEntryResponse> {
 888        let task = this.update(&mut cx, |this, cx| {
 889            this.expand_entry(ProjectEntryId::from_proto(request.entry_id), cx)
 890        })?;
 891        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
 892        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
 893        Ok(proto::ExpandProjectEntryResponse {
 894            worktree_scan_id: scan_id as u64,
 895        })
 896    }
 897
 898    pub async fn handle_rename_entry(
 899        this: Model<Self>,
 900        request: proto::RenameProjectEntry,
 901        mut cx: AsyncAppContext,
 902    ) -> Result<proto::ProjectEntryResponse> {
 903        let (scan_id, task) = this.update(&mut cx, |this, cx| {
 904            (
 905                this.scan_id(),
 906                this.rename_entry(
 907                    ProjectEntryId::from_proto(request.entry_id),
 908                    PathBuf::from(request.new_path),
 909                    cx,
 910                ),
 911            )
 912        })?;
 913        Ok(proto::ProjectEntryResponse {
 914            entry: match &task.await? {
 915                CreatedEntry::Included(entry) => Some(entry.into()),
 916                CreatedEntry::Excluded { .. } => None,
 917            },
 918            worktree_scan_id: scan_id as u64,
 919        })
 920    }
 921
 922    pub async fn handle_copy_entry(
 923        this: Model<Self>,
 924        request: proto::CopyProjectEntry,
 925        mut cx: AsyncAppContext,
 926    ) -> Result<proto::ProjectEntryResponse> {
 927        let (scan_id, task) = this.update(&mut cx, |this, cx| {
 928            (
 929                this.scan_id(),
 930                this.copy_entry(
 931                    ProjectEntryId::from_proto(request.entry_id),
 932                    PathBuf::from(request.new_path),
 933                    cx,
 934                ),
 935            )
 936        })?;
 937        Ok(proto::ProjectEntryResponse {
 938            entry: task.await?.as_ref().map(|e| e.into()),
 939            worktree_scan_id: scan_id as u64,
 940        })
 941    }
 942}
 943
 944impl LocalWorktree {
 945    pub fn contains_abs_path(&self, path: &Path) -> bool {
 946        path.starts_with(&self.abs_path)
 947    }
 948
 949    pub fn is_path_private(&self, path: &Path) -> bool {
 950        !self.share_private_files && self.settings.is_path_private(path)
 951    }
 952
 953    fn restart_background_scanners(&mut self, cx: &mut ModelContext<Worktree>) {
 954        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
 955        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
 956        self.scan_requests_tx = scan_requests_tx;
 957        self.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;
 958        self.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
 959    }
 960
 961    fn start_background_scanner(
 962        &mut self,
 963        scan_requests_rx: channel::Receiver<ScanRequest>,
 964        path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
 965        cx: &mut ModelContext<Worktree>,
 966    ) {
 967        let snapshot = self.snapshot();
 968        let share_private_files = self.share_private_files;
 969        let next_entry_id = self.next_entry_id.clone();
 970        let fs = self.fs.clone();
 971        let settings = self.settings.clone();
 972        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
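            // Two tasks cooperate from here on: a background task owns the
            // `BackgroundScanner` and watches the filesystem, while a foreground task
            // receives its `ScanState` messages and applies them to this worktree model.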
 973        let background_scanner = cx.background_executor().spawn({
 974            let abs_path = &snapshot.abs_path;
 975            let abs_path = if cfg!(target_os = "windows") {
 976                abs_path
 977                    .canonicalize()
 978                    .unwrap_or_else(|_| abs_path.to_path_buf())
 979            } else {
 980                abs_path.to_path_buf()
 981            };
 982            let background = cx.background_executor().clone();
 983            async move {
 984                let (events, watcher) = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
 985                let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
 986                    log::error!("Failed to determine whether filesystem is case sensitive: {e:#}");
 987                    true
 988                });
 989
 990                let mut scanner = BackgroundScanner {
 991                    fs,
 992                    fs_case_sensitive,
 993                    status_updates_tx: scan_states_tx,
 994                    executor: background,
 995                    scan_requests_rx,
 996                    path_prefixes_to_scan_rx,
 997                    next_entry_id,
 998                    state: Mutex::new(BackgroundScannerState {
 999                        prev_snapshot: snapshot.snapshot.clone(),
1000                        snapshot,
1001                        scanned_dirs: Default::default(),
1002                        path_prefixes_to_scan: Default::default(),
1003                        paths_to_scan: Default::default(),
1004                        removed_entry_ids: Default::default(),
1005                        changed_paths: Default::default(),
1006                    }),
1007                    phase: BackgroundScannerPhase::InitialScan,
1008                    share_private_files,
1009                    settings,
1010                    watcher,
1011                };
1012
1013                scanner.run(events).await;
1014            }
1015        });
1016        let scan_state_updater = cx.spawn(|this, mut cx| async move {
1017            while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
1018                this.update(&mut cx, |this, cx| {
1019                    let this = this.as_local_mut().unwrap();
1020                    match state {
1021                        ScanState::Started => {
1022                            *this.is_scanning.0.borrow_mut() = true;
1023                        }
1024                        ScanState::Updated {
1025                            snapshot,
1026                            changes,
1027                            barrier,
1028                            scanning,
1029                        } => {
1030                            *this.is_scanning.0.borrow_mut() = scanning;
1031                            this.set_snapshot(snapshot, changes, cx);
1032                            drop(barrier);
1033                        }
1034                    }
1035                    cx.notify();
1036                })
1037                .ok();
1038            }
1039        });
1040        self._background_scanner_tasks = vec![background_scanner, scan_state_updater];
1041        self.is_scanning = watch::channel_with(true);
1042    }
1043
1044    fn set_snapshot(
1045        &mut self,
1046        new_snapshot: LocalSnapshot,
1047        entry_changes: UpdatedEntriesSet,
1048        cx: &mut ModelContext<Worktree>,
1049    ) {
1050        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
1051        self.snapshot = new_snapshot;
1052
1053        if let Some(share) = self.update_observer.as_mut() {
1054            share
1055                .snapshots_tx
1056                .unbounded_send((
1057                    self.snapshot.clone(),
1058                    entry_changes.clone(),
1059                    repo_changes.clone(),
1060                ))
1061                .ok();
1062        }
1063
1064        if !entry_changes.is_empty() {
1065            cx.emit(Event::UpdatedEntries(entry_changes));
1066        }
1067        if !repo_changes.is_empty() {
1068            cx.emit(Event::UpdatedGitRepositories(repo_changes));
1069        }
1070    }
1071
1072    fn changed_repos(
1073        &self,
1074        old_snapshot: &LocalSnapshot,
1075        new_snapshot: &LocalSnapshot,
1076    ) -> UpdatedGitRepositoriesSet {
1077        let mut changes = Vec::new();
1078        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
1079        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
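            // Walk both sorted repository maps in lockstep, keyed by project entry id
            // (a merge join): an id only in `new_repos` is an added repository, an id
            // only in `old_repos` is a removed one, and an id present on both sides is
            // reported only when its `git_dir_scan_id` has changed.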
1080        loop {
1081            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
1082                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
1083                    match Ord::cmp(&new_entry_id, &old_entry_id) {
1084                        Ordering::Less => {
1085                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
1086                                changes.push((
1087                                    entry.path.clone(),
1088                                    GitRepositoryChange {
1089                                        old_repository: None,
1090                                    },
1091                                ));
1092                            }
1093                            new_repos.next();
1094                        }
1095                        Ordering::Equal => {
1096                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
1097                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
1098                                    let old_repo = old_snapshot
1099                                        .repository_entries
1100                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
1101                                        .cloned();
1102                                    changes.push((
1103                                        entry.path.clone(),
1104                                        GitRepositoryChange {
1105                                            old_repository: old_repo,
1106                                        },
1107                                    ));
1108                                }
1109                            }
1110                            new_repos.next();
1111                            old_repos.next();
1112                        }
1113                        Ordering::Greater => {
1114                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
1115                                let old_repo = old_snapshot
1116                                    .repository_entries
1117                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
1118                                    .cloned();
1119                                changes.push((
1120                                    entry.path.clone(),
1121                                    GitRepositoryChange {
1122                                        old_repository: old_repo,
1123                                    },
1124                                ));
1125                            }
1126                            old_repos.next();
1127                        }
1128                    }
1129                }
1130                (Some((entry_id, _)), None) => {
1131                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
1132                        changes.push((
1133                            entry.path.clone(),
1134                            GitRepositoryChange {
1135                                old_repository: None,
1136                            },
1137                        ));
1138                    }
1139                    new_repos.next();
1140                }
1141                (None, Some((entry_id, _))) => {
1142                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
1143                        let old_repo = old_snapshot
1144                            .repository_entries
1145                            .get(&RepositoryWorkDirectory(entry.path.clone()))
1146                            .cloned();
1147                        changes.push((
1148                            entry.path.clone(),
1149                            GitRepositoryChange {
1150                                old_repository: old_repo,
1151                            },
1152                        ));
1153                    }
1154                    old_repos.next();
1155                }
1156                (None, None) => break,
1157            }
1158        }
1159
1160        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
1161            (value.0.clone(), value.1.clone())
1162        }
1163
1164        changes.into()
1165    }
1166
1167    pub fn scan_complete(&self) -> impl Future<Output = ()> {
1168        let mut is_scanning_rx = self.is_scanning.1.clone();
1169        async move {
1170            let mut is_scanning = *is_scanning_rx.borrow();
1171            while is_scanning {
1172                if let Some(value) = is_scanning_rx.recv().await {
1173                    is_scanning = value;
1174                } else {
1175                    break;
1176                }
1177            }
1178        }
1179    }
1180
1181    pub fn snapshot(&self) -> LocalSnapshot {
1182        self.snapshot.clone()
1183    }
1184
1185    pub fn settings(&self) -> WorktreeSettings {
1186        self.settings.clone()
1187    }
1188
1189    pub fn local_git_repo(&self, path: &Path) -> Option<Arc<dyn GitRepository>> {
1190        self.repo_for_path(path)
1191            .map(|(_, entry)| entry.repo_ptr.clone())
1192    }
1193
1194    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1195        self.git_repositories.get(&repo.work_directory.0)
1196    }
1197
1198    fn load_file(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<LoadedFile>> {
1199        let path = Arc::from(path);
1200        let abs_path = self.absolutize(&path);
1201        let fs = self.fs.clone();
1202        let entry = self.refresh_entry(path.clone(), None, cx);
1203        let is_private = self.is_path_private(path.as_ref());
1204
1205        cx.spawn(|this, mut cx| async move {
1206            let abs_path = abs_path?;
1207            let text = fs.load(&abs_path).await?;
1208            let mut index_task = None;
1209            let snapshot = this.update(&mut cx, |this, _| this.as_local().unwrap().snapshot())?;
1210            if let Some(repo) = snapshot.repository_for_path(&path) {
1211                if let Some(repo_path) = repo.relativize(&snapshot, &path).log_err() {
1212                    if let Some(git_repo) = snapshot.git_repositories.get(&*repo.work_directory) {
1213                        let git_repo = git_repo.repo_ptr.clone();
1214                        index_task = Some(
1215                            cx.background_executor()
1216                                .spawn(async move { git_repo.load_index_text(&repo_path) }),
1217                        );
1218                    }
1219                }
1220            }
1221
1222            let diff_base = if let Some(index_task) = index_task {
1223                index_task.await
1224            } else {
1225                None
1226            };
1227
1228            let worktree = this
1229                .upgrade()
1230                .ok_or_else(|| anyhow!("worktree was dropped"))?;
1231            let file = match entry.await? {
1232                Some(entry) => File::for_entry(entry, worktree),
1233                None => {
1234                    let metadata = fs
1235                        .metadata(&abs_path)
1236                        .await
1237                        .with_context(|| {
1238                            format!("Loading metadata for excluded file {abs_path:?}")
1239                        })?
1240                        .with_context(|| {
1241                            format!("Excluded file {abs_path:?} got removed during loading")
1242                        })?;
1243                    Arc::new(File {
1244                        entry_id: None,
1245                        worktree,
1246                        path,
1247                        mtime: Some(metadata.mtime),
1248                        is_local: true,
1249                        is_deleted: false,
1250                        is_private,
1251                    })
1252                }
1253            };
1254
1255            Ok(LoadedFile {
1256                file,
1257                text,
1258                diff_base,
1259            })
1260        })
1261    }
1262
1263    /// Finds the lowest ancestor of the given path that already exists in the worktree's data structures, falling back to the worktree root.
1264    fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1265        let mut lowest_ancestor = None;
1266        for path in path.ancestors() {
1267            if self.entry_for_path(path).is_some() {
1268                lowest_ancestor = Some(path.to_path_buf());
1269                break;
1270            }
1271        }
1272
1273        lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1274    }
1275
1276    fn create_entry(
1277        &self,
1278        path: impl Into<Arc<Path>>,
1279        is_dir: bool,
1280        cx: &mut ModelContext<Worktree>,
1281    ) -> Task<Result<CreatedEntry>> {
1282        let path = path.into();
1283        let abs_path = match self.absolutize(&path) {
1284            Ok(path) => path,
1285            Err(e) => return Task::ready(Err(e.context(format!("absolutizing path {path:?}")))),
1286        };
1287        let path_excluded = self.settings.is_path_excluded(&abs_path);
1288        let fs = self.fs.clone();
1289        let task_abs_path = abs_path.clone();
1290        let write = cx.background_executor().spawn(async move {
1291            if is_dir {
1292                fs.create_dir(&task_abs_path)
1293                    .await
1294                    .with_context(|| format!("creating directory {task_abs_path:?}"))
1295            } else {
1296                fs.save(&task_abs_path, &Rope::default(), LineEnding::default())
1297                    .await
1298                    .with_context(|| format!("creating file {task_abs_path:?}"))
1299            }
1300        });
1301
1302        let lowest_ancestor = self.lowest_ancestor(&path);
1303        cx.spawn(|this, mut cx| async move {
1304            write.await?;
1305            if path_excluded {
1306                return Ok(CreatedEntry::Excluded { abs_path });
1307            }
1308
1309            let (result, refreshes) = this.update(&mut cx, |this, cx| {
1310                let mut refreshes = Vec::new();
1311                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
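                    // Everything between the lowest pre-existing ancestor and the new
                    // entry was just created on disk, so schedule a refresh for each
                    // intermediate directory in addition to the new entry itself.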
1312                for refresh_path in refresh_paths.ancestors() {
1313                    if refresh_path == Path::new("") {
1314                        continue;
1315                    }
1316                    let refresh_full_path = lowest_ancestor.join(refresh_path);
1317
1318                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
1319                        refresh_full_path.into(),
1320                        None,
1321                        cx,
1322                    ));
1323                }
1324                (
1325                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
1326                    refreshes,
1327                )
1328            })?;
1329            for refresh in refreshes {
1330                refresh.await.log_err();
1331            }
1332
1333            Ok(result
1334                .await?
1335                .map(CreatedEntry::Included)
1336                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
1337        })
1338    }
1339
1340    fn write_file(
1341        &self,
1342        path: impl Into<Arc<Path>>,
1343        text: Rope,
1344        line_ending: LineEnding,
1345        cx: &mut ModelContext<Worktree>,
1346    ) -> Task<Result<Arc<File>>> {
1347        let path = path.into();
1348        let fs = self.fs.clone();
1349        let is_private = self.is_path_private(&path);
1350        let Ok(abs_path) = self.absolutize(&path) else {
1351            return Task::ready(Err(anyhow!("invalid path {path:?}")));
1352        };
1353
1354        let write = cx.background_executor().spawn({
1355            let fs = fs.clone();
1356            let abs_path = abs_path.clone();
1357            async move { fs.save(&abs_path, &text, line_ending).await }
1358        });
1359
1360        cx.spawn(move |this, mut cx| async move {
1361            write.await?;
1362            let entry = this
1363                .update(&mut cx, |this, cx| {
1364                    this.as_local_mut()
1365                        .unwrap()
1366                        .refresh_entry(path.clone(), None, cx)
1367                })?
1368                .await?;
1369            let worktree = this.upgrade().ok_or_else(|| anyhow!("worktree dropped"))?;
1370            if let Some(entry) = entry {
1371                Ok(File::for_entry(entry, worktree))
1372            } else {
1373                let metadata = fs
1374                    .metadata(&abs_path)
1375                    .await
1376                    .with_context(|| {
1377                        format!("Fetching metadata after saving the excluded buffer {abs_path:?}")
1378                    })?
1379                    .with_context(|| {
1380                        format!("Excluded buffer {path:?} got removed during saving")
1381                    })?;
1382                Ok(Arc::new(File {
1383                    worktree,
1384                    path,
1385                    mtime: Some(metadata.mtime),
1386                    entry_id: None,
1387                    is_local: true,
1388                    is_deleted: false,
1389                    is_private,
1390                }))
1391            }
1392        })
1393    }
1394
1395    fn delete_entry(
1396        &self,
1397        entry_id: ProjectEntryId,
1398        trash: bool,
1399        cx: &mut ModelContext<Worktree>,
1400    ) -> Option<Task<Result<()>>> {
1401        let entry = self.entry_for_id(entry_id)?.clone();
1402        let abs_path = self.absolutize(&entry.path);
1403        let fs = self.fs.clone();
1404
1405        let delete = cx.background_executor().spawn(async move {
1406            if entry.is_file() {
1407                if trash {
1408                    fs.trash_file(&abs_path?, Default::default()).await?;
1409                } else {
1410                    fs.remove_file(&abs_path?, Default::default()).await?;
1411                }
1412            } else {
1413                if trash {
1414                    fs.trash_dir(
1415                        &abs_path?,
1416                        RemoveOptions {
1417                            recursive: true,
1418                            ignore_if_not_exists: false,
1419                        },
1420                    )
1421                    .await?;
1422                } else {
1423                    fs.remove_dir(
1424                        &abs_path?,
1425                        RemoveOptions {
1426                            recursive: true,
1427                            ignore_if_not_exists: false,
1428                        },
1429                    )
1430                    .await?;
1431                }
1432            }
1433            anyhow::Ok(entry.path)
1434        });
1435
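            // Once the filesystem delete has finished, rescan the deleted path so the
            // snapshot drops the corresponding entry (and any descendants).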
1436        Some(cx.spawn(|this, mut cx| async move {
1437            let path = delete.await?;
1438            this.update(&mut cx, |this, _| {
1439                this.as_local_mut()
1440                    .unwrap()
1441                    .refresh_entries_for_paths(vec![path])
1442            })?
1443            .recv()
1444            .await;
1445            Ok(())
1446        }))
1447    }
1448
1449    fn rename_entry(
1450        &self,
1451        entry_id: ProjectEntryId,
1452        new_path: impl Into<Arc<Path>>,
1453        cx: &mut ModelContext<Worktree>,
1454    ) -> Task<Result<CreatedEntry>> {
1455        let old_path = match self.entry_for_id(entry_id) {
1456            Some(entry) => entry.path.clone(),
1457            None => return Task::ready(Err(anyhow!("no entry to rename for id {entry_id:?}"))),
1458        };
1459        let new_path = new_path.into();
1460        let abs_old_path = self.absolutize(&old_path);
1461        let Ok(abs_new_path) = self.absolutize(&new_path) else {
1462            return Task::ready(Err(anyhow!("absolutizing path {new_path:?}")));
1463        };
1464        let abs_path = abs_new_path.clone();
1465        let fs = self.fs.clone();
1466        let case_sensitive = self.fs_case_sensitive;
1467        let rename = cx.background_executor().spawn(async move {
1468            let abs_old_path = abs_old_path?;
1469            let abs_new_path = abs_new_path;
1470
1471            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
1472            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());
1473
1474            // If we're on a case-insensitive FS and we're doing a case-only rename (e.g. `foobar` to `FOOBAR`)
1475            // we want to overwrite, because otherwise we run into a file-already-exists error.
1476            let overwrite = !case_sensitive
1477                && abs_old_path != abs_new_path
1478                && abs_old_path_lower == abs_new_path_lower;
1479
1480            fs.rename(
1481                &abs_old_path,
1482                &abs_new_path,
1483                fs::RenameOptions {
1484                    overwrite,
1485                    ..Default::default()
1486                },
1487            )
1488            .await
1489            .with_context(|| format!("Renaming {abs_old_path:?} into {abs_new_path:?}"))
1490        });
1491
1492        cx.spawn(|this, mut cx| async move {
1493            rename.await?;
1494            Ok(this
1495                .update(&mut cx, |this, cx| {
1496                    this.as_local_mut()
1497                        .unwrap()
1498                        .refresh_entry(new_path.clone(), Some(old_path), cx)
1499                })?
1500                .await?
1501                .map(CreatedEntry::Included)
1502                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
1503        })
1504    }
1505
1506    fn copy_entry(
1507        &self,
1508        entry_id: ProjectEntryId,
1509        new_path: impl Into<Arc<Path>>,
1510        cx: &mut ModelContext<Worktree>,
1511    ) -> Task<Result<Option<Entry>>> {
1512        let old_path = match self.entry_for_id(entry_id) {
1513            Some(entry) => entry.path.clone(),
1514            None => return Task::ready(Ok(None)),
1515        };
1516        let new_path = new_path.into();
1517        let abs_old_path = self.absolutize(&old_path);
1518        let abs_new_path = self.absolutize(&new_path);
1519        let fs = self.fs.clone();
1520        let copy = cx.background_executor().spawn(async move {
1521            copy_recursive(
1522                fs.as_ref(),
1523                &abs_old_path?,
1524                &abs_new_path?,
1525                Default::default(),
1526            )
1527            .await
1528        });
1529
1530        cx.spawn(|this, mut cx| async move {
1531            copy.await?;
1532            this.update(&mut cx, |this, cx| {
1533                this.as_local_mut()
1534                    .unwrap()
1535                    .refresh_entry(new_path.clone(), None, cx)
1536            })?
1537            .await
1538        })
1539    }
1540
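        /// Copies files and directories from outside the worktree into
        /// `target_directory` and rescans the copied paths, resolving with the ids of
        /// the entries that were created. Sources whose destination would equal the
        /// source itself are skipped.
        ///
        /// A rough usage sketch, not taken from this file: the `worktree`, `cx`, and
        /// path literals below are illustrative assumptions, and `target_directory` is
        /// assumed to be an absolute directory inside this worktree.
        ///
        /// ```ignore
        /// let task = worktree.copy_external_entries(
        ///     PathBuf::from("/home/me/project/assets"),
        ///     vec![Path::new("/tmp/logo.png").into()],
        ///     false,
        ///     cx,
        /// );
        /// let new_entry_ids = task.await?;
        /// ```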
1541    pub fn copy_external_entries(
1542        &mut self,
1543        target_directory: PathBuf,
1544        paths: Vec<Arc<Path>>,
1545        overwrite_existing_files: bool,
1546        cx: &mut ModelContext<Worktree>,
1547    ) -> Task<Result<Vec<ProjectEntryId>>> {
1548        let worktree_path = self.abs_path().clone();
1549        let fs = self.fs.clone();
1550        let paths = paths
1551            .into_iter()
1552            .filter_map(|source| {
1553                let file_name = source.file_name()?;
1554                let mut target = target_directory.clone();
1555                target.push(file_name);
1556
1557                // Do not allow copying the same file to itself.
1558                if source.as_ref() != target.as_path() {
1559                    Some((source, target))
1560                } else {
1561                    None
1562                }
1563            })
1564            .collect::<Vec<_>>();
1565
1566        let paths_to_refresh = paths
1567            .iter()
1568            .filter_map(|(_, target)| Some(target.strip_prefix(&worktree_path).ok()?.into()))
1569            .collect::<Vec<_>>();
1570
1571        cx.spawn(|this, cx| async move {
1572            cx.background_executor()
1573                .spawn(async move {
1574                    for (source, target) in paths {
1575                        copy_recursive(
1576                            fs.as_ref(),
1577                            &source,
1578                            &target,
1579                            fs::CopyOptions {
1580                                overwrite: overwrite_existing_files,
1581                                ..Default::default()
1582                            },
1583                        )
1584                        .await
1585                        .with_context(|| {
1586                            anyhow!("Failed to copy file from {source:?} to {target:?}")
1587                        })?;
1588                    }
1589                    Ok::<(), anyhow::Error>(())
1590                })
1591                .await
1592                .log_err();
1593            let mut refresh = cx.read_model(
1594                &this.upgrade().with_context(|| "Dropped worktree")?,
1595                |this, _| {
1596                    Ok::<postage::barrier::Receiver, anyhow::Error>(
1597                        this.as_local()
1598                            .with_context(|| "Worktree is not local")?
1599                            .refresh_entries_for_paths(paths_to_refresh.clone()),
1600                    )
1601                },
1602            )??;
1603
1604            cx.background_executor()
1605                .spawn(async move {
1606                    refresh.next().await;
1607                    Ok::<(), anyhow::Error>(())
1608                })
1609                .await
1610                .log_err();
1611
1612            let this = this.upgrade().with_context(|| "Dropped worktree")?;
1613            cx.read_model(&this, |this, _| {
1614                paths_to_refresh
1615                    .iter()
1616                    .filter_map(|path| Some(this.entry_for_path(path)?.id))
1617                    .collect()
1618            })
1619        })
1620    }
1621
1622    fn expand_entry(
1623        &mut self,
1624        entry_id: ProjectEntryId,
1625        cx: &mut ModelContext<Worktree>,
1626    ) -> Option<Task<Result<()>>> {
1627        let path = self.entry_for_id(entry_id)?.path.clone();
1628        let mut refresh = self.refresh_entries_for_paths(vec![path]);
1629        Some(cx.background_executor().spawn(async move {
1630            refresh.next().await;
1631            Ok(())
1632        }))
1633    }
1634
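        /// Queues a rescan of the given worktree-relative paths and returns a barrier
        /// receiver that resolves once the background scanner has handled the request.
        ///
        /// A hedged sketch (the `worktree` binding and the path literal are
        /// illustrative assumptions):
        ///
        /// ```ignore
        /// let mut done = worktree.refresh_entries_for_paths(vec![Path::new("src/lib.rs").into()]);
        /// done.recv().await;
        /// ```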
1635    fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
1636        let (tx, rx) = barrier::channel();
1637        self.scan_requests_tx
1638            .try_send(ScanRequest {
1639                relative_paths: paths,
1640                done: tx,
1641            })
1642            .ok();
1643        rx
1644    }
1645
1646    pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
1647        self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
1648    }
1649
1650    fn refresh_entry(
1651        &self,
1652        path: Arc<Path>,
1653        old_path: Option<Arc<Path>>,
1654        cx: &mut ModelContext<Worktree>,
1655    ) -> Task<Result<Option<Entry>>> {
1656        if self.settings.is_path_excluded(&path) {
1657            return Task::ready(Ok(None));
1658        }
1659        let paths = if let Some(old_path) = old_path.as_ref() {
1660            vec![old_path.clone(), path.clone()]
1661        } else {
1662            vec![path.clone()]
1663        };
1664        let t0 = Instant::now();
1665        let mut refresh = self.refresh_entries_for_paths(paths);
1666        cx.spawn(move |this, mut cx| async move {
1667            refresh.recv().await;
1668            log::trace!("refreshed entry {path:?} in {:?}", t0.elapsed());
1669            let new_entry = this.update(&mut cx, |this, _| {
1670                this.entry_for_path(path)
1671                    .cloned()
1672                    .ok_or_else(|| anyhow!("failed to read path after update"))
1673            })??;
1674            Ok(Some(new_entry))
1675        })
1676    }
1677
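        // Streams snapshot updates for this worktree to `callback`, splitting each
        // update into chunks of at most `MAX_CHUNK_SIZE` entries. When the callback
        // reports failure (returns `false`), transmission pauses until
        // `resume_updates` is signalled and the same chunk is retried. If an observer
        // already exists, this only nudges it to resume.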
1678    fn observe_updates<F, Fut>(
1679        &mut self,
1680        project_id: u64,
1681        cx: &mut ModelContext<Worktree>,
1682        callback: F,
1683    ) where
1684        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1685        Fut: Send + Future<Output = bool>,
1686    {
1687        #[cfg(any(test, feature = "test-support"))]
1688        const MAX_CHUNK_SIZE: usize = 2;
1689        #[cfg(not(any(test, feature = "test-support")))]
1690        const MAX_CHUNK_SIZE: usize = 256;
1691
1692        if let Some(observer) = self.update_observer.as_mut() {
1693            *observer.resume_updates.borrow_mut() = ();
1694            return;
1695        }
1696
1697        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
1698        let (snapshots_tx, mut snapshots_rx) =
1699            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
1700        snapshots_tx
1701            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
1702            .ok();
1703
1704        let worktree_id = cx.entity_id().as_u64();
1705        let _maintain_remote_snapshot = cx.background_executor().spawn(async move {
1706            let mut is_first = true;
1707            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
1708                let update;
1709                if is_first {
1710                    update = snapshot.build_initial_update(project_id, worktree_id);
1711                    is_first = false;
1712                } else {
1713                    update =
1714                        snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
1715                }
1716
1717                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1718                    let _ = resume_updates_rx.try_recv();
1719                    loop {
1720                        let result = callback(update.clone());
1721                        if result.await {
1722                            break;
1723                        } else {
1724                            log::info!("waiting to resume updates");
1725                            if resume_updates_rx.next().await.is_none() {
1726                                return Some(());
1727                            }
1728                        }
1729                    }
1730                }
1731            }
1732            Some(())
1733        });
1734
1735        self.update_observer = Some(UpdateObservationState {
1736            snapshots_tx,
1737            resume_updates: resume_updates_tx,
1738            _maintain_remote_snapshot,
1739        });
1740    }
1741
1742    pub fn share_private_files(&mut self, cx: &mut ModelContext<Worktree>) {
1743        self.share_private_files = true;
1744        self.restart_background_scanners(cx);
1745    }
1746}
1747
1748impl RemoteWorktree {
1749    pub fn project_id(&self) -> u64 {
1750        self.project_id
1751    }
1752
1753    pub fn client(&self) -> AnyProtoClient {
1754        self.client.clone()
1755    }
1756
1757    pub fn disconnected_from_host(&mut self) {
1758        self.updates_tx.take();
1759        self.snapshot_subscriptions.clear();
1760        self.disconnected = true;
1761    }
1762
1763    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1764        if let Some(updates_tx) = &self.updates_tx {
1765            updates_tx
1766                .unbounded_send(update)
1767                .expect("consumer runs to completion");
1768        }
1769    }
1770
1771    fn observe_updates<F, Fut>(
1772        &mut self,
1773        project_id: u64,
1774        cx: &mut ModelContext<Worktree>,
1775        callback: F,
1776    ) where
1777        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1778        Fut: 'static + Send + Future<Output = bool>,
1779    {
1780        let (tx, mut rx) = mpsc::unbounded();
1781        let initial_update = self
1782            .snapshot
1783            .build_initial_update(project_id, self.id().to_proto());
1784        self.updates_tx = Some(tx);
1785        cx.spawn(|this, mut cx| async move {
1786            let mut update = initial_update;
1787            loop {
1788                if !callback(update).await {
1789                    break;
1790                }
1791                if let Some(next_update) = rx.next().await {
1792                    update = next_update;
1793                } else {
1794                    break;
1795                }
1796            }
1797            this.update(&mut cx, |this, _| {
1798                let this = this.as_remote_mut().unwrap();
1799                this.updates_tx.take();
1800            })
1801        })
1802        .detach();
1803    }
1804
1805    fn observed_snapshot(&self, scan_id: usize) -> bool {
1806        self.completed_scan_id >= scan_id
1807    }
1808
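        /// Returns a future that resolves once this remote worktree has observed a
        /// snapshot with at least the given `scan_id`, or fails if the worktree is
        /// disconnected from the host before then.
        ///
        /// A hedged sketch (`worktree` and `scan_id` are illustrative assumptions):
        ///
        /// ```ignore
        /// worktree.wait_for_snapshot(scan_id).await?;
        /// ```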
1809    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1810        let (tx, rx) = oneshot::channel();
1811        if self.observed_snapshot(scan_id) {
1812            let _ = tx.send(());
1813        } else if self.disconnected {
1814            drop(tx);
1815        } else {
1816            match self
1817                .snapshot_subscriptions
1818                .binary_search_by_key(&scan_id, |probe| probe.0)
1819            {
1820                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1821            }
1822        }
1823
1824        async move {
1825            rx.await?;
1826            Ok(())
1827        }
1828    }
1829
1830    fn insert_entry(
1831        &mut self,
1832        entry: proto::Entry,
1833        scan_id: usize,
1834        cx: &mut ModelContext<Worktree>,
1835    ) -> Task<Result<Entry>> {
1836        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1837        cx.spawn(|this, mut cx| async move {
1838            wait_for_snapshot.await?;
1839            this.update(&mut cx, |worktree, _| {
1840                let worktree = worktree.as_remote_mut().unwrap();
1841                let snapshot = &mut worktree.background_snapshot.lock().0;
1842                let entry = snapshot.insert_entry(entry);
1843                worktree.snapshot = snapshot.clone();
1844                entry
1845            })?
1846        })
1847    }
1848
1849    fn delete_entry(
1850        &mut self,
1851        entry_id: ProjectEntryId,
1852        trash: bool,
1853        cx: &mut ModelContext<Worktree>,
1854    ) -> Option<Task<Result<()>>> {
1855        let response = self.client.request(proto::DeleteProjectEntry {
1856            project_id: self.project_id,
1857            entry_id: entry_id.to_proto(),
1858            use_trash: trash,
1859        });
1860        Some(cx.spawn(move |this, mut cx| async move {
1861            let response = response.await?;
1862            let scan_id = response.worktree_scan_id as usize;
1863
1864            this.update(&mut cx, move |this, _| {
1865                this.as_remote_mut().unwrap().wait_for_snapshot(scan_id)
1866            })?
1867            .await?;
1868
1869            this.update(&mut cx, |this, _| {
1870                let this = this.as_remote_mut().unwrap();
1871                let snapshot = &mut this.background_snapshot.lock().0;
1872                snapshot.delete_entry(entry_id);
1873                this.snapshot = snapshot.clone();
1874            })
1875        }))
1876    }
1877
1878    fn rename_entry(
1879        &mut self,
1880        entry_id: ProjectEntryId,
1881        new_path: impl Into<Arc<Path>>,
1882        cx: &mut ModelContext<Worktree>,
1883    ) -> Task<Result<CreatedEntry>> {
1884        let new_path = new_path.into();
1885        let response = self.client.request(proto::RenameProjectEntry {
1886            project_id: self.project_id,
1887            entry_id: entry_id.to_proto(),
1888            new_path: new_path.to_string_lossy().into(),
1889        });
1890        cx.spawn(move |this, mut cx| async move {
1891            let response = response.await?;
1892            match response.entry {
1893                Some(entry) => this
1894                    .update(&mut cx, |this, cx| {
1895                        this.as_remote_mut().unwrap().insert_entry(
1896                            entry,
1897                            response.worktree_scan_id as usize,
1898                            cx,
1899                        )
1900                    })?
1901                    .await
1902                    .map(CreatedEntry::Included),
1903                None => {
1904                    let abs_path = this.update(&mut cx, |worktree, _| {
1905                        worktree
1906                            .absolutize(&new_path)
1907                            .with_context(|| format!("absolutizing {new_path:?}"))
1908                    })??;
1909                    Ok(CreatedEntry::Excluded { abs_path })
1910                }
1911            }
1912        })
1913    }
1914}
1915
1916impl Snapshot {
1917    pub fn new(id: u64, root_name: String, abs_path: Arc<Path>) -> Self {
1918        Snapshot {
1919            id: WorktreeId::from_usize(id as usize),
1920            abs_path,
1921            root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
1922            root_name,
1923            entries_by_path: Default::default(),
1924            entries_by_id: Default::default(),
1925            repository_entries: Default::default(),
1926            scan_id: 1,
1927            completed_scan_id: 0,
1928        }
1929    }
1930
1931    pub fn id(&self) -> WorktreeId {
1932        self.id
1933    }
1934
1935    pub fn abs_path(&self) -> &Arc<Path> {
1936        &self.abs_path
1937    }
1938
1939    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
1940        let mut updated_entries = self
1941            .entries_by_path
1942            .iter()
1943            .map(proto::Entry::from)
1944            .collect::<Vec<_>>();
1945        updated_entries.sort_unstable_by_key(|e| e.id);
1946
1947        let mut updated_repositories = self
1948            .repository_entries
1949            .values()
1950            .map(proto::RepositoryEntry::from)
1951            .collect::<Vec<_>>();
1952        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1953
1954        proto::UpdateWorktree {
1955            project_id,
1956            worktree_id,
1957            abs_path: self.abs_path().to_string_lossy().into(),
1958            root_name: self.root_name().to_string(),
1959            updated_entries,
1960            removed_entries: Vec::new(),
1961            scan_id: self.scan_id as u64,
1962            is_last_update: self.completed_scan_id == self.scan_id,
1963            updated_repositories,
1964            removed_repositories: Vec::new(),
1965        }
1966    }
1967
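        /// Resolves a worktree-relative `path` into an absolute path under
        /// `abs_path`. Paths containing `..`, `.`, or root components are rejected,
        /// and a path without a file name resolves to the worktree root itself.
        ///
        /// A hedged sketch (the `snapshot` binding and path literals are illustrative
        /// assumptions):
        ///
        /// ```ignore
        /// let abs = snapshot.absolutize(Path::new("src/main.rs"))?;
        /// assert!(abs.ends_with("src/main.rs"));
        /// assert!(snapshot.absolutize(Path::new("../outside")).is_err());
        /// ```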
1968    pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
1969        if path
1970            .components()
1971            .any(|component| !matches!(component, std::path::Component::Normal(_)))
1972        {
1973            return Err(anyhow!("invalid path"));
1974        }
1975        if path.file_name().is_some() {
1976            Ok(self.abs_path.join(path))
1977        } else {
1978            Ok(self.abs_path.to_path_buf())
1979        }
1980    }
1981
1982    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1983        self.entries_by_id.get(&entry_id, &()).is_some()
1984    }
1985
1986    fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1987        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1988        let old_entry = self.entries_by_id.insert_or_replace(
1989            PathEntry {
1990                id: entry.id,
1991                path: entry.path.clone(),
1992                is_ignored: entry.is_ignored,
1993                scan_id: 0,
1994            },
1995            &(),
1996        );
1997        if let Some(old_entry) = old_entry {
1998            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1999        }
2000        self.entries_by_path.insert_or_replace(entry.clone(), &());
2001        Ok(entry)
2002    }
2003
2004    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
2005        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
2006        self.entries_by_path = {
2007            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2008            let mut new_entries_by_path =
2009                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
2010            while let Some(entry) = cursor.item() {
2011                if entry.path.starts_with(&removed_entry.path) {
2012                    self.entries_by_id.remove(&entry.id, &());
2013                    cursor.next(&());
2014                } else {
2015                    break;
2016                }
2017            }
2018            new_entries_by_path.append(cursor.suffix(&()), &());
2019            new_entries_by_path
2020        };
2021
2022        Some(removed_entry.path)
2023    }
2024
2025    #[cfg(any(test, feature = "test-support"))]
2026    pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
2027        let path = path.into();
2028        self.entries_by_path
2029            .get(&PathKey(Arc::from(path)), &())
2030            .and_then(|entry| entry.git_status)
2031    }
2032
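        /// Applies a batch of entry and repository changes received from a remote peer
        /// to this snapshot, then advances `scan_id` (and `completed_scan_id` when the
        /// update is flagged as the last one for that scan).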
2033    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
2034        log::trace!(
2035            "applying remote worktree update. {} entries updated, {} removed",
2036            update.updated_entries.len(),
2037            update.removed_entries.len()
2038        );
2039
2040        let mut entries_by_path_edits = Vec::new();
2041        let mut entries_by_id_edits = Vec::new();
2042
2043        for entry_id in update.removed_entries {
2044            let entry_id = ProjectEntryId::from_proto(entry_id);
2045            entries_by_id_edits.push(Edit::Remove(entry_id));
2046            if let Some(entry) = self.entry_for_id(entry_id) {
2047                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
2048            }
2049        }
2050
2051        for entry in update.updated_entries {
2052            let entry = Entry::try_from((&self.root_char_bag, entry))?;
2053            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
2054                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
2055            }
2056            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
2057                if old_entry.id != entry.id {
2058                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
2059                }
2060            }
2061            entries_by_id_edits.push(Edit::Insert(PathEntry {
2062                id: entry.id,
2063                path: entry.path.clone(),
2064                is_ignored: entry.is_ignored,
2065                scan_id: 0,
2066            }));
2067            entries_by_path_edits.push(Edit::Insert(entry));
2068        }
2069
2070        self.entries_by_path.edit(entries_by_path_edits, &());
2071        self.entries_by_id.edit(entries_by_id_edits, &());
2072
2073        update.removed_repositories.sort_unstable();
2074        self.repository_entries.retain(|_, entry| {
2075            update
2076                .removed_repositories
2077                .binary_search(&entry.work_directory.to_proto())
2078                .is_err()
2083        });
2084
2085        for repository in update.updated_repositories {
2086            let work_directory_entry: WorkDirectoryEntry =
2087                ProjectEntryId::from_proto(repository.work_directory_id).into();
2088
2089            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
2090                let work_directory = RepositoryWorkDirectory(entry.path.clone());
2091                if self.repository_entries.get(&work_directory).is_some() {
2092                    self.repository_entries.update(&work_directory, |repo| {
2093                        repo.branch = repository.branch.map(Into::into);
2094                    });
2095                } else {
2096                    self.repository_entries.insert(
2097                        work_directory,
2098                        RepositoryEntry {
2099                            work_directory: work_directory_entry,
2100                            branch: repository.branch.map(Into::into),
2101                            // When syncing repository entries from a peer, we don't need
2102                            // the location_in_repo field, since git operations don't happen locally
2103                            // anyway.
2104                            location_in_repo: None,
2105                        },
2106                    )
2107                }
2108            } else {
2109                log::error!("no work directory entry for repository {:?}", repository)
2110            }
2111        }
2112
2113        self.scan_id = update.scan_id as usize;
2114        if update.is_last_update {
2115            self.completed_scan_id = update.scan_id as usize;
2116        }
2117
2118        Ok(())
2119    }
2120
2121    pub fn entry_count(&self) -> usize {
2122        self.entries_by_path.summary().count
2123    }
2124
2125    pub fn visible_entry_count(&self) -> usize {
2126        self.entries_by_path.summary().non_ignored_count
2127    }
2128
2129    pub fn dir_count(&self) -> usize {
2130        let summary = self.entries_by_path.summary();
2131        summary.count - summary.file_count
2132    }
2133
2134    pub fn visible_dir_count(&self) -> usize {
2135        let summary = self.entries_by_path.summary();
2136        summary.non_ignored_count - summary.non_ignored_file_count
2137    }
2138
2139    pub fn file_count(&self) -> usize {
2140        self.entries_by_path.summary().file_count
2141    }
2142
2143    pub fn visible_file_count(&self) -> usize {
2144        self.entries_by_path.summary().non_ignored_file_count
2145    }
2146
2147    fn traverse_from_offset(
2148        &self,
2149        include_files: bool,
2150        include_dirs: bool,
2151        include_ignored: bool,
2152        start_offset: usize,
2153    ) -> Traversal {
2154        let mut cursor = self.entries_by_path.cursor();
2155        cursor.seek(
2156            &TraversalTarget::Count {
2157                count: start_offset,
2158                include_files,
2159                include_dirs,
2160                include_ignored,
2161            },
2162            Bias::Right,
2163            &(),
2164        );
2165        Traversal {
2166            cursor,
2167            include_files,
2168            include_dirs,
2169            include_ignored,
2170        }
2171    }
2172
2173    pub fn traverse_from_path(
2174        &self,
2175        include_files: bool,
2176        include_dirs: bool,
2177        include_ignored: bool,
2178        path: &Path,
2179    ) -> Traversal {
2180        Traversal::new(
2181            &self.entries_by_path,
2182            include_files,
2183            include_dirs,
2184            include_ignored,
2185            path,
2186        )
2187    }
2188
2189    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
2190        self.traverse_from_offset(true, false, include_ignored, start)
2191    }
2192
2193    pub fn directories(&self, include_ignored: bool, start: usize) -> Traversal {
2194        self.traverse_from_offset(false, true, include_ignored, start)
2195    }
2196
2197    pub fn entries(&self, include_ignored: bool, start: usize) -> Traversal {
2198        self.traverse_from_offset(true, true, include_ignored, start)
2199    }
2200
2201    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
2202        self.repository_entries
2203            .iter()
2204            .map(|(path, entry)| (&path.0, entry))
2205    }
2206
2207    /// Get the repository whose work directory is exactly the given path.
2208    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
2209        self.repository_entries
2210            .get(&RepositoryWorkDirectory(path.into()))
2211            .cloned()
2212    }
2213
2214    /// Get the repository whose work directory contains the given path.
2215    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
2216        self.repository_and_work_directory_for_path(path)
2217            .map(|e| e.1)
2218    }
2219
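        /// Returns the innermost repository whose work directory contains `path`,
        /// along with that work directory. `repository_entries` is ordered by path, so
        /// the last matching entry is the deepest one.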
2220    pub fn repository_and_work_directory_for_path(
2221        &self,
2222        path: &Path,
2223    ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
2224        self.repository_entries
2225            .iter()
2226            .filter(|(workdir_path, _)| path.starts_with(workdir_path))
2227            .last()
2228            .map(|(path, repo)| (path.clone(), repo.clone()))
2229    }
2230
2231    /// Given an ordered iterator of entries, returns an iterator of those entries,
2232    /// along with their containing git repository.
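        ///
        /// A hedged sketch of how this might be used (the `snapshot` binding and the
        /// printed fields are illustrative assumptions):
        ///
        /// ```ignore
        /// for (entry, repo) in snapshot.entries_with_repositories(snapshot.files(false, 0)) {
        ///     println!("{:?} -> {:?}", entry.path, repo.map(|repo| &repo.branch));
        /// }
        /// ```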
2233    pub fn entries_with_repositories<'a>(
2234        &'a self,
2235        entries: impl 'a + Iterator<Item = &'a Entry>,
2236    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
2237        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
2238        let mut repositories = self.repositories().peekable();
2239        entries.map(move |entry| {
2240            while let Some((repo_path, _)) = containing_repos.last() {
2241                if entry.path.starts_with(repo_path) {
2242                    break;
2243                } else {
2244                    containing_repos.pop();
2245                }
2246            }
2247            while let Some((repo_path, _)) = repositories.peek() {
2248                if entry.path.starts_with(repo_path) {
2249                    containing_repos.push(repositories.next().unwrap());
2250                } else {
2251                    break;
2252                }
2253            }
2254            let repo = containing_repos.last().map(|(_, repo)| *repo);
2255            (entry, repo)
2256        })
2257    }
2258
2259    /// Updates the `git_status` of the given entries such that files'
2260    /// statuses bubble up to their ancestor directories.
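        ///
        /// For example, if `a/b.txt` is modified, `a/` also reports
        /// `GitFileStatus::Modified` after this call. A hedged sketch (the `snapshot`
        /// binding is an illustrative assumption):
        ///
        /// ```ignore
        /// let mut entries: Vec<Entry> = snapshot.entries(false, 0).cloned().collect();
        /// snapshot.propagate_git_statuses(&mut entries);
        /// ```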
2261    pub fn propagate_git_statuses(&self, result: &mut [Entry]) {
2262        let mut cursor = self
2263            .entries_by_path
2264            .cursor::<(TraversalProgress, GitStatuses)>();
2265        let mut entry_stack = Vec::<(usize, GitStatuses)>::new();
2266
2267        let mut result_ix = 0;
2268        loop {
2269            let next_entry = result.get(result_ix);
2270            let containing_entry = entry_stack.last().map(|(ix, _)| &result[*ix]);
2271
2272            let entry_to_finish = match (containing_entry, next_entry) {
2273                (Some(_), None) => entry_stack.pop(),
2274                (Some(containing_entry), Some(next_path)) => {
2275                    if next_path.path.starts_with(&containing_entry.path) {
2276                        None
2277                    } else {
2278                        entry_stack.pop()
2279                    }
2280                }
2281                (None, Some(_)) => None,
2282                (None, None) => break,
2283            };
2284
2285            if let Some((entry_ix, prev_statuses)) = entry_to_finish {
2286                cursor.seek_forward(
2287                    &TraversalTarget::PathSuccessor(&result[entry_ix].path),
2288                    Bias::Left,
2289                    &(),
2290                );
2291
2292                let statuses = cursor.start().1 - prev_statuses;
2293
2294                result[entry_ix].git_status = if statuses.conflict > 0 {
2295                    Some(GitFileStatus::Conflict)
2296                } else if statuses.modified > 0 {
2297                    Some(GitFileStatus::Modified)
2298                } else if statuses.added > 0 {
2299                    Some(GitFileStatus::Added)
2300                } else {
2301                    None
2302                };
2303            } else {
2304                if result[result_ix].is_dir() {
2305                    cursor.seek_forward(
2306                        &TraversalTarget::Path(&result[result_ix].path),
2307                        Bias::Left,
2308                        &(),
2309                    );
2310                    entry_stack.push((result_ix, cursor.start().1));
2311                }
2312                result_ix += 1;
2313            }
2314        }
2315    }
2316
2317    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
2318        let empty_path = Path::new("");
2319        self.entries_by_path
2320            .cursor::<()>()
2321            .filter(move |entry| entry.path.as_ref() != empty_path)
2322            .map(|entry| &entry.path)
2323    }
2324
2325    pub fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
2326        let mut cursor = self.entries_by_path.cursor();
2327        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
2328        let traversal = Traversal {
2329            cursor,
2330            include_files: true,
2331            include_dirs: true,
2332            include_ignored: true,
2333        };
2334        ChildEntriesIter {
2335            traversal,
2336            parent_path,
2337        }
2338    }
2339
2340    pub fn root_entry(&self) -> Option<&Entry> {
2341        self.entry_for_path("")
2342    }
2343
2344    pub fn root_name(&self) -> &str {
2345        &self.root_name
2346    }
2347
2348    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2349        self.repository_entries
2350            .get(&RepositoryWorkDirectory(Path::new("").into()))
2351            .map(|entry| entry.to_owned())
2352    }
2353
2354    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
2355        self.repository_entries.values()
2356    }
2357
2358    pub fn scan_id(&self) -> usize {
2359        self.scan_id
2360    }
2361
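        /// Looks up an entry by its worktree-relative path, returning `None` if no
        /// entry with exactly that path exists in the snapshot.
        ///
        /// A hedged sketch (the `snapshot` binding and path literal are illustrative
        /// assumptions):
        ///
        /// ```ignore
        /// if let Some(entry) = snapshot.entry_for_path("src/main.rs") {
        ///     println!("inode: {}", entry.inode);
        /// }
        /// ```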
2362    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2363        let path = path.as_ref();
2364        self.traverse_from_path(true, true, true, path)
2365            .entry()
2366            .and_then(|entry| {
2367                if entry.path.as_ref() == path {
2368                    Some(entry)
2369                } else {
2370                    None
2371                }
2372            })
2373    }
2374
2375    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
2376        let entry = self.entries_by_id.get(&id, &())?;
2377        self.entry_for_path(&entry.path)
2378    }
2379
2380    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
2381        self.entry_for_path(path.as_ref()).map(|e| e.inode)
2382    }
2383}
2384
2385impl LocalSnapshot {
2386    pub fn repo_for_path(&self, path: &Path) -> Option<(RepositoryEntry, &LocalRepositoryEntry)> {
2387        let (_, repo_entry) = self.repository_and_work_directory_for_path(path)?;
2388        let work_directory_id = repo_entry.work_directory_id();
2389        Some((repo_entry, self.git_repositories.get(&work_directory_id)?))
2390    }
2391
2392    fn build_update(
2393        &self,
2394        project_id: u64,
2395        worktree_id: u64,
2396        entry_changes: UpdatedEntriesSet,
2397        repo_changes: UpdatedGitRepositoriesSet,
2398    ) -> proto::UpdateWorktree {
2399        let mut updated_entries = Vec::new();
2400        let mut removed_entries = Vec::new();
2401        let mut updated_repositories = Vec::new();
2402        let mut removed_repositories = Vec::new();
2403
2404        for (_, entry_id, path_change) in entry_changes.iter() {
2405            if let PathChange::Removed = path_change {
2406                removed_entries.push(entry_id.0 as u64);
2407            } else if let Some(entry) = self.entry_for_id(*entry_id) {
2408                updated_entries.push(proto::Entry::from(entry));
2409            }
2410        }
2411
2412        for (work_dir_path, change) in repo_changes.iter() {
2413            let new_repo = self
2414                .repository_entries
2415                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
2416            match (&change.old_repository, new_repo) {
2417                (Some(old_repo), Some(new_repo)) => {
2418                    updated_repositories.push(new_repo.build_update(old_repo));
2419                }
2420                (None, Some(new_repo)) => {
2421                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
2422                }
2423                (Some(old_repo), None) => {
2424                    removed_repositories.push(old_repo.work_directory.0.to_proto());
2425                }
2426                _ => {}
2427            }
2428        }
2429
2430        removed_entries.sort_unstable();
2431        updated_entries.sort_unstable_by_key(|e| e.id);
2432        removed_repositories.sort_unstable();
2433        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
2434
2435        // TODO - optimize, knowing that removed_entries are sorted.
2436        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());
2437
2438        proto::UpdateWorktree {
2439            project_id,
2440            worktree_id,
2441            abs_path: self.abs_path().to_string_lossy().into(),
2442            root_name: self.root_name().to_string(),
2443            updated_entries,
2444            removed_entries,
2445            scan_id: self.scan_id as u64,
2446            is_last_update: self.completed_scan_id == self.scan_id,
2447            updated_repositories,
2448            removed_repositories,
2449        }
2450    }
2451
2452    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2453        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
2454            let abs_path = self.abs_path.join(&entry.path);
2455            match smol::block_on(build_gitignore(&abs_path, fs)) {
2456                Ok(ignore) => {
2457                    self.ignores_by_parent_abs_path
2458                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
2459                }
2460                Err(error) => {
2461                    log::error!(
2462                        "error loading .gitignore file {:?} - {:?}",
2463                        &entry.path,
2464                        error
2465                    );
2466                }
2467            }
2468        }
2469
2470        if entry.kind == EntryKind::PendingDir {
2471            if let Some(existing_entry) =
2472                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
2473            {
2474                entry.kind = existing_entry.kind;
2475            }
2476        }
2477
2478        let scan_id = self.scan_id;
2479        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
2480        if let Some(removed) = removed {
2481            if removed.id != entry.id {
2482                self.entries_by_id.remove(&removed.id, &());
2483            }
2484        }
2485        self.entries_by_id.insert_or_replace(
2486            PathEntry {
2487                id: entry.id,
2488                path: entry.path.clone(),
2489                is_ignored: entry.is_ignored,
2490                scan_id,
2491            },
2492            &(),
2493        );
2494
2495        entry
2496    }
2497
2498    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2499        let mut inodes = TreeSet::default();
2500        for ancestor in path.ancestors().skip(1) {
2501            if let Some(entry) = self.entry_for_path(ancestor) {
2502                inodes.insert(entry.inode);
2503            }
2504        }
2505        inodes
2506    }
2507
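        // Builds the stack of `.gitignore` rules that apply to `abs_path` by walking
        // up its ancestors (stopping at the first ancestor containing a `.git`
        // directory), then layering the collected ignore files from the repository
        // root downward. If any ancestor, or the path itself, is ignored, the stack
        // collapses to `IgnoreStack::all()`.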
2508    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2509        let mut new_ignores = Vec::new();
2510        for (index, ancestor) in abs_path.ancestors().enumerate() {
2511            if index > 0 {
2512                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2513                    new_ignores.push((ancestor, Some(ignore.clone())));
2514                } else {
2515                    new_ignores.push((ancestor, None));
2516                }
2517            }
2518            if ancestor.join(&*DOT_GIT).is_dir() {
2519                break;
2520            }
2521        }
2522
2523        let mut ignore_stack = IgnoreStack::none();
2524        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2525            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2526                ignore_stack = IgnoreStack::all();
2527                break;
2528            } else if let Some(ignore) = ignore {
2529                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2530            }
2531        }
2532
2533        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2534            ignore_stack = IgnoreStack::all();
2535        }
2536
2537        ignore_stack
2538    }
2539
2540    #[cfg(test)]
2541    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
2542        self.entries_by_path
2543            .cursor::<()>()
2544            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
2545    }
2546
2547    #[cfg(test)]
2548    pub fn check_invariants(&self, git_state: bool) {
2549        use pretty_assertions::assert_eq;
2550
2551        assert_eq!(
2552            self.entries_by_path
2553                .cursor::<()>()
2554                .map(|e| (&e.path, e.id))
2555                .collect::<Vec<_>>(),
2556            self.entries_by_id
2557                .cursor::<()>()
2558                .map(|e| (&e.path, e.id))
2559                .collect::<collections::BTreeSet<_>>()
2560                .into_iter()
2561                .collect::<Vec<_>>(),
2562            "entries_by_path and entries_by_id are inconsistent"
2563        );
2564
2565        let mut files = self.files(true, 0);
2566        let mut visible_files = self.files(false, 0);
2567        for entry in self.entries_by_path.cursor::<()>() {
2568            if entry.is_file() {
2569                assert_eq!(files.next().unwrap().inode, entry.inode);
2570                if !entry.is_ignored && !entry.is_external {
2571                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
2572                }
2573            }
2574        }
2575
2576        assert!(files.next().is_none());
2577        assert!(visible_files.next().is_none());
2578
2579        let mut bfs_paths = Vec::new();
2580        let mut stack = self
2581            .root_entry()
2582            .map(|e| e.path.as_ref())
2583            .into_iter()
2584            .collect::<Vec<_>>();
2585        while let Some(path) = stack.pop() {
2586            bfs_paths.push(path);
2587            let ix = stack.len();
2588            for child_entry in self.child_entries(path) {
2589                stack.insert(ix, &child_entry.path);
2590            }
2591        }
2592
2593        let dfs_paths_via_iter = self
2594            .entries_by_path
2595            .cursor::<()>()
2596            .map(|e| e.path.as_ref())
2597            .collect::<Vec<_>>();
2598        assert_eq!(bfs_paths, dfs_paths_via_iter);
2599
2600        let dfs_paths_via_traversal = self
2601            .entries(true, 0)
2602            .map(|e| e.path.as_ref())
2603            .collect::<Vec<_>>();
2604        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
2605
2606        if git_state {
2607            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
2608                let ignore_parent_path =
2609                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
2610                assert!(self.entry_for_path(&ignore_parent_path).is_some());
2611                assert!(self
2612                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
2613                    .is_some());
2614            }
2615        }
2616    }
2617
2618    #[cfg(test)]
2619    pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
2620        let mut paths = Vec::new();
2621        for entry in self.entries_by_path.cursor::<()>() {
2622            if include_ignored || !entry.is_ignored {
2623                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
2624            }
2625        }
2626        paths.sort_by(|a, b| a.0.cmp(b.0));
2627        paths
2628    }
2629}
2630
2631impl BackgroundScannerState {
2632    fn should_scan_directory(&self, entry: &Entry) -> bool {
2633        (!entry.is_external && !entry.is_ignored)
2634            || entry.path.file_name() == Some(*DOT_GIT)
2635            || entry.path.file_name() == Some(local_settings_folder_relative_path().as_os_str())
2636            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
2637            || self
2638                .paths_to_scan
2639                .iter()
2640                .any(|p| p.starts_with(&entry.path))
2641            || self
2642                .path_prefixes_to_scan
2643                .iter()
2644                .any(|p| entry.path.starts_with(p))
2645    }
2646
2647    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
2648        let path = entry.path.clone();
2649        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
2650        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
2651        let mut containing_repository = None;
2652        if !ignore_stack.is_abs_path_ignored(&abs_path, true) {
2653            if let Some((repo_entry, repo)) = self.snapshot.repo_for_path(&path) {
2654                if let Some(workdir_path) = repo_entry.work_directory(&self.snapshot) {
2655                    if let Ok(repo_path) = repo_entry.relativize(&self.snapshot, &path) {
2656                        containing_repository = Some(ScanJobContainingRepository {
2657                            work_directory: workdir_path,
2658                            statuses: repo
2659                                .repo_ptr
2660                                .statuses(&repo_path)
2661                                .log_err()
2662                                .unwrap_or_default(),
2663                        });
2664                    }
2665                }
2666            }
2667        }
2668        if !ancestor_inodes.contains(&entry.inode) {
2669            ancestor_inodes.insert(entry.inode);
2670            scan_job_tx
2671                .try_send(ScanJob {
2672                    abs_path,
2673                    path,
2674                    ignore_stack,
2675                    scan_queue: scan_job_tx.clone(),
2676                    ancestor_inodes,
2677                    is_external: entry.is_external,
2678                    containing_repository,
2679                })
2680                .unwrap();
2681        }
2682    }
2683
2684    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2685        if let Some(mtime) = entry.mtime {
2686            if let Some(removed_entry_id) = self.removed_entry_ids.remove(&(entry.inode, mtime)) {
2687                entry.id = removed_entry_id;
2688            } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
2689                entry.id = existing_entry.id;
2690            }
2691        }
2692    }
2693
2694    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2695        self.reuse_entry_id(&mut entry);
2696        let entry = self.snapshot.insert_entry(entry, fs);
2697        if entry.path.file_name() == Some(&DOT_GIT) {
2698            self.build_git_repository(entry.path.clone(), fs);
2699        }
2700
2701        #[cfg(test)]
2702        self.snapshot.check_invariants(false);
2703
2704        entry
2705    }
2706
2707    fn populate_dir(
2708        &mut self,
2709        parent_path: &Arc<Path>,
2710        entries: impl IntoIterator<Item = Entry>,
2711        ignore: Option<Arc<Gitignore>>,
2712    ) {
2713        let mut parent_entry = if let Some(parent_entry) = self
2714            .snapshot
2715            .entries_by_path
2716            .get(&PathKey(parent_path.clone()), &())
2717        {
2718            parent_entry.clone()
2719        } else {
2720            log::warn!(
2721                "populating a directory {:?} that has been removed",
2722                parent_path
2723            );
2724            return;
2725        };
2726
2727        match parent_entry.kind {
2728            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
2729            EntryKind::Dir => {}
2730            _ => return,
2731        }
2732
2733        if let Some(ignore) = ignore {
2734            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
2735            self.snapshot
2736                .ignores_by_parent_abs_path
2737                .insert(abs_parent_path, (ignore, false));
2738        }
2739
2740        let parent_entry_id = parent_entry.id;
2741        self.scanned_dirs.insert(parent_entry_id);
2742        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2743        let mut entries_by_id_edits = Vec::new();
2744
2745        for entry in entries {
2746            entries_by_id_edits.push(Edit::Insert(PathEntry {
2747                id: entry.id,
2748                path: entry.path.clone(),
2749                is_ignored: entry.is_ignored,
2750                scan_id: self.snapshot.scan_id,
2751            }));
2752            entries_by_path_edits.push(Edit::Insert(entry));
2753        }
2754
2755        self.snapshot
2756            .entries_by_path
2757            .edit(entries_by_path_edits, &());
2758        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2759
2760        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
2761            self.changed_paths.insert(ix, parent_path.clone());
2762        }
2763
2764        #[cfg(test)]
2765        self.snapshot.check_invariants(false);
2766    }
2767
2768    fn remove_path(&mut self, path: &Path) {
2769        let mut new_entries;
2770        let removed_entries;
2771        {
2772            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
2773            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2774            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2775            new_entries.append(cursor.suffix(&()), &());
2776        }
2777        self.snapshot.entries_by_path = new_entries;
2778
2779        let mut entries_by_id_edits = Vec::new();
2780        for entry in removed_entries.cursor::<()>() {
2781            if let Some(mtime) = entry.mtime {
2782                let removed_entry_id = self
2783                    .removed_entry_ids
2784                    .entry((entry.inode, mtime))
2785                    .or_insert(entry.id);
2786                *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2787            }
2788            entries_by_id_edits.push(Edit::Remove(entry.id));
2789        }
2790        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2791
2792        if path.file_name() == Some(&GITIGNORE) {
2793            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
2794            if let Some((_, needs_update)) = self
2795                .snapshot
2796                .ignores_by_parent_abs_path
2797                .get_mut(abs_parent_path.as_path())
2798            {
2799                *needs_update = true;
2800            }
2801        }
2802
2803        #[cfg(test)]
2804        self.snapshot.check_invariants(false);
2805    }
2806
2807    fn build_git_repository(
2808        &mut self,
2809        dot_git_path: Arc<Path>,
2810        fs: &dyn Fs,
2811    ) -> Option<(RepositoryWorkDirectory, Arc<dyn GitRepository>)> {
2812        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
2813            Some(parent_dir) => {
2814                // Guard against repositories inside the repository metadata
2815                if parent_dir.iter().any(|component| component == *DOT_GIT) {
2816                    log::info!(
2817                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
2818                    );
2819                    return None;
2820                };
2821                log::info!(
2822                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
2823                );
2824
2825                parent_dir.into()
2826            }
2827            None => {
2828                // `dot_git_path.parent().is_none()` means the `.git` directory is the opened worktree itself;
2829                // git does not track any files inside that directory, so there is no need to build a repo around it.
2830                log::info!(
2831                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
2832                );
2833                return None;
2834            }
2835        };
2836
2837        self.build_git_repository_for_path(work_dir_path, dot_git_path, None, fs)
2838    }
2839
2840    fn build_git_repository_for_path(
2841        &mut self,
2842        work_dir_path: Arc<Path>,
2843        dot_git_path: Arc<Path>,
2844        location_in_repo: Option<Arc<Path>>,
2845        fs: &dyn Fs,
2846    ) -> Option<(RepositoryWorkDirectory, Arc<dyn GitRepository>)> {
2847        let work_dir_id = self
2848            .snapshot
2849            .entry_for_path(work_dir_path.clone())
2850            .map(|entry| entry.id)?;
2851
2852        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
2853            return None;
2854        }
2855
2856        let abs_path = self.snapshot.abs_path.join(&dot_git_path);
2857        let t0 = Instant::now();
2858        let repository = fs.open_repo(&abs_path)?;
2859        log::trace!("constructed libgit2 repo in {:?}", t0.elapsed());
2860        let work_directory = RepositoryWorkDirectory(work_dir_path.clone());
2861
2862        self.snapshot.repository_entries.insert(
2863            work_directory.clone(),
2864            RepositoryEntry {
2865                work_directory: work_dir_id.into(),
2866                branch: repository.branch_name().map(Into::into),
2867                location_in_repo,
2868            },
2869        );
2870        self.snapshot.git_repositories.insert(
2871            work_dir_id,
2872            LocalRepositoryEntry {
2873                git_dir_scan_id: 0,
2874                repo_ptr: repository.clone(),
2875                git_dir_path: dot_git_path.clone(),
2876            },
2877        );
2878
2879        Some((work_directory, repository))
2880    }
2881}
2882
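    /// Builds a [`Gitignore`] matcher from the file at `abs_path`, rooted at the
    /// file's parent directory, by adding each line of the file as a pattern.
    ///
    /// A usage sketch (the paths and the `fs` handle here are illustrative only):
    /// ```ignore
    /// let ignore = build_gitignore(Path::new("/repo/.gitignore"), fs.as_ref()).await?;
    /// let is_ignored = ignore.matched("target/debug", /* is_dir */ true).is_ignore();
    /// ```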
2883async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2884    let contents = fs.load(abs_path).await?;
2885    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2886    let mut builder = GitignoreBuilder::new(parent);
2887    for line in contents.lines() {
2888        builder.add_line(Some(abs_path.into()), line)?;
2889    }
2890    Ok(builder.build()?)
2891}
2892
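    /// Conversions between a `WorktreeId` and its wire (`u64`) and in-memory
    /// (`usize`) representations. A round-trip sketch (not compiled as a doctest):
    /// ```ignore
    /// let id = WorktreeId::from_proto(42);
    /// assert_eq!(id.to_proto(), 42);
    /// assert_eq!(WorktreeId::from_usize(id.to_usize()), id);
    /// ```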
2893impl WorktreeId {
2894    pub fn from_usize(handle_id: usize) -> Self {
2895        Self(handle_id)
2896    }
2897
2898    pub fn from_proto(id: u64) -> Self {
2899        Self(id as usize)
2900    }
2901
2902    pub fn to_proto(&self) -> u64 {
2903        self.0 as u64
2904    }
2905
2906    pub fn to_usize(&self) -> usize {
2907        self.0
2908    }
2909}
2910
2911impl fmt::Display for WorktreeId {
2912    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2913        self.0.fmt(f)
2914    }
2915}
2916
2917impl Deref for Worktree {
2918    type Target = Snapshot;
2919
2920    fn deref(&self) -> &Self::Target {
2921        match self {
2922            Worktree::Local(worktree) => &worktree.snapshot,
2923            Worktree::Remote(worktree) => &worktree.snapshot,
2924        }
2925    }
2926}
2927
2928impl Deref for LocalWorktree {
2929    type Target = LocalSnapshot;
2930
2931    fn deref(&self) -> &Self::Target {
2932        &self.snapshot
2933    }
2934}
2935
2936impl Deref for RemoteWorktree {
2937    type Target = Snapshot;
2938
2939    fn deref(&self) -> &Self::Target {
2940        &self.snapshot
2941    }
2942}
2943
2944impl fmt::Debug for LocalWorktree {
2945    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2946        self.snapshot.fmt(f)
2947    }
2948}
2949
2950impl fmt::Debug for Snapshot {
2951    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2952        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2953        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2954
2955        impl<'a> fmt::Debug for EntriesByPath<'a> {
2956            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2957                f.debug_map()
2958                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2959                    .finish()
2960            }
2961        }
2962
2963        impl<'a> fmt::Debug for EntriesById<'a> {
2964            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2965                f.debug_list().entries(self.0.iter()).finish()
2966            }
2967        }
2968
2969        f.debug_struct("Snapshot")
2970            .field("id", &self.id)
2971            .field("root_name", &self.root_name)
2972            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2973            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2974            .finish()
2975    }
2976}
2977
2978#[derive(Clone, PartialEq)]
2979pub struct File {
2980    pub worktree: Model<Worktree>,
2981    pub path: Arc<Path>,
2982    pub mtime: Option<SystemTime>,
2983    pub entry_id: Option<ProjectEntryId>,
2984    pub is_local: bool,
2985    pub is_deleted: bool,
2986    pub is_private: bool,
2987}
2988
2989impl language::File for File {
2990    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2991        if self.is_local {
2992            Some(self)
2993        } else {
2994            None
2995        }
2996    }
2997
2998    fn mtime(&self) -> Option<SystemTime> {
2999        self.mtime
3000    }
3001
3002    fn path(&self) -> &Arc<Path> {
3003        &self.path
3004    }
3005
3006    fn full_path(&self, cx: &AppContext) -> PathBuf {
3007        let mut full_path = PathBuf::new();
3008        let worktree = self.worktree.read(cx);
3009
3010        if worktree.is_visible() {
3011            full_path.push(worktree.root_name());
3012        } else {
3013            let path = worktree.abs_path();
3014
3015            if worktree.is_local() && path.starts_with(home_dir().as_path()) {
3016                full_path.push("~");
3017                full_path.push(path.strip_prefix(home_dir().as_path()).unwrap());
3018            } else {
3019                full_path.push(path)
3020            }
3021        }
3022
3023        if self.path.components().next().is_some() {
3024            full_path.push(&self.path);
3025        }
3026
3027        full_path
3028    }
3029
3030    /// Returns the last component of this handle's absolute path. If this handle refers to the root
3031    /// of its worktree, then this method will return the name of the worktree itself.
3032    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
3033        self.path
3034            .file_name()
3035            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
3036    }
3037
3038    fn worktree_id(&self) -> usize {
3039        self.worktree.entity_id().as_u64() as usize
3040    }
3041
3042    fn is_deleted(&self) -> bool {
3043        self.is_deleted
3044    }
3045
3046    fn as_any(&self) -> &dyn Any {
3047        self
3048    }
3049
3050    fn to_proto(&self, cx: &AppContext) -> rpc::proto::File {
3051        rpc::proto::File {
3052            worktree_id: self.worktree.read(cx).id().to_proto(),
3053            entry_id: self.entry_id.map(|id| id.to_proto()),
3054            path: self.path.to_string_lossy().into(),
3055            mtime: self.mtime.map(|time| time.into()),
3056            is_deleted: self.is_deleted,
3057        }
3058    }
3059
3060    fn is_private(&self) -> bool {
3061        self.is_private
3062    }
3063}
3064
3065impl language::LocalFile for File {
3066    fn abs_path(&self, cx: &AppContext) -> PathBuf {
3067        let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
3068        if self.path.as_ref() == Path::new("") {
3069            worktree_path.to_path_buf()
3070        } else {
3071            worktree_path.join(&self.path)
3072        }
3073    }
3074
3075    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
3076        let worktree = self.worktree.read(cx).as_local().unwrap();
3077        let abs_path = worktree.absolutize(&self.path);
3078        let fs = worktree.fs.clone();
3079        cx.background_executor()
3080            .spawn(async move { fs.load(&abs_path?).await })
3081    }
3082}
3083
3084impl File {
3085    pub fn for_entry(entry: Entry, worktree: Model<Worktree>) -> Arc<Self> {
3086        Arc::new(Self {
3087            worktree,
3088            path: entry.path.clone(),
3089            mtime: entry.mtime,
3090            entry_id: Some(entry.id),
3091            is_local: true,
3092            is_deleted: false,
3093            is_private: entry.is_private,
3094        })
3095    }
3096
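        /// Builds a remote [`File`] from its protobuf representation. Fails if the
        /// given worktree is not remote or if its id does not match the message.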
3097    pub fn from_proto(
3098        proto: rpc::proto::File,
3099        worktree: Model<Worktree>,
3100        cx: &AppContext,
3101    ) -> Result<Self> {
3102        let worktree_id = worktree
3103            .read(cx)
3104            .as_remote()
3105            .ok_or_else(|| anyhow!("not remote"))?
3106            .id();
3107
3108        if worktree_id.to_proto() != proto.worktree_id {
3109            return Err(anyhow!("worktree id does not match file"));
3110        }
3111
3112        Ok(Self {
3113            worktree,
3114            path: Path::new(&proto.path).into(),
3115            mtime: proto.mtime.map(|time| time.into()),
3116            entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
3117            is_local: false,
3118            is_deleted: proto.is_deleted,
3119            is_private: false,
3120        })
3121    }
3122
3123    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
3124        file.and_then(|f| f.as_any().downcast_ref())
3125    }
3126
3127    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
3128        self.worktree.read(cx).id()
3129    }
3130
3131    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
3132        if self.is_deleted {
3133            None
3134        } else {
3135            self.entry_id
3136        }
3137    }
3138}
3139
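    /// A single file or directory tracked in a worktree snapshot, identified by a
    /// stable [`ProjectEntryId`] and stored under its path relative to the worktree root.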
3140#[derive(Clone, Debug, PartialEq, Eq, Hash)]
3141pub struct Entry {
3142    pub id: ProjectEntryId,
3143    pub kind: EntryKind,
3144    pub path: Arc<Path>,
3145    pub inode: u64,
3146    pub mtime: Option<SystemTime>,
3147
3148    pub canonical_path: Option<PathBuf>,
3149    pub is_symlink: bool,
3150    /// Whether this entry is ignored by Git.
3151    ///
3152    /// We only scan ignored entries once the directory is expanded and
3153    /// exclude them from searches.
3154    pub is_ignored: bool,
3155
3156    /// Whether this entry's canonical path is outside of the worktree.
3157    /// This means the entry is only accessible from the worktree root via a
3158    /// symlink.
3159    ///
3160    /// We only scan entries outside of the worktree once the symlinked
3161    /// directory is expanded. External entries are treated like gitignored
3162    /// entries in that they are not included in searches.
3163    pub is_external: bool,
3164    pub git_status: Option<GitFileStatus>,
3165    /// Whether this entry is considered to be a `.env` file.
3166    pub is_private: bool,
3167}
3168
3169#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
3170pub enum EntryKind {
3171    UnloadedDir,
3172    PendingDir,
3173    Dir,
3174    File(CharBag),
3175}
3176
3177#[derive(Clone, Copy, Debug, PartialEq)]
3178pub enum PathChange {
3179    /// A filesystem entry was created.
3180    Added,
3181    /// A filesystem entry was removed.
3182    Removed,
3183    /// A filesystem entry was updated.
3184    Updated,
3185    /// A filesystem entry was either updated or added. We don't know
3186    /// whether or not it already existed, because the path had not
3187    /// been loaded before the event.
3188    AddedOrUpdated,
3189    /// A filesystem entry was found during the initial scan of the worktree.
3190    Loaded,
3191}
3192
3193pub struct GitRepositoryChange {
3194    /// The previous state of the repository, if it already existed.
3195    pub old_repository: Option<RepositoryEntry>,
3196}
3197
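    // Change batches are shared slices (`Arc<[_]>`), so cloning them is cheap.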
3198pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
3199pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3200
3201impl Entry {
3202    fn new(
3203        path: Arc<Path>,
3204        metadata: &fs::Metadata,
3205        next_entry_id: &AtomicUsize,
3206        root_char_bag: CharBag,
3207        canonical_path: Option<PathBuf>,
3208    ) -> Self {
3209        Self {
3210            id: ProjectEntryId::new(next_entry_id),
3211            kind: if metadata.is_dir {
3212                EntryKind::PendingDir
3213            } else {
3214                EntryKind::File(char_bag_for_path(root_char_bag, &path))
3215            },
3216            path,
3217            inode: metadata.inode,
3218            mtime: Some(metadata.mtime),
3219            canonical_path,
3220            is_symlink: metadata.is_symlink,
3221            is_ignored: false,
3222            is_external: false,
3223            is_private: false,
3224            git_status: None,
3225        }
3226    }
3227
3228    pub fn is_created(&self) -> bool {
3229        self.mtime.is_some()
3230    }
3231
3232    pub fn is_dir(&self) -> bool {
3233        self.kind.is_dir()
3234    }
3235
3236    pub fn is_file(&self) -> bool {
3237        self.kind.is_file()
3238    }
3239
3240    pub fn git_status(&self) -> Option<GitFileStatus> {
3241        self.git_status
3242    }
3243}
3244
3245impl EntryKind {
3246    pub fn is_dir(&self) -> bool {
3247        matches!(
3248            self,
3249            EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
3250        )
3251    }
3252
3253    pub fn is_unloaded(&self) -> bool {
3254        matches!(self, EntryKind::UnloadedDir)
3255    }
3256
3257    pub fn is_file(&self) -> bool {
3258        matches!(self, EntryKind::File(_))
3259    }
3260}
3261
3262impl sum_tree::Item for Entry {
3263    type Summary = EntrySummary;
3264
3265    fn summary(&self) -> Self::Summary {
3266        let non_ignored_count = if self.is_ignored || self.is_external {
3267            0
3268        } else {
3269            1
3270        };
3271        let file_count;
3272        let non_ignored_file_count;
3273        if self.is_file() {
3274            file_count = 1;
3275            non_ignored_file_count = non_ignored_count;
3276        } else {
3277            file_count = 0;
3278            non_ignored_file_count = 0;
3279        }
3280
3281        let mut statuses = GitStatuses::default();
3282        match self.git_status {
3283            Some(status) => match status {
3284                GitFileStatus::Added => statuses.added = 1,
3285                GitFileStatus::Modified => statuses.modified = 1,
3286                GitFileStatus::Conflict => statuses.conflict = 1,
3287            },
3288            None => {}
3289        }
3290
3291        EntrySummary {
3292            max_path: self.path.clone(),
3293            count: 1,
3294            non_ignored_count,
3295            file_count,
3296            non_ignored_file_count,
3297            statuses,
3298        }
3299    }
3300}
3301
3302impl sum_tree::KeyedItem for Entry {
3303    type Key = PathKey;
3304
3305    fn key(&self) -> Self::Key {
3306        PathKey(self.path.clone())
3307    }
3308}
3309
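    /// Aggregate statistics for a range of entries in the path-ordered sum tree:
    /// entry and file counts (total and non-ignored), git status counts, and the
    /// maximum path, which allows seeking the tree by [`PathKey`].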
3310#[derive(Clone, Debug)]
3311pub struct EntrySummary {
3312    max_path: Arc<Path>,
3313    count: usize,
3314    non_ignored_count: usize,
3315    file_count: usize,
3316    non_ignored_file_count: usize,
3317    statuses: GitStatuses,
3318}
3319
3320impl Default for EntrySummary {
3321    fn default() -> Self {
3322        Self {
3323            max_path: Arc::from(Path::new("")),
3324            count: 0,
3325            non_ignored_count: 0,
3326            file_count: 0,
3327            non_ignored_file_count: 0,
3328            statuses: Default::default(),
3329        }
3330    }
3331}
3332
3333impl sum_tree::Summary for EntrySummary {
3334    type Context = ();
3335
3336    fn add_summary(&mut self, rhs: &Self, _: &()) {
3337        self.max_path = rhs.max_path.clone();
3338        self.count += rhs.count;
3339        self.non_ignored_count += rhs.non_ignored_count;
3340        self.file_count += rhs.file_count;
3341        self.non_ignored_file_count += rhs.non_ignored_file_count;
3342        self.statuses += rhs.statuses;
3343    }
3344}
3345
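    /// A compact entry keyed by [`ProjectEntryId`], used by the id-ordered tree to
    /// look up an entry's path, ignore status, and scan id without a path search.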
3346#[derive(Clone, Debug)]
3347struct PathEntry {
3348    id: ProjectEntryId,
3349    path: Arc<Path>,
3350    is_ignored: bool,
3351    scan_id: usize,
3352}
3353
3354impl sum_tree::Item for PathEntry {
3355    type Summary = PathEntrySummary;
3356
3357    fn summary(&self) -> Self::Summary {
3358        PathEntrySummary { max_id: self.id }
3359    }
3360}
3361
3362impl sum_tree::KeyedItem for PathEntry {
3363    type Key = ProjectEntryId;
3364
3365    fn key(&self) -> Self::Key {
3366        self.id
3367    }
3368}
3369
3370#[derive(Clone, Debug, Default)]
3371struct PathEntrySummary {
3372    max_id: ProjectEntryId,
3373}
3374
3375impl sum_tree::Summary for PathEntrySummary {
3376    type Context = ();
3377
3378    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
3379        self.max_id = summary.max_id;
3380    }
3381}
3382
3383impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
3384    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
3385        *self = summary.max_id;
3386    }
3387}
3388
3389#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
3390pub struct PathKey(Arc<Path>);
3391
3392impl Default for PathKey {
3393    fn default() -> Self {
3394        Self(Path::new("").into())
3395    }
3396}
3397
3398impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
3399    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3400        self.0 = summary.max_path.clone();
3401    }
3402}
3403
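    /// Runs on a background task: performs the initial recursive scan of the
    /// worktree, then keeps the snapshot up to date by responding to filesystem
    /// events, explicit scan requests, and path prefixes to expand, reporting
    /// progress through `status_updates_tx`.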
3404struct BackgroundScanner {
3405    state: Mutex<BackgroundScannerState>,
3406    fs: Arc<dyn Fs>,
3407    fs_case_sensitive: bool,
3408    status_updates_tx: UnboundedSender<ScanState>,
3409    executor: BackgroundExecutor,
3410    scan_requests_rx: channel::Receiver<ScanRequest>,
3411    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
3412    next_entry_id: Arc<AtomicUsize>,
3413    phase: BackgroundScannerPhase,
3414    watcher: Arc<dyn Watcher>,
3415    settings: WorktreeSettings,
3416    share_private_files: bool,
3417}
3418
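    /// The stage of the scanner's lifecycle; changes observed during the initial
    /// scan are reported less precisely than those observed afterwards.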
3419#[derive(PartialEq)]
3420enum BackgroundScannerPhase {
3421    InitialScan,
3422    EventsReceivedDuringInitialScan,
3423    Events,
3424}
3425
3426impl BackgroundScanner {
3427    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathBuf>>>>) {
3428        use futures::FutureExt as _;
3429
3430        // If the worktree root does not contain a git repository, then find
3431        // the git repository in an ancestor directory. Find any gitignore files
3432        // in ancestor directories.
3433        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
3434        for (index, ancestor) in root_abs_path.ancestors().enumerate() {
3435            if index != 0 {
3436                if let Ok(ignore) =
3437                    build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
3438                {
3439                    self.state
3440                        .lock()
3441                        .snapshot
3442                        .ignores_by_parent_abs_path
3443                        .insert(ancestor.into(), (ignore.into(), false));
3444                }
3445            }
3446
3447            let ancestor_dot_git = ancestor.join(&*DOT_GIT);
3448            if ancestor_dot_git.is_dir() {
3449                if index != 0 {
3450                    // We canonicalize, since the FS events use the canonicalized path.
3451                    if let Some(ancestor_dot_git) =
3452                        self.fs.canonicalize(&ancestor_dot_git).await.log_err()
3453                    {
3454                        let (ancestor_git_events, _) =
3455                            self.fs.watch(&ancestor_dot_git, FS_WATCH_LATENCY).await;
3456                        fs_events_rx = select(fs_events_rx, ancestor_git_events).boxed();
3457
3458                        // We associate the external git repo with our root folder and
3459                        // also mark where in the git repo the root folder is located.
3460                        self.state.lock().build_git_repository_for_path(
3461                            Path::new("").into(),
3462                            ancestor_dot_git.into(),
3463                            Some(root_abs_path.strip_prefix(ancestor).unwrap().into()),
3464                            self.fs.as_ref(),
3465                        );
3466                    };
3467                }
3468
3469                // Reached root of git repository.
3470                break;
3471            }
3472        }
3473
3474        let (scan_job_tx, scan_job_rx) = channel::unbounded();
3475        {
3476            let mut state = self.state.lock();
3477            state.snapshot.scan_id += 1;
3478            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
3479                let ignore_stack = state
3480                    .snapshot
3481                    .ignore_stack_for_abs_path(&root_abs_path, true);
3482                if ignore_stack.is_abs_path_ignored(&root_abs_path, true) {
3483                    root_entry.is_ignored = true;
3484                    state.insert_entry(root_entry.clone(), self.fs.as_ref());
3485                }
3486                state.enqueue_scan_dir(root_abs_path, &root_entry, &scan_job_tx);
3487            }
3488        };
3489
3490        // Perform an initial scan of the directory.
3491        drop(scan_job_tx);
3492        self.scan_dirs(true, scan_job_rx).await;
3493        {
3494            let mut state = self.state.lock();
3495            state.snapshot.completed_scan_id = state.snapshot.scan_id;
3496        }
3497
3498        self.send_status_update(false, None);
3499
3500        // Process any FS events that occurred while performing the initial scan.
3501        // For these paths, the reported changes cannot be as precise, because we
3502        // didn't have the previous state loaded yet.
3503        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
3504        if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
3505            while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
3506                paths.extend(more_paths);
3507            }
3508            self.process_events(paths).await;
3509        }
3510
3511        // Continue processing events until the worktree is dropped.
3512        self.phase = BackgroundScannerPhase::Events;
3513
3514        loop {
3515            select_biased! {
3516                // Process any path refresh requests from the worktree. Prioritize
3517                // these before handling changes reported by the filesystem.
3518                request = self.scan_requests_rx.recv().fuse() => {
3519                    let Ok(request) = request else { break };
3520                    if !self.process_scan_request(request, false).await {
3521                        return;
3522                    }
3523                }
3524
3525                path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
3526                    let Ok(path_prefix) = path_prefix else { break };
3527                    log::trace!("adding path prefix {:?}", path_prefix);
3528
3529                    let did_scan = self.forcibly_load_paths(&[path_prefix.clone()]).await;
3530                    if did_scan {
3531                        let abs_path =
3532                        {
3533                            let mut state = self.state.lock();
3534                            state.path_prefixes_to_scan.insert(path_prefix.clone());
3535                            state.snapshot.abs_path.join(&path_prefix)
3536                        };
3537
3538                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
3539                            self.process_events(vec![abs_path]).await;
3540                        }
3541                    }
3542                }
3543
3544                paths = fs_events_rx.next().fuse() => {
3545                    let Some(mut paths) = paths else { break };
3546                    while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
3547                        paths.extend(more_paths);
3548                    }
3549                    self.process_events(paths.clone()).await;
3550                }
3551            }
3552        }
3553    }
3554
3555    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
3556        log::debug!("rescanning paths {:?}", request.relative_paths);
3557
3558        request.relative_paths.sort_unstable();
3559        self.forcibly_load_paths(&request.relative_paths).await;
3560
3561        let root_path = self.state.lock().snapshot.abs_path.clone();
3562        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
3563            Ok(path) => path,
3564            Err(err) => {
3565                log::error!("failed to canonicalize root path: {}", err);
3566                return true;
3567            }
3568        };
3569        let abs_paths = request
3570            .relative_paths
3571            .iter()
3572            .map(|path| {
3573                if path.file_name().is_some() {
3574                    root_canonical_path.join(path)
3575                } else {
3576                    root_canonical_path.clone()
3577                }
3578            })
3579            .collect::<Vec<_>>();
3580
3581        {
3582            let mut state = self.state.lock();
3583            let is_idle = state.snapshot.completed_scan_id == state.snapshot.scan_id;
3584            state.snapshot.scan_id += 1;
3585            if is_idle {
3586                state.snapshot.completed_scan_id = state.snapshot.scan_id;
3587            }
3588        }
3589
3590        self.reload_entries_for_paths(
3591            root_path,
3592            root_canonical_path,
3593            &request.relative_paths,
3594            abs_paths,
3595            None,
3596        )
3597        .await;
3598
3599        self.send_status_update(scanning, Some(request.done))
3600    }
3601
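        /// Handles a batch of filesystem events: maps absolute paths back into the
        /// worktree, drops events for excluded or unloaded paths, reloads the affected
        /// entries, and then reconciles ignore statuses and git repositories.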
3602    async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
3603        let root_path = self.state.lock().snapshot.abs_path.clone();
3604        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
3605            Ok(path) => path,
3606            Err(err) => {
3607                log::error!("failed to canonicalize root path: {}", err);
3608                return;
3609            }
3610        };
3611
3612        let mut relative_paths = Vec::with_capacity(abs_paths.len());
3613        let mut dot_git_paths = Vec::new();
3614        abs_paths.sort_unstable();
3615        abs_paths.dedup_by(|a, b| a.starts_with(&b));
3616        abs_paths.retain(|abs_path| {
3617            let snapshot = &self.state.lock().snapshot;
3618            {
3619                let mut is_git_related = false;
3620                if let Some(dot_git_dir) = abs_path
3621                    .ancestors()
3622                    .find(|ancestor| ancestor.file_name() == Some(*DOT_GIT))
3623                {
3624                    let dot_git_path = dot_git_dir
3625                        .strip_prefix(&root_canonical_path)
3626                        .unwrap_or(dot_git_dir)
3627                        .to_path_buf();
3628                    if !dot_git_paths.contains(&dot_git_path) {
3629                        dot_git_paths.push(dot_git_path);
3630                    }
3631                    is_git_related = true;
3632                }
3633
3634                let relative_path: Arc<Path> =
3635                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3636                        path.into()
3637                    } else {
3638                        if is_git_related {
3639                            log::debug!(
3640                              "ignoring event {abs_path:?}, since it's in git dir outside of root path {root_canonical_path:?}",
3641                            );
3642                        } else {
3643                            log::error!(
3644                              "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
3645                            );
3646                        }
3647                        return false;
3648                    };
3649
3650                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
3651                    snapshot
3652                        .entry_for_path(parent)
3653                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
3654                });
3655                if !parent_dir_is_loaded {
3656                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
3657                    return false;
3658                }
3659
3660                if self.settings.is_path_excluded(&relative_path) {
3661                    if !is_git_related {
3662                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
3663                    }
3664                    return false;
3665                }
3666
3667                relative_paths.push(relative_path);
3668                true
3669            }
3670        });
3671
3672        if relative_paths.is_empty() && dot_git_paths.is_empty() {
3673            return;
3674        }
3675
3676        self.state.lock().snapshot.scan_id += 1;
3677
3678        let (scan_job_tx, scan_job_rx) = channel::unbounded();
3679        log::debug!("received fs events {:?}", relative_paths);
3680        self.reload_entries_for_paths(
3681            root_path,
3682            root_canonical_path,
3683            &relative_paths,
3684            abs_paths,
3685            Some(scan_job_tx.clone()),
3686        )
3687        .await;
3688
3689        self.update_ignore_statuses(scan_job_tx).await;
3690        self.scan_dirs(false, scan_job_rx).await;
3691
3692        if !dot_git_paths.is_empty() {
3693            self.update_git_repositories(dot_git_paths).await;
3694        }
3695
3696        {
3697            let mut state = self.state.lock();
3698            state.snapshot.completed_scan_id = state.snapshot.scan_id;
3699            for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
3700                state.scanned_dirs.remove(&entry_id);
3701            }
3702        }
3703
3704        self.send_status_update(false, None);
3705    }
3706
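        /// Eagerly scans any unloaded ancestor directories of the given paths,
        /// returning whether anything new was actually loaded.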
3707    async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
3708        let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
3709        {
3710            let mut state = self.state.lock();
3711            let root_path = state.snapshot.abs_path.clone();
3712            for path in paths {
3713                for ancestor in path.ancestors() {
3714                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
3715                        if entry.kind == EntryKind::UnloadedDir {
3716                            let abs_path = root_path.join(ancestor);
3717                            state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
3718                            state.paths_to_scan.insert(path.clone());
3719                            break;
3720                        }
3721                    }
3722                }
3723            }
3724            drop(scan_job_tx);
3725        }
3726        while let Some(job) = scan_job_rx.next().await {
3727            self.scan_dir(&job).await.log_err();
3728        }
3729
3730        mem::take(&mut self.state.lock().paths_to_scan).len() > 0
3731    }
3732
3733    async fn scan_dirs(
3734        &self,
3735        enable_progress_updates: bool,
3736        scan_jobs_rx: channel::Receiver<ScanJob>,
3737    ) {
3738        use futures::FutureExt as _;
3739
3740        if self
3741            .status_updates_tx
3742            .unbounded_send(ScanState::Started)
3743            .is_err()
3744        {
3745            return;
3746        }
3747
3748        let progress_update_count = AtomicUsize::new(0);
3749        self.executor
3750            .scoped(|scope| {
3751                for _ in 0..self.executor.num_cpus() {
3752                    scope.spawn(async {
3753                        let mut last_progress_update_count = 0;
3754                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
3755                        futures::pin_mut!(progress_update_timer);
3756
3757                        loop {
3758                            select_biased! {
3759                                // Process any path refresh requests before moving on to process
3760                                // the scan queue, so that user operations are prioritized.
3761                                request = self.scan_requests_rx.recv().fuse() => {
3762                                    let Ok(request) = request else { break };
3763                                    if !self.process_scan_request(request, true).await {
3764                                        return;
3765                                    }
3766                                }
3767
3768                                // Send periodic progress updates to the worktree. Use an atomic counter
3769                                // to ensure that only one of the workers sends a progress update after
3770                                // the update interval elapses.
3771                                _ = progress_update_timer => {
3772                                    match progress_update_count.compare_exchange(
3773                                        last_progress_update_count,
3774                                        last_progress_update_count + 1,
3775                                        SeqCst,
3776                                        SeqCst
3777                                    ) {
3778                                        Ok(_) => {
3779                                            last_progress_update_count += 1;
3780                                            self.send_status_update(true, None);
3781                                        }
3782                                        Err(count) => {
3783                                            last_progress_update_count = count;
3784                                        }
3785                                    }
3786                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
3787                                }
3788
3789                                // Recursively load directories from the file system.
3790                                job = scan_jobs_rx.recv().fuse() => {
3791                                    let Ok(job) = job else { break };
3792                                    if let Err(err) = self.scan_dir(&job).await {
3793                                        if job.path.as_ref() != Path::new("") {
3794                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
3795                                        }
3796                                    }
3797                                }
3798                            }
3799                        }
3800                    })
3801                }
3802            })
3803            .await;
3804    }
3805
3806    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
3807        let mut state = self.state.lock();
3808        if state.changed_paths.is_empty() && scanning {
3809            return true;
3810        }
3811
3812        let new_snapshot = state.snapshot.clone();
3813        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
3814        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
3815        state.changed_paths.clear();
3816
3817        self.status_updates_tx
3818            .unbounded_send(ScanState::Updated {
3819                snapshot: new_snapshot,
3820                changes,
3821                scanning,
3822                barrier,
3823            })
3824            .is_ok()
3825    }
3826
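        /// Scans a single directory: reads its children (processing `.git` and
        /// `.gitignore` first), applies exclusion and ignore rules, records git status
        /// for files in the containing repository, and enqueues jobs for child
        /// directories that should also be scanned.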
3827    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
3828        let root_abs_path;
3829        let root_char_bag;
3830        {
3831            let snapshot = &self.state.lock().snapshot;
3832            if self.settings.is_path_excluded(&job.path) {
3833                log::error!("skipping excluded directory {:?}", job.path);
3834                return Ok(());
3835            }
3836            log::debug!("scanning directory {:?}", job.path);
3837            root_abs_path = snapshot.abs_path().clone();
3838            root_char_bag = snapshot.root_char_bag;
3839        }
3840
3841        let next_entry_id = self.next_entry_id.clone();
3842        let mut ignore_stack = job.ignore_stack.clone();
3843        let mut containing_repository = job.containing_repository.clone();
3844        let mut new_ignore = None;
3845        let mut root_canonical_path = None;
3846        let mut new_entries: Vec<Entry> = Vec::new();
3847        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
3848        let mut child_paths = self
3849            .fs
3850            .read_dir(&job.abs_path)
3851            .await?
3852            .filter_map(|entry| async {
3853                match entry {
3854                    Ok(entry) => Some(entry),
3855                    Err(error) => {
3856                        log::error!("error processing entry {:?}", error);
3857                        None
3858                    }
3859                }
3860            })
3861            .collect::<Vec<_>>()
3862            .await;
3863
3864        // Ensure that .git and .gitignore are processed first, so that the ignore
            // stack and containing repository are known before the other children are scanned.
3865        swap_to_front(&mut child_paths, *GITIGNORE);
3866        swap_to_front(&mut child_paths, *DOT_GIT);
3867
3868        for child_abs_path in child_paths {
3869            let child_abs_path: Arc<Path> = child_abs_path.into();
3870            let child_name = child_abs_path.file_name().unwrap();
3871            let child_path: Arc<Path> = job.path.join(child_name).into();
3872
3873            if child_name == *DOT_GIT {
3874                let repo = self
3875                    .state
3876                    .lock()
3877                    .build_git_repository(child_path.clone(), self.fs.as_ref());
3878                if let Some((work_directory, repository)) = repo {
3879                    let t0 = Instant::now();
3880                    let statuses = repository
3881                        .statuses(Path::new(""))
3882                        .log_err()
3883                        .unwrap_or_default();
3884                    log::trace!("computed git status in {:?}", t0.elapsed());
3885                    containing_repository = Some(ScanJobContainingRepository {
3886                        work_directory,
3887                        statuses,
3888                    });
3889                }
3890                self.watcher.add(child_abs_path.as_ref()).log_err();
3891            } else if child_name == *GITIGNORE {
3892                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
3893                    Ok(ignore) => {
3894                        let ignore = Arc::new(ignore);
3895                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3896                        new_ignore = Some(ignore);
3897                    }
3898                    Err(error) => {
3899                        log::error!(
3900                            "error loading .gitignore file {:?} - {:?}",
3901                            child_name,
3902                            error
3903                        );
3904                    }
3905                }
3906            }
3907
3908            if self.settings.is_path_excluded(&child_path) {
3909                log::debug!("skipping excluded child entry {child_path:?}");
3910                self.state.lock().remove_path(&child_path);
3911                continue;
3912            }
3913
3914            let child_metadata = match self.fs.metadata(&child_abs_path).await {
3915                Ok(Some(metadata)) => metadata,
3916                Ok(None) => continue,
3917                Err(err) => {
3918                    log::error!("error processing {child_abs_path:?}: {err:?}");
3919                    continue;
3920                }
3921            };
3922
3923            let mut child_entry = Entry::new(
3924                child_path.clone(),
3925                &child_metadata,
3926                &next_entry_id,
3927                root_char_bag,
3928                None,
3929            );
3930
3931            if job.is_external {
3932                child_entry.is_external = true;
3933            } else if child_metadata.is_symlink {
3934                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
3935                    Ok(path) => path,
3936                    Err(err) => {
3937                        log::error!(
3938                            "error reading target of symlink {:?}: {:?}",
3939                            child_abs_path,
3940                            err
3941                        );
3942                        continue;
3943                    }
3944                };
3945
3946                // Lazily canonicalize the root path in order to determine if
3947                // symlinks point outside of the worktree.
3948                let root_canonical_path = match &root_canonical_path {
3949                    Some(path) => path,
3950                    None => match self.fs.canonicalize(&root_abs_path).await {
3951                        Ok(path) => root_canonical_path.insert(path),
3952                        Err(err) => {
3953                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
3954                            continue;
3955                        }
3956                    },
3957                };
3958
3959                if !canonical_path.starts_with(root_canonical_path) {
3960                    child_entry.is_external = true;
3961                }
3962
3963                child_entry.canonical_path = Some(canonical_path);
3964            }
3965
3966            if child_entry.is_dir() {
3967                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
3968
3969                // Avoid recursing endlessly (and eventually crashing) when a symlink creates a cycle
3970                if job.ancestor_inodes.contains(&child_entry.inode) {
3971                    new_jobs.push(None);
3972                } else {
3973                    let mut ancestor_inodes = job.ancestor_inodes.clone();
3974                    ancestor_inodes.insert(child_entry.inode);
3975
3976                    new_jobs.push(Some(ScanJob {
3977                        abs_path: child_abs_path.clone(),
3978                        path: child_path,
3979                        is_external: child_entry.is_external,
3980                        ignore_stack: if child_entry.is_ignored {
3981                            IgnoreStack::all()
3982                        } else {
3983                            ignore_stack.clone()
3984                        },
3985                        ancestor_inodes,
3986                        scan_queue: job.scan_queue.clone(),
3987                        containing_repository: containing_repository.clone(),
3988                    }));
3989                }
3990            } else {
3991                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
3992                if !child_entry.is_ignored {
3993                    if let Some(repo) = &containing_repository {
3994                        if let Ok(repo_path) = child_entry.path.strip_prefix(&repo.work_directory) {
3995                            let repo_path = RepoPath(repo_path.into());
3996                            child_entry.git_status = repo.statuses.get(&repo_path);
3997                        }
3998                    }
3999                }
4000            }
4001
4002            {
4003                let relative_path = job.path.join(child_name);
4004                if self.is_path_private(&relative_path) {
4005                    log::debug!("detected private file: {relative_path:?}");
4006                    child_entry.is_private = true;
4007                }
4008            }
4009
4010            new_entries.push(child_entry);
4011        }
4012
4013        let mut state = self.state.lock();
4014
4015        // Identify any subdirectories that should not be scanned.
4016        let mut job_ix = 0;
4017        for entry in &mut new_entries {
4018            state.reuse_entry_id(entry);
4019            if entry.is_dir() {
4020                if state.should_scan_directory(entry) {
4021                    job_ix += 1;
4022                } else {
4023                    log::debug!("defer scanning directory {:?}", entry.path);
4024                    entry.kind = EntryKind::UnloadedDir;
4025                    new_jobs.remove(job_ix);
4026                }
4027            }
4028        }
4029
4030        state.populate_dir(&job.path, new_entries, new_ignore);
4031        self.watcher.add(job.abs_path.as_ref()).log_err();
4032
4033        for new_job in new_jobs.into_iter().flatten() {
4034            job.scan_queue
4035                .try_send(new_job)
4036                .expect("channel is unbounded");
4037        }
4038
4039        Ok(())
4040    }
4041
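        /// Refreshes the given paths against the filesystem. Stale entries are removed
        /// before new metadata is inserted so that renames are detected, and directory
        /// entries may be enqueued for a recursive rescan.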
4042    async fn reload_entries_for_paths(
4043        &self,
4044        root_abs_path: Arc<Path>,
4045        root_canonical_path: PathBuf,
4046        relative_paths: &[Arc<Path>],
4047        abs_paths: Vec<PathBuf>,
4048        scan_queue_tx: Option<Sender<ScanJob>>,
4049    ) {
4050        let metadata = futures::future::join_all(
4051            abs_paths
4052                .iter()
4053                .map(|abs_path| async move {
4054                    let metadata = self.fs.metadata(abs_path).await?;
4055                    if let Some(metadata) = metadata {
4056                        let canonical_path = self.fs.canonicalize(abs_path).await?;
4057
4058                        // If we're on a case-insensitive filesystem (the default on macOS), we only
4059                        // want to keep the metadata for a non-symlink file if its absolute path
4060                        // matches its canonical path.
4061                        // Otherwise this might be a case-only rename (`mv test.txt TEST.TXT`), and we
4062                        // want to discard the metadata for the old path (`test.txt`) so that it's
4063                        // treated as removed.
4064                        if !self.fs_case_sensitive && !metadata.is_symlink {
4065                            let canonical_file_name = canonical_path.file_name();
4066                            let file_name = abs_path.file_name();
4067                            if canonical_file_name != file_name {
4068                                return Ok(None);
4069                            }
4070                        }
4071
4072                        anyhow::Ok(Some((metadata, canonical_path)))
4073                    } else {
4074                        Ok(None)
4075                    }
4076                })
4077                .collect::<Vec<_>>(),
4078        )
4079        .await;
4080
4081        let mut state = self.state.lock();
4082        let doing_recursive_update = scan_queue_tx.is_some();
4083
4084        // Remove any entries for paths that no longer exist or are being recursively
4085        // refreshed. Do this before adding any new entries, so that renames can be
4086        // detected regardless of the order of the paths.
4087        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
4088            if matches!(metadata, Ok(None)) || doing_recursive_update {
4089                log::trace!("remove path {:?}", path);
4090                state.remove_path(path);
4091            }
4092        }
4093
4094        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
4095            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
4096            match metadata {
4097                Ok(Some((metadata, canonical_path))) => {
4098                    let ignore_stack = state
4099                        .snapshot
4100                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
4101
4102                    let mut fs_entry = Entry::new(
4103                        path.clone(),
4104                        metadata,
4105                        self.next_entry_id.as_ref(),
4106                        state.snapshot.root_char_bag,
4107                        if metadata.is_symlink {
4108                            Some(canonical_path.to_path_buf())
4109                        } else {
4110                            None
4111                        },
4112                    );
4113
4114                    let is_dir = fs_entry.is_dir();
4115                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
4116
4117                    fs_entry.is_external = !canonical_path.starts_with(&root_canonical_path);
4118                    fs_entry.is_private = self.is_path_private(path);
4119
4120                    if !is_dir && !fs_entry.is_ignored && !fs_entry.is_external {
4121                        if let Some((repo_entry, repo)) = state.snapshot.repo_for_path(path) {
4122                            if let Ok(repo_path) = repo_entry.relativize(&state.snapshot, path) {
4123                                fs_entry.git_status = repo.repo_ptr.status(&repo_path);
4124                            }
4125                        }
4126                    }
4127
4128                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, fs_entry.is_dir()) {
4129                        if state.should_scan_directory(&fs_entry)
4130                            || (fs_entry.path.as_os_str().is_empty()
4131                                && abs_path.file_name() == Some(*DOT_GIT))
4132                        {
4133                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
4134                        } else {
4135                            fs_entry.kind = EntryKind::UnloadedDir;
4136                        }
4137                    }
4138
4139                    state.insert_entry(fs_entry, self.fs.as_ref());
4140                }
4141                Ok(None) => {
4142                    self.remove_repo_path(path, &mut state.snapshot);
4143                }
4144                Err(err) => {
4145                    // TODO - create a special 'error' entry in the entries tree to mark this
4146                    log::error!("error reading file {abs_path:?} on event: {err:#}");
4147                }
4148            }
4149        }
4150
4151        util::extend_sorted(
4152            &mut state.changed_paths,
4153            relative_paths.iter().cloned(),
4154            usize::MAX,
4155            Ord::cmp,
4156        );
4157    }
4158
4159    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
4160        if !path
4161            .components()
4162            .any(|component| component.as_os_str() == *DOT_GIT)
4163        {
4164            if let Some(repository) = snapshot.repository_for_work_directory(path) {
4165                let entry = repository.work_directory.0;
4166                snapshot.git_repositories.remove(&entry);
4167                snapshot
4168                    .snapshot
4169                    .repository_entries
4170                    .remove(&RepositoryWorkDirectory(path.into()));
4171                return Some(());
4172            }
4173        }
4174
4175        // TODO statuses
4176        // Track when a .git is removed and iterate over the file system there
4177
4178        Some(())
4179    }
4180
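        /// Recomputes ignore status for directories whose `.gitignore` files changed,
        /// spreading the work across the executor's threads.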
4181    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
4182        use futures::FutureExt as _;
4183
4184        let mut ignores_to_update = Vec::new();
4185        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
4186        let prev_snapshot;
4187        {
4188            let snapshot = &mut self.state.lock().snapshot;
4189            let abs_path = snapshot.abs_path.clone();
4190            snapshot
4191                .ignores_by_parent_abs_path
4192                .retain(|parent_abs_path, (_, needs_update)| {
4193                    if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
4194                        if *needs_update {
4195                            *needs_update = false;
4196                            if snapshot.snapshot.entry_for_path(parent_path).is_some() {
4197                                ignores_to_update.push(parent_abs_path.clone());
4198                            }
4199                        }
4200
4201                        let ignore_path = parent_path.join(&*GITIGNORE);
4202                        if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
4203                            return false;
4204                        }
4205                    }
4206                    true
4207                });
4208
4209            ignores_to_update.sort_unstable();
4210            let mut ignores_to_update = ignores_to_update.into_iter().peekable();
4211            while let Some(parent_abs_path) = ignores_to_update.next() {
4212                while ignores_to_update
4213                    .peek()
4214                    .map_or(false, |p| p.starts_with(&parent_abs_path))
4215                {
4216                    ignores_to_update.next().unwrap();
4217                }
4218
4219                let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
4220                ignore_queue_tx
4221                    .send_blocking(UpdateIgnoreStatusJob {
4222                        abs_path: parent_abs_path,
4223                        ignore_stack,
4224                        ignore_queue: ignore_queue_tx.clone(),
4225                        scan_queue: scan_job_tx.clone(),
4226                    })
4227                    .unwrap();
4228            }
4229
4230            prev_snapshot = snapshot.clone();
4231        }
4232        drop(ignore_queue_tx);
4233
4234        self.executor
4235            .scoped(|scope| {
4236                for _ in 0..self.executor.num_cpus() {
4237                    scope.spawn(async {
4238                        loop {
4239                            select_biased! {
4240                                // Process any path refresh requests before moving on to process
4241                                // the queue of ignore statuses.
4242                                request = self.scan_requests_rx.recv().fuse() => {
4243                                    let Ok(request) = request else { break };
4244                                    if !self.process_scan_request(request, true).await {
4245                                        return;
4246                                    }
4247                                }
4248
4249                                // Recursively process directories whose ignores have changed.
4250                                job = ignore_queue_rx.recv().fuse() => {
4251                                    let Ok(job) = job else { break };
4252                                    self.update_ignore_status(job, &prev_snapshot).await;
4253                                }
4254                            }
4255                        }
4256                    });
4257                }
4258            })
4259            .await;
4260    }
4261
4262    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
4263        log::trace!("update ignore status {:?}", job.abs_path);
4264
4265        let mut ignore_stack = job.ignore_stack;
4266        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
4267            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
4268        }
4269
4270        let mut entries_by_id_edits = Vec::new();
4271        let mut entries_by_path_edits = Vec::new();
4272        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
4273        let repo = snapshot.repo_for_path(path);
4274        for mut entry in snapshot.child_entries(path).cloned() {
4275            let was_ignored = entry.is_ignored;
4276            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
4277            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
4278
4279            if entry.is_dir() {
4280                let child_ignore_stack = if entry.is_ignored {
4281                    IgnoreStack::all()
4282                } else {
4283                    ignore_stack.clone()
4284                };
4285
4286                // Scan any directories that were previously ignored and weren't previously scanned.
4287                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
4288                    let state = self.state.lock();
4289                    if state.should_scan_directory(&entry) {
4290                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
4291                    }
4292                }
4293
4294                job.ignore_queue
4295                    .send(UpdateIgnoreStatusJob {
4296                        abs_path: abs_path.clone(),
4297                        ignore_stack: child_ignore_stack,
4298                        ignore_queue: job.ignore_queue.clone(),
4299                        scan_queue: job.scan_queue.clone(),
4300                    })
4301                    .await
4302                    .unwrap();
4303            }
4304
4305            if entry.is_ignored != was_ignored {
4306                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
4307                path_entry.scan_id = snapshot.scan_id;
4308                path_entry.is_ignored = entry.is_ignored;
4309                if !entry.is_dir() && !entry.is_ignored && !entry.is_external {
4310                    if let Some((ref repo_entry, local_repo)) = repo {
4311                        if let Ok(repo_path) = repo_entry.relativize(&snapshot, &entry.path) {
4312                            entry.git_status = local_repo.repo_ptr.status(&repo_path);
4313                        }
4314                    }
4315                }
4316                entries_by_id_edits.push(Edit::Insert(path_entry));
4317                entries_by_path_edits.push(Edit::Insert(entry));
4318            }
4319        }
4320
4321        let state = &mut self.state.lock();
4322        for edit in &entries_by_path_edits {
4323            if let Edit::Insert(entry) = edit {
4324                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
4325                    state.changed_paths.insert(ix, entry.path.clone());
4326                }
4327            }
4328        }
4329
4330        state
4331            .snapshot
4332            .entries_by_path
4333            .edit(entries_by_path_edits, &());
4334        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
4335    }
4336
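    /// Reconcile the worktree's git repository state with the `.git` paths that changed
    /// on disk: build or refresh the affected repository entries, drop repositories whose
    /// `.git` entry no longer exists, and then recompute git statuses for the affected
    /// repositories while continuing to service scan requests.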
4337    async fn update_git_repositories(&self, dot_git_paths: Vec<PathBuf>) {
4338        log::debug!("reloading repositories: {dot_git_paths:?}");
4339
4340        let mut repo_updates = Vec::new();
4341        {
4342            let mut state = self.state.lock();
4343            let scan_id = state.snapshot.scan_id;
4344            for dot_git_dir in dot_git_paths {
4345                let existing_repository_entry =
4346                    state
4347                        .snapshot
4348                        .git_repositories
4349                        .iter()
4350                        .find_map(|(entry_id, repo)| {
4351                            (repo.git_dir_path.as_ref() == dot_git_dir)
4352                                .then(|| (*entry_id, repo.clone()))
4353                        });
4354
4355                let (work_directory, repository) = match existing_repository_entry {
4356                    None => {
4357                        match state.build_git_repository(dot_git_dir.into(), self.fs.as_ref()) {
4358                            Some(output) => output,
4359                            None => continue,
4360                        }
4361                    }
4362                    Some((entry_id, repository)) => {
4363                        if repository.git_dir_scan_id == scan_id {
4364                            continue;
4365                        }
4366                        let Some(work_dir) = state
4367                            .snapshot
4368                            .entry_for_id(entry_id)
4369                            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
4370                        else {
4371                            continue;
4372                        };
4373
4374                        let repo = &repository.repo_ptr;
4375                        let branch = repo.branch_name();
4376                        repo.reload_index();
4377
4378                        state
4379                            .snapshot
4380                            .git_repositories
4381                            .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
4382                        state
4383                            .snapshot
4384                            .snapshot
4385                            .repository_entries
4386                            .update(&work_dir, |entry| entry.branch = branch.map(Into::into));
4387                        (work_dir, repository.repo_ptr.clone())
4388                    }
4389                };
4390
4391                repo_updates.push(UpdateGitStatusesJob {
4392                    location_in_repo: state
4393                        .snapshot
4394                        .repository_entries
4395                        .get(&work_directory)
4396                        .and_then(|repo| repo.location_in_repo.clone())
4397                        .clone(),
4398                    work_directory,
4399                    repository,
4400                });
4401            }
4402
4403            // Remove any git repositories whose .git entry no longer exists.
4404            let snapshot = &mut state.snapshot;
4405            let mut ids_to_preserve = HashSet::default();
4406            for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
4407                let exists_in_snapshot = snapshot
4408                    .entry_for_id(work_directory_id)
4409                    .map_or(false, |entry| {
4410                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
4411                    });
4412                if exists_in_snapshot {
4413                    ids_to_preserve.insert(work_directory_id);
4414                } else {
4415                    let git_dir_abs_path = snapshot.abs_path().join(&entry.git_dir_path);
4416                    let git_dir_excluded = self.settings.is_path_excluded(&entry.git_dir_path);
4417                    if git_dir_excluded
4418                        && !matches!(
4419                            smol::block_on(self.fs.metadata(&git_dir_abs_path)),
4420                            Ok(None)
4421                        )
4422                    {
4423                        ids_to_preserve.insert(work_directory_id);
4424                    }
4425                }
4426            }
4427
4428            snapshot
4429                .git_repositories
4430                .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
4431            snapshot
4432                .repository_entries
4433                .retain(|_, entry| ids_to_preserve.contains(&entry.work_directory.0));
4434        }
4435
4436        let (mut updates_done_tx, mut updates_done_rx) = barrier::channel();
4437        self.executor
4438            .scoped(|scope| {
4439                scope.spawn(async {
4440                    for repo_update in repo_updates {
4441                        self.update_git_statuses(repo_update);
4442                    }
4443                    updates_done_tx.blocking_send(()).ok();
4444                });
4445
4446                scope.spawn(async {
4447                    loop {
4448                        select_biased! {
4449                            // Process any path refresh requests before moving on to process
4450                            // the queue of git statuses.
4451                            request = self.scan_requests_rx.recv().fuse() => {
4452                                let Ok(request) = request else { break };
4453                                if !self.process_scan_request(request, true).await {
4454                                    return;
4455                                }
4456                            }
4457                            _ = updates_done_rx.recv().fuse() => break,
4458                        }
4459                    }
4460                });
4461            })
4462            .await;
4463    }
4464
4465    /// Update the git statuses for the entries within a given repository's work directory.
4466    fn update_git_statuses(&self, job: UpdateGitStatusesJob) {
4467        log::trace!("updating git statuses for repo {:?}", job.work_directory.0);
4468        let t0 = Instant::now();
4469        let Some(statuses) = job.repository.statuses(Path::new("")).log_err() else {
4470            return;
4471        };
4472        log::trace!(
4473            "computed git statuses for repo {:?} in {:?}",
4474            job.work_directory.0,
4475            t0.elapsed()
4476        );
4477
4478        let t0 = Instant::now();
4479        let mut changes = Vec::new();
4480        let snapshot = self.state.lock().snapshot.snapshot.clone();
4481        for file in snapshot.traverse_from_path(true, false, false, job.work_directory.0.as_ref()) {
4482            let Ok(repo_path) = file.path.strip_prefix(&job.work_directory.0) else {
4483                break;
4484            };
4485            let git_status = if let Some(location) = &job.location_in_repo {
4486                statuses.get(&location.join(repo_path))
4487            } else {
4488                statuses.get(&repo_path)
4489            };
4490            if file.git_status != git_status {
4491                let mut entry = file.clone();
4492                entry.git_status = git_status;
4493                changes.push((entry.path, git_status));
4494            }
4495        }
4496
4497        let mut state = self.state.lock();
4498        let edits = changes
4499            .iter()
4500            .filter_map(|(path, git_status)| {
4501                let entry = state.snapshot.entry_for_path(path)?.clone();
4502                Some(Edit::Insert(Entry {
4503                    git_status: *git_status,
4504                    ..entry.clone()
4505                }))
4506            })
4507            .collect();
4508
4509        // Apply the git status changes.
4510        util::extend_sorted(
4511            &mut state.changed_paths,
4512            changes.iter().map(|p| p.0.clone()),
4513            usize::MAX,
4514            Ord::cmp,
4515        );
4516        state.snapshot.entries_by_path.edit(edits, &());
4517        log::trace!(
4518            "applied git status updates for repo {:?} in {:?}",
4519            job.work_directory.0,
4520            t0.elapsed(),
4521        );
4522    }
4523
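    /// Compare the old and new snapshots along the given event paths, producing the set
    /// of per-entry changes (added, removed, updated, or newly loaded).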
4524    fn build_change_set(
4525        &self,
4526        old_snapshot: &Snapshot,
4527        new_snapshot: &Snapshot,
4528        event_paths: &[Arc<Path>],
4529    ) -> UpdatedEntriesSet {
4530        use BackgroundScannerPhase::*;
4531        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};
4532
4533        // Identify which paths have changed. Use the known set of changed
4534        // parent paths to optimize the search.
4535        let mut changes = Vec::new();
4536        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
4537        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
4538        let mut last_newly_loaded_dir_path = None;
4539        old_paths.next(&());
4540        new_paths.next(&());
4541        for path in event_paths {
4542            let path = PathKey(path.clone());
4543            if old_paths.item().map_or(false, |e| e.path < path.0) {
4544                old_paths.seek_forward(&path, Bias::Left, &());
4545            }
4546            if new_paths.item().map_or(false, |e| e.path < path.0) {
4547                new_paths.seek_forward(&path, Bias::Left, &());
4548            }
4549            loop {
4550                match (old_paths.item(), new_paths.item()) {
4551                    (Some(old_entry), Some(new_entry)) => {
4552                        if old_entry.path > path.0
4553                            && new_entry.path > path.0
4554                            && !old_entry.path.starts_with(&path.0)
4555                            && !new_entry.path.starts_with(&path.0)
4556                        {
4557                            break;
4558                        }
4559
4560                        match Ord::cmp(&old_entry.path, &new_entry.path) {
4561                            Ordering::Less => {
4562                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
4563                                old_paths.next(&());
4564                            }
4565                            Ordering::Equal => {
4566                                if self.phase == EventsReceivedDuringInitialScan {
4567                                    if old_entry.id != new_entry.id {
4568                                        changes.push((
4569                                            old_entry.path.clone(),
4570                                            old_entry.id,
4571                                            Removed,
4572                                        ));
4573                                    }
4574                                    // If the worktree was not fully initialized when this event was generated,
4575                                    // we can't know whether this entry was added during the scan or whether
4576                                    // it was merely updated.
4577                                    changes.push((
4578                                        new_entry.path.clone(),
4579                                        new_entry.id,
4580                                        AddedOrUpdated,
4581                                    ));
4582                                } else if old_entry.id != new_entry.id {
4583                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
4584                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
4585                                } else if old_entry != new_entry {
4586                                    if old_entry.kind.is_unloaded() {
4587                                        last_newly_loaded_dir_path = Some(&new_entry.path);
4588                                        changes.push((
4589                                            new_entry.path.clone(),
4590                                            new_entry.id,
4591                                            Loaded,
4592                                        ));
4593                                    } else {
4594                                        changes.push((
4595                                            new_entry.path.clone(),
4596                                            new_entry.id,
4597                                            Updated,
4598                                        ));
4599                                    }
4600                                }
4601                                old_paths.next(&());
4602                                new_paths.next(&());
4603                            }
4604                            Ordering::Greater => {
4605                                let is_newly_loaded = self.phase == InitialScan
4606                                    || last_newly_loaded_dir_path
4607                                        .as_ref()
4608                                        .map_or(false, |dir| new_entry.path.starts_with(&dir));
4609                                changes.push((
4610                                    new_entry.path.clone(),
4611                                    new_entry.id,
4612                                    if is_newly_loaded { Loaded } else { Added },
4613                                ));
4614                                new_paths.next(&());
4615                            }
4616                        }
4617                    }
4618                    (Some(old_entry), None) => {
4619                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
4620                        old_paths.next(&());
4621                    }
4622                    (None, Some(new_entry)) => {
4623                        let is_newly_loaded = self.phase == InitialScan
4624                            || last_newly_loaded_dir_path
4625                                .as_ref()
4626                                .map_or(false, |dir| new_entry.path.starts_with(&dir));
4627                        changes.push((
4628                            new_entry.path.clone(),
4629                            new_entry.id,
4630                            if is_newly_loaded { Loaded } else { Added },
4631                        ));
4632                        new_paths.next(&());
4633                    }
4634                    (None, None) => break,
4635                }
4636            }
4637        }
4638
4639        changes.into()
4640    }
4641
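    /// Resolve after `FS_WATCH_LATENCY` when `running` is true (or after a simulated
    /// delay when using a fake filesystem in tests); otherwise pend forever.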
4642    async fn progress_timer(&self, running: bool) {
4643        if !running {
4644            return futures::future::pending().await;
4645        }
4646
4647        #[cfg(any(test, feature = "test-support"))]
4648        if self.fs.is_fake() {
4649            return self.executor.simulate_random_delay().await;
4650        }
4651
4652        smol::Timer::after(FS_WATCH_LATENCY).await;
4653    }
4654
4655    fn is_path_private(&self, path: &Path) -> bool {
4656        !self.share_private_files && self.settings.is_path_private(path)
4657    }
4658}
4659
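/// Move the path whose file name equals `file` to the front of `child_paths`, if present.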
4660fn swap_to_front(child_paths: &mut Vec<PathBuf>, file: &OsStr) {
4661    let position = child_paths
4662        .iter()
4663        .position(|path| path.file_name().unwrap() == file);
4664    if let Some(position) = position {
4665        let temp = child_paths.remove(position);
4666        child_paths.insert(0, temp);
4667    }
4668}
4669
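/// Extend the worktree root's character bag with the lowercased characters of `path`,
/// producing the character bag used when fuzzy-matching this entry.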
4670fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
4671    let mut result = root_char_bag;
4672    result.extend(
4673        path.to_string_lossy()
4674            .chars()
4675            .map(|c| c.to_ascii_lowercase()),
4676    );
4677    result
4678}
4679
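/// A queued request for the background scanner to scan a single directory, along with
/// the ignore stack, containing repository, and channel needed to enqueue further scans.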
4680struct ScanJob {
4681    abs_path: Arc<Path>,
4682    path: Arc<Path>,
4683    ignore_stack: Arc<IgnoreStack>,
4684    scan_queue: Sender<ScanJob>,
4685    ancestor_inodes: TreeSet<u64>,
4686    is_external: bool,
4687    containing_repository: Option<ScanJobContainingRepository>,
4688}
4689
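/// The repository containing a directory being scanned, together with the statuses
/// computed for that repository.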
4690#[derive(Clone)]
4691struct ScanJobContainingRepository {
4692    work_directory: RepositoryWorkDirectory,
4693    statuses: GitStatus,
4694}
4695
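/// A queued request to recompute the ignored status of the entries beneath `abs_path`.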
4696struct UpdateIgnoreStatusJob {
4697    abs_path: Arc<Path>,
4698    ignore_stack: Arc<IgnoreStack>,
4699    ignore_queue: Sender<UpdateIgnoreStatusJob>,
4700    scan_queue: Sender<ScanJob>,
4701}
4702
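/// A queued request to refresh git statuses for the entries within a repository's
/// work directory.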
4703struct UpdateGitStatusesJob {
4704    work_directory: RepositoryWorkDirectory,
4705    location_in_repo: Option<Arc<Path>>,
4706    repository: Arc<dyn GitRepository>,
4707}
4708
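/// Test-only helpers for flushing pending file-system events through a worktree so that
/// tests observe a settled state.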
4709pub trait WorktreeModelHandle {
4710    #[cfg(any(test, feature = "test-support"))]
4711    fn flush_fs_events<'a>(
4712        &self,
4713        cx: &'a mut gpui::TestAppContext,
4714    ) -> futures::future::LocalBoxFuture<'a, ()>;
4715
4716    #[cfg(any(test, feature = "test-support"))]
4717    fn flush_fs_events_in_root_git_repository<'a>(
4718        &self,
4719        cx: &'a mut gpui::TestAppContext,
4720    ) -> futures::future::LocalBoxFuture<'a, ()>;
4721}
4722
4723impl WorktreeModelHandle for Model<Worktree> {
4724    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
4725    // occurred before the worktree was constructed. These events can cause the worktree to perform
4726    // extra directory scans, and emit extra scan-state notifications.
4727    //
4728    // This function mutates the worktree's directory and waits for those mutations to be picked up,
4729    // to ensure that all redundant FS events have already been processed.
4730    #[cfg(any(test, feature = "test-support"))]
4731    fn flush_fs_events<'a>(
4732        &self,
4733        cx: &'a mut gpui::TestAppContext,
4734    ) -> futures::future::LocalBoxFuture<'a, ()> {
4735        let file_name = "fs-event-sentinel";
4736
4737        let tree = self.clone();
4738        let (fs, root_path) = self.update(cx, |tree, _| {
4739            let tree = tree.as_local().unwrap();
4740            (tree.fs.clone(), tree.abs_path().clone())
4741        });
4742
4743        async move {
4744            fs.create_file(&root_path.join(file_name), Default::default())
4745                .await
4746                .unwrap();
4747
4748            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
4749                .await;
4750
4751            fs.remove_file(&root_path.join(file_name), Default::default())
4752                .await
4753                .unwrap();
4754            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
4755                .await;
4756
4757            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4758                .await;
4759        }
4760        .boxed_local()
4761    }
4762
4763    // This function is similar to flush_fs_events, except that it waits for events to be flushed in
4764    // the .git folder of the root repository.
4765    // The reason for its existence is that a repository's .git folder might live *outside* of the
4766    // worktree and thus its FS events might go through a different path.
4767    // In order to flush those, we need to create artificial events in the .git folder and wait
4768    // for the repository to be reloaded.
4769    #[cfg(any(test, feature = "test-support"))]
4770    fn flush_fs_events_in_root_git_repository<'a>(
4771        &self,
4772        cx: &'a mut gpui::TestAppContext,
4773    ) -> futures::future::LocalBoxFuture<'a, ()> {
4774        let file_name = "fs-event-sentinel";
4775
4776        let tree = self.clone();
4777        let (fs, root_path, mut git_dir_scan_id) = self.update(cx, |tree, _| {
4778            let tree = tree.as_local().unwrap();
4779            let root_entry = tree.root_git_entry().unwrap();
4780            let local_repo_entry = tree.get_local_repo(&root_entry).unwrap();
4781            (
4782                tree.fs.clone(),
4783                local_repo_entry.git_dir_path.clone(),
4784                local_repo_entry.git_dir_scan_id,
4785            )
4786        });
4787
4788        let scan_id_increased = |tree: &mut Worktree, git_dir_scan_id: &mut usize| {
4789            let root_entry = tree.root_git_entry().unwrap();
4790            let local_repo_entry = tree
4791                .as_local()
4792                .unwrap()
4793                .get_local_repo(&root_entry)
4794                .unwrap();
4795
4796            if local_repo_entry.git_dir_scan_id > *git_dir_scan_id {
4797                *git_dir_scan_id = local_repo_entry.git_dir_scan_id;
4798                true
4799            } else {
4800                false
4801            }
4802        };
4803
4804        async move {
4805            fs.create_file(&root_path.join(file_name), Default::default())
4806                .await
4807                .unwrap();
4808
4809            cx.condition(&tree, |tree, _| {
4810                scan_id_increased(tree, &mut git_dir_scan_id)
4811            })
4812            .await;
4813
4814            fs.remove_file(&root_path.join(file_name), Default::default())
4815                .await
4816                .unwrap();
4817
4818            cx.condition(&tree, |tree, _| {
4819                scan_id_increased(tree, &mut git_dir_scan_id)
4820            })
4821            .await;
4822
4823            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4824                .await;
4825        }
4826        .boxed_local()
4827    }
4828}
4829
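/// A sum-tree dimension that tracks the maximum path seen so far along with running
/// entry counts (total, non-ignored, files, non-ignored files), allowing traversals to
/// seek by path or by filtered entry count.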
4830#[derive(Clone, Debug)]
4831struct TraversalProgress<'a> {
4832    max_path: &'a Path,
4833    count: usize,
4834    non_ignored_count: usize,
4835    file_count: usize,
4836    non_ignored_file_count: usize,
4837}
4838
4839impl<'a> TraversalProgress<'a> {
4840    fn count(&self, include_files: bool, include_dirs: bool, include_ignored: bool) -> usize {
4841        match (include_files, include_dirs, include_ignored) {
4842            (true, true, true) => self.count,
4843            (true, true, false) => self.non_ignored_count,
4844            (true, false, true) => self.file_count,
4845            (true, false, false) => self.non_ignored_file_count,
4846            (false, true, true) => self.count - self.file_count,
4847            (false, true, false) => self.non_ignored_count - self.non_ignored_file_count,
4848            (false, false, _) => 0,
4849        }
4850    }
4851}
4852
4853impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
4854    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
4855        self.max_path = summary.max_path.as_ref();
4856        self.count += summary.count;
4857        self.non_ignored_count += summary.non_ignored_count;
4858        self.file_count += summary.file_count;
4859        self.non_ignored_file_count += summary.non_ignored_file_count;
4860    }
4861}
4862
4863impl<'a> Default for TraversalProgress<'a> {
4864    fn default() -> Self {
4865        Self {
4866            max_path: Path::new(""),
4867            count: 0,
4868            non_ignored_count: 0,
4869            file_count: 0,
4870            non_ignored_file_count: 0,
4871        }
4872    }
4873}
4874
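/// Aggregate counts of added, modified, and conflicted entries, summed across subtrees
/// as a sum-tree dimension.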
4875#[derive(Clone, Debug, Default, Copy)]
4876struct GitStatuses {
4877    added: usize,
4878    modified: usize,
4879    conflict: usize,
4880}
4881
4882impl AddAssign for GitStatuses {
4883    fn add_assign(&mut self, rhs: Self) {
4884        self.added += rhs.added;
4885        self.modified += rhs.modified;
4886        self.conflict += rhs.conflict;
4887    }
4888}
4889
4890impl Sub for GitStatuses {
4891    type Output = GitStatuses;
4892
4893    fn sub(self, rhs: Self) -> Self::Output {
4894        GitStatuses {
4895            added: self.added - rhs.added,
4896            modified: self.modified - rhs.modified,
4897            conflict: self.conflict - rhs.conflict,
4898        }
4899    }
4900}
4901
4902impl<'a> sum_tree::Dimension<'a, EntrySummary> for GitStatuses {
4903    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
4904        *self += summary.statuses
4905    }
4906}
4907
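/// A cursor-based iterator over the entries of a snapshot, optionally filtered to
/// include or exclude files, directories, and ignored entries, starting from a given path.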
4908pub struct Traversal<'a> {
4909    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
4910    include_ignored: bool,
4911    include_files: bool,
4912    include_dirs: bool,
4913}
4914
4915impl<'a> Traversal<'a> {
4916    fn new(
4917        entries: &'a SumTree<Entry>,
4918        include_files: bool,
4919        include_dirs: bool,
4920        include_ignored: bool,
4921        start_path: &Path,
4922    ) -> Self {
4923        let mut cursor = entries.cursor();
4924        cursor.seek(&TraversalTarget::Path(start_path), Bias::Left, &());
4925        let mut traversal = Self {
4926            cursor,
4927            include_files,
4928            include_dirs,
4929            include_ignored,
4930        };
4931        if traversal.end_offset() == traversal.start_offset() {
4932            traversal.next();
4933        }
4934        traversal
4935    }
4936    pub fn advance(&mut self) -> bool {
4937        self.advance_by(1)
4938    }
4939
4940    pub fn advance_by(&mut self, count: usize) -> bool {
4941        self.cursor.seek_forward(
4942            &TraversalTarget::Count {
4943                count: self.end_offset() + count,
4944                include_dirs: self.include_dirs,
4945                include_files: self.include_files,
4946                include_ignored: self.include_ignored,
4947            },
4948            Bias::Left,
4949            &(),
4950        )
4951    }
4952
4953    pub fn advance_to_sibling(&mut self) -> bool {
4954        while let Some(entry) = self.cursor.item() {
4955            self.cursor.seek_forward(
4956                &TraversalTarget::PathSuccessor(&entry.path),
4957                Bias::Left,
4958                &(),
4959            );
4960            if let Some(entry) = self.cursor.item() {
4961                if (self.include_files || !entry.is_file())
4962                    && (self.include_dirs || !entry.is_dir())
4963                    && (self.include_ignored || !entry.is_ignored)
4964                {
4965                    return true;
4966                }
4967            }
4968        }
4969        false
4970    }
4971
4972    pub fn back_to_parent(&mut self) -> bool {
4973        let Some(parent_path) = self.cursor.item().and_then(|entry| entry.path.parent()) else {
4974            return false;
4975        };
4976        self.cursor
4977            .seek(&TraversalTarget::Path(parent_path), Bias::Left, &())
4978    }
4979
4980    pub fn entry(&self) -> Option<&'a Entry> {
4981        self.cursor.item()
4982    }
4983
4984    pub fn start_offset(&self) -> usize {
4985        self.cursor
4986            .start()
4987            .count(self.include_files, self.include_dirs, self.include_ignored)
4988    }
4989
4990    pub fn end_offset(&self) -> usize {
4991        self.cursor
4992            .end(&())
4993            .count(self.include_files, self.include_dirs, self.include_ignored)
4994    }
4995}
4996
4997impl<'a> Iterator for Traversal<'a> {
4998    type Item = &'a Entry;
4999
5000    fn next(&mut self) -> Option<Self::Item> {
5001        if let Some(item) = self.entry() {
5002            self.advance();
5003            Some(item)
5004        } else {
5005            None
5006        }
5007    }
5008}
5009
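/// A seek target within the entry tree: an exact path, the first path that is not a
/// descendant of a given path, or the n-th entry matching a traversal's filters.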
5010#[derive(Debug)]
5011enum TraversalTarget<'a> {
5012    Path(&'a Path),
5013    PathSuccessor(&'a Path),
5014    Count {
5015        count: usize,
5016        include_files: bool,
5017        include_ignored: bool,
5018        include_dirs: bool,
5019    },
5020}
5021
5022impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
5023    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
5024        match self {
5025            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
5026            TraversalTarget::PathSuccessor(path) => {
5027                if cursor_location.max_path.starts_with(path) {
5028                    Ordering::Greater
5029                } else {
5030                    Ordering::Equal
5031                }
5032            }
5033            TraversalTarget::Count {
5034                count,
5035                include_files,
5036                include_dirs,
5037                include_ignored,
5038            } => Ord::cmp(
5039                count,
5040                &cursor_location.count(*include_files, *include_dirs, *include_ignored),
5041            ),
5042        }
5043    }
5044}
5045
5046impl<'a, 'b> SeekTarget<'a, EntrySummary, (TraversalProgress<'a>, GitStatuses)>
5047    for TraversalTarget<'b>
5048{
5049    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitStatuses), _: &()) -> Ordering {
5050        self.cmp(&cursor_location.0, &())
5051    }
5052}
5053
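/// Iterates over the immediate children of `parent_path` by advancing the underlying
/// traversal to the next sibling after yielding each entry.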
5054pub struct ChildEntriesIter<'a> {
5055    parent_path: &'a Path,
5056    traversal: Traversal<'a>,
5057}
5058
5059impl<'a> Iterator for ChildEntriesIter<'a> {
5060    type Item = &'a Entry;
5061
5062    fn next(&mut self) -> Option<Self::Item> {
5063        if let Some(item) = self.traversal.entry() {
5064            if item.path.starts_with(&self.parent_path) {
5065                self.traversal.advance_to_sibling();
5066                return Some(item);
5067            }
5068        }
5069        None
5070    }
5071}
5072
5073impl<'a> From<&'a Entry> for proto::Entry {
5074    fn from(entry: &'a Entry) -> Self {
5075        Self {
5076            id: entry.id.to_proto(),
5077            is_dir: entry.is_dir(),
5078            path: entry.path.to_string_lossy().into(),
5079            inode: entry.inode,
5080            mtime: entry.mtime.map(|time| time.into()),
5081            is_symlink: entry.is_symlink,
5082            is_ignored: entry.is_ignored,
5083            is_external: entry.is_external,
5084            git_status: entry.git_status.map(git_status_to_proto),
5085        }
5086    }
5087}
5088
5089impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
5090    type Error = anyhow::Error;
5091
5092    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
5093        let kind = if entry.is_dir {
5094            EntryKind::Dir
5095        } else {
5096            let mut char_bag = *root_char_bag;
5097            char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
5098            EntryKind::File(char_bag)
5099        };
5100        let path: Arc<Path> = PathBuf::from(entry.path).into();
5101        Ok(Entry {
5102            id: ProjectEntryId::from_proto(entry.id),
5103            kind,
5104            path,
5105            inode: entry.inode,
5106            mtime: entry.mtime.map(|time| time.into()),
5107            canonical_path: None,
5108            is_ignored: entry.is_ignored,
5109            is_external: entry.is_external,
5110            git_status: git_status_from_proto(entry.git_status),
5111            is_private: false,
5112            is_symlink: entry.is_symlink,
5113        })
5114    }
5115}
5116
5117fn git_status_from_proto(git_status: Option<i32>) -> Option<GitFileStatus> {
5118    git_status.and_then(|status| {
5119        proto::GitStatus::from_i32(status).map(|status| match status {
5120            proto::GitStatus::Added => GitFileStatus::Added,
5121            proto::GitStatus::Modified => GitFileStatus::Modified,
5122            proto::GitStatus::Conflict => GitFileStatus::Conflict,
5123        })
5124    })
5125}
5126
5127fn git_status_to_proto(status: GitFileStatus) -> i32 {
5128    match status {
5129        GitFileStatus::Added => proto::GitStatus::Added as i32,
5130        GitFileStatus::Modified => proto::GitStatus::Modified as i32,
5131        GitFileStatus::Conflict => proto::GitStatus::Conflict as i32,
5132    }
5133}
5134
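/// An identifier for a worktree entry, allocated from a shared atomic counter and
/// convertible to and from its protobuf representation.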
5135#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
5136pub struct ProjectEntryId(usize);
5137
5138impl ProjectEntryId {
5139    pub const MAX: Self = Self(usize::MAX);
5140    pub const MIN: Self = Self(usize::MIN);
5141
5142    pub fn new(counter: &AtomicUsize) -> Self {
5143        Self(counter.fetch_add(1, SeqCst))
5144    }
5145
5146    pub fn from_proto(id: u64) -> Self {
5147        Self(id as usize)
5148    }
5149
5150    pub fn to_proto(&self) -> u64 {
5151        self.0 as u64
5152    }
5153
5154    pub fn to_usize(&self) -> usize {
5155        self.0
5156    }
5157}
5158
5159#[cfg(any(test, feature = "test-support"))]
5160impl CreatedEntry {
5161    pub fn to_included(self) -> Option<Entry> {
5162        match self {
5163            CreatedEntry::Included(entry) => Some(entry),
5164            CreatedEntry::Excluded { .. } => None,
5165        }
5166    }
5167}