worktree.rs

   1mod ignore;
   2mod worktree_settings;
   3#[cfg(test)]
   4mod worktree_tests;
   5
   6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   7use anyhow::{anyhow, Context as _, Result};
   8use clock::ReplicaId;
   9use collections::{HashMap, HashSet, VecDeque};
  10use fs::{copy_recursive, Fs, RemoveOptions, Watcher};
  11use futures::{
  12    channel::{
  13        mpsc::{self, UnboundedSender},
  14        oneshot,
  15    },
  16    select_biased,
  17    stream::select,
  18    task::Poll,
  19    FutureExt as _, Stream, StreamExt,
  20};
  21use fuzzy::CharBag;
  22use git::{
  23    repository::{GitFileStatus, GitRepository, RepoPath},
  24    status::GitStatus,
  25    DOT_GIT, GITIGNORE,
  26};
  27use gpui::{
  28    AppContext, AsyncAppContext, BackgroundExecutor, Context, EventEmitter, Model, ModelContext,
  29    Task,
  30};
  31use ignore::IgnoreStack;
  32use parking_lot::Mutex;
  33use paths::local_settings_folder_relative_path;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use rpc::proto::{self, AnyProtoClient};
  40use settings::{Settings, SettingsLocation, SettingsStore};
  41use smol::channel::{self, Sender};
  42use std::{
  43    any::Any,
  44    cmp::{self, Ordering},
  45    convert::TryFrom,
  46    ffi::OsStr,
  47    fmt,
  48    future::Future,
  49    mem,
  50    ops::{AddAssign, Deref, DerefMut, Sub},
  51    path::{Path, PathBuf},
  52    pin::Pin,
  53    sync::{
  54        atomic::{AtomicUsize, Ordering::SeqCst},
  55        Arc,
  56    },
  57    time::{Duration, Instant, SystemTime},
  58};
  59use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  60use text::{LineEnding, Rope};
  61use util::{paths::home_dir, ResultExt};
  62pub use worktree_settings::WorktreeSettings;
  63
  64#[cfg(feature = "test-support")]
  65pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
  66#[cfg(not(feature = "test-support"))]
  67pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
  68
  69#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  70pub struct WorktreeId(usize);
  71
  72impl From<WorktreeId> for usize {
  73    fn from(value: WorktreeId) -> Self {
  74        value.0
  75    }
  76}
  77
  78/// A set of local or remote files that are being opened as part of a project.
  79/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
   80/// Stores git repository data and diagnostics for its file(s).
  81///
   82/// Has an absolute path, and may or may not be visible in the Zed UI.
  83/// May correspond to a directory or a single file.
  84/// Possible examples:
   85/// * a drag-and-dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
  86/// * a directory opened in Zed — may be added as a visible entry to the current worktree
  87///
  88/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
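///
/// A minimal usage sketch (the `worktree` binding is illustrative; in practice
/// it comes from reading a `Model<Worktree>`):
///
/// ```ignore
/// if worktree.is_local() {
///     // Local worktrees are backed by the filesystem and scanned in the background.
///     println!("local worktree rooted at {:?}", worktree.abs_path());
/// } else {
///     // Remote worktrees mirror a collaborator's worktree over the collab protocol.
///     println!("remote worktree for replica {:?}", worktree.replica_id());
/// }
/// ```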
  89pub enum Worktree {
  90    Local(LocalWorktree),
  91    Remote(RemoteWorktree),
  92}
  93
   94/// An entry created in the worktree.
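///
/// A minimal sketch of how a caller might branch on the result of
/// `create_entry` (the `created` binding is illustrative):
///
/// ```ignore
/// match created {
///     CreatedEntry::Included(entry) => println!("indexed at {:?}", entry.path),
///     CreatedEntry::Excluded { abs_path } => println!("excluded: {abs_path:?}"),
/// }
/// ```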
  95#[derive(Debug)]
  96pub enum CreatedEntry {
   97    /// Created and indexed by the worktree, carrying the corresponding entry.
  98    Included(Entry),
   99    /// Created, but not indexed because it falls under the worktree's exclusion filters.
 100    Excluded { abs_path: PathBuf },
 101}
 102
 103pub struct LoadedFile {
 104    pub file: Arc<File>,
 105    pub text: String,
 106    pub diff_base: Option<String>,
 107}
 108
 109pub struct LocalWorktree {
 110    snapshot: LocalSnapshot,
 111    scan_requests_tx: channel::Sender<ScanRequest>,
 112    path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
 113    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
 114    _background_scanner_tasks: Vec<Task<()>>,
 115    update_observer: Option<UpdateObservationState>,
 116    fs: Arc<dyn Fs>,
 117    fs_case_sensitive: bool,
 118    visible: bool,
 119    next_entry_id: Arc<AtomicUsize>,
 120    settings: WorktreeSettings,
 121    share_private_files: bool,
 122}
 123
 124struct ScanRequest {
 125    relative_paths: Vec<Arc<Path>>,
 126    done: barrier::Sender,
 127}
 128
 129pub struct RemoteWorktree {
 130    snapshot: Snapshot,
 131    background_snapshot: Arc<Mutex<(Snapshot, Vec<proto::UpdateWorktree>)>>,
 132    project_id: u64,
 133    client: AnyProtoClient,
 134    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
 135    update_observer: Option<mpsc::UnboundedSender<proto::UpdateWorktree>>,
 136    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
 137    replica_id: ReplicaId,
 138    visible: bool,
 139    disconnected: bool,
 140}
 141
 142#[derive(Clone)]
 143pub struct Snapshot {
 144    id: WorktreeId,
 145    abs_path: Arc<Path>,
 146    root_name: String,
 147    root_char_bag: CharBag,
 148    entries_by_path: SumTree<Entry>,
 149    entries_by_id: SumTree<PathEntry>,
 150    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 151
 152    /// A number that increases every time the worktree begins scanning
 153    /// a set of paths from the filesystem. This scanning could be caused
 154    /// by some operation performed on the worktree, such as reading or
 155    /// writing a file, or by an event reported by the filesystem.
 156    scan_id: usize,
 157
 158    /// The latest scan id that has completed, and whose preceding scans
 159    /// have all completed. The current `scan_id` could be more than one
 160    /// greater than the `completed_scan_id` if operations are performed
 161    /// on the worktree while it is processing a file-system event.
 162    completed_scan_id: usize,
 163}
 164
 165#[derive(Clone, Debug, PartialEq, Eq)]
 166pub struct RepositoryEntry {
 167    pub(crate) work_directory: WorkDirectoryEntry,
 168    pub(crate) branch: Option<Arc<str>>,
 169
 170    /// If location_in_repo is set, it means the .git folder is external
 171    /// and in a parent folder of the project root.
 172    /// In that case, the work_directory field will point to the
 173    /// project-root and location_in_repo contains the location of the
 174    /// project-root in the repository.
 175    ///
 176    /// Example:
 177    ///
 178    ///     my_root_folder/          <-- repository root
 179    ///       .git
 180    ///       my_sub_folder_1/
 181    ///         project_root/        <-- Project root, Zed opened here
 182    ///           ...
 183    ///
 184    /// For this setup, the attributes will have the following values:
 185    ///
 186    ///     work_directory: pointing to "" entry
 187    ///     location_in_repo: Some("my_sub_folder_1/project_root")
 188    pub(crate) location_in_repo: Option<Arc<Path>>,
 189}
 190
 191impl RepositoryEntry {
 192    pub fn branch(&self) -> Option<Arc<str>> {
 193        self.branch.clone()
 194    }
 195
 196    pub fn work_directory_id(&self) -> ProjectEntryId {
 197        *self.work_directory
 198    }
 199
 200    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 201        snapshot
 202            .entry_for_id(self.work_directory_id())
 203            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 204    }
 205
 206    pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
 207        self.into()
 208    }
 209
 210    /// relativize returns the given project path relative to the root folder of the
 211    /// repository.
  212    /// If the root of the repository (and its .git folder) is located in a parent folder
 213    /// of the project root folder, then the returned RepoPath is relative to the root
 214    /// of the repository and not a valid path inside the project.
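    ///
    /// A minimal sketch of the intent, reusing the external-repository layout
    /// documented on `location_in_repo` above (the `repo_entry` and `snapshot`
    /// bindings are illustrative):
    ///
    /// ```ignore
    /// // With location_in_repo == Some("my_sub_folder_1/project_root") and a
    /// // work_directory pointing at the worktree root, a project-relative path
    /// // is first re-rooted onto the repository before the work directory
    /// // prefix is stripped:
    /// let repo_path = repo_entry.relativize(&snapshot, Path::new("src/main.rs"))?;
    /// // => RepoPath for "my_sub_folder_1/project_root/src/main.rs"
    /// ```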
 215    pub fn relativize(&self, worktree: &Snapshot, path: &Path) -> Result<RepoPath> {
 216        let relativize_path = |path: &Path| {
 217            let entry = worktree
 218                .entry_for_id(self.work_directory.0)
 219                .ok_or_else(|| anyhow!("entry not found"))?;
 220
 221            let relativized_path = path
 222                .strip_prefix(&entry.path)
 223                .map_err(|_| anyhow!("could not relativize {:?} against {:?}", path, entry.path))?;
 224
 225            Ok(relativized_path.into())
 226        };
 227
 228        if let Some(location_in_repo) = &self.location_in_repo {
 229            relativize_path(&location_in_repo.join(path))
 230        } else {
 231            relativize_path(path)
 232        }
 233    }
 234}
 235
 236impl From<&RepositoryEntry> for proto::RepositoryEntry {
 237    fn from(value: &RepositoryEntry) -> Self {
 238        proto::RepositoryEntry {
 239            work_directory_id: value.work_directory.to_proto(),
 240            branch: value.branch.as_ref().map(|str| str.to_string()),
 241        }
 242    }
 243}
 244
 245/// This path corresponds to the 'content path' of a repository in relation
 246/// to Zed's project root.
  247/// In the majority of cases, this is the folder that contains the .git folder.
 248/// But if a sub-folder of a git repository is opened, this corresponds to the
 249/// project root and the .git folder is located in a parent directory.
 250#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 251pub struct RepositoryWorkDirectory(pub(crate) Arc<Path>);
 252
 253impl Default for RepositoryWorkDirectory {
 254    fn default() -> Self {
 255        RepositoryWorkDirectory(Arc::from(Path::new("")))
 256    }
 257}
 258
 259impl AsRef<Path> for RepositoryWorkDirectory {
 260    fn as_ref(&self) -> &Path {
 261        self.0.as_ref()
 262    }
 263}
 264
 265#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 266pub struct WorkDirectoryEntry(ProjectEntryId);
 267
 268impl Deref for WorkDirectoryEntry {
 269    type Target = ProjectEntryId;
 270
 271    fn deref(&self) -> &Self::Target {
 272        &self.0
 273    }
 274}
 275
 276impl From<ProjectEntryId> for WorkDirectoryEntry {
 277    fn from(value: ProjectEntryId) -> Self {
 278        WorkDirectoryEntry(value)
 279    }
 280}
 281
 282#[derive(Debug, Clone)]
 283pub struct LocalSnapshot {
 284    snapshot: Snapshot,
 285    /// All of the gitignore files in the worktree, indexed by their relative path.
 286    /// The boolean indicates whether the gitignore needs to be updated.
 287    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
 288    /// All of the git repositories in the worktree, indexed by the project entry
 289    /// id of their parent directory.
 290    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 291}
 292
 293struct BackgroundScannerState {
 294    snapshot: LocalSnapshot,
 295    scanned_dirs: HashSet<ProjectEntryId>,
 296    path_prefixes_to_scan: HashSet<Arc<Path>>,
 297    paths_to_scan: HashSet<Arc<Path>>,
 298    /// The ids of all of the entries that were removed from the snapshot
 299    /// as part of the current update. These entry ids may be re-used
 300    /// if the same inode is discovered at a new path, or if the given
 301    /// path is re-created after being deleted.
 302    removed_entry_ids: HashMap<(u64, SystemTime), ProjectEntryId>,
 303    changed_paths: Vec<Arc<Path>>,
 304    prev_snapshot: Snapshot,
 305}
 306
 307#[derive(Debug, Clone)]
 308pub struct LocalRepositoryEntry {
 309    pub(crate) git_dir_scan_id: usize,
 310    pub(crate) repo_ptr: Arc<dyn GitRepository>,
 311    /// Path to the actual .git folder.
  312    /// Note: if .git is a file, this points to the folder indicated by the .git file.
 313    pub(crate) git_dir_path: Arc<Path>,
 314}
 315
 316impl LocalRepositoryEntry {
 317    pub fn repo(&self) -> &Arc<dyn GitRepository> {
 318        &self.repo_ptr
 319    }
 320}
 321
 322impl Deref for LocalSnapshot {
 323    type Target = Snapshot;
 324
 325    fn deref(&self) -> &Self::Target {
 326        &self.snapshot
 327    }
 328}
 329
 330impl DerefMut for LocalSnapshot {
 331    fn deref_mut(&mut self) -> &mut Self::Target {
 332        &mut self.snapshot
 333    }
 334}
 335
 336enum ScanState {
 337    Started,
 338    Updated {
 339        snapshot: LocalSnapshot,
 340        changes: UpdatedEntriesSet,
 341        barrier: Option<barrier::Sender>,
 342        scanning: bool,
 343    },
 344}
 345
 346struct UpdateObservationState {
 347    snapshots_tx:
 348        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
 349    resume_updates: watch::Sender<()>,
 350    _maintain_remote_snapshot: Task<Option<()>>,
 351}
 352
 353#[derive(Clone)]
 354pub enum Event {
 355    UpdatedEntries(UpdatedEntriesSet),
 356    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
 357    DeletedEntry(ProjectEntryId),
 358}
 359
 360static EMPTY_PATH: &str = "";
 361
 362impl EventEmitter<Event> for Worktree {}
 363
 364impl Worktree {
 365    pub async fn local(
 366        path: impl Into<Arc<Path>>,
 367        visible: bool,
 368        fs: Arc<dyn Fs>,
 369        next_entry_id: Arc<AtomicUsize>,
 370        cx: &mut AsyncAppContext,
 371    ) -> Result<Model<Self>> {
 372        let abs_path = path.into();
 373        let metadata = fs
 374            .metadata(&abs_path)
 375            .await
 376            .context("failed to stat worktree path")?;
 377
 378        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
 379            log::error!(
 380                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
 381            );
 382            true
 383        });
 384
 385        cx.new_model(move |cx: &mut ModelContext<Worktree>| {
 386            let worktree_id = cx.handle().entity_id().as_u64();
 387            let settings_location = Some(SettingsLocation {
 388                worktree_id: worktree_id as usize,
 389                path: Path::new(EMPTY_PATH),
 390            });
 391
 392            let settings = WorktreeSettings::get(settings_location, cx).clone();
 393            cx.observe_global::<SettingsStore>(move |this, cx| {
 394                if let Self::Local(this) = this {
 395                    let settings = WorktreeSettings::get(settings_location, cx).clone();
 396                    if settings != this.settings {
 397                        this.settings = settings;
 398                        this.restart_background_scanners(cx);
 399                    }
 400                }
 401            })
 402            .detach();
 403
 404            let mut snapshot = LocalSnapshot {
 405                ignores_by_parent_abs_path: Default::default(),
 406                git_repositories: Default::default(),
 407                snapshot: Snapshot::new(
 408                    cx.entity_id().as_u64(),
 409                    abs_path
 410                        .file_name()
 411                        .map_or(String::new(), |f| f.to_string_lossy().to_string()),
 412                    abs_path,
 413                ),
 414            };
 415
 416            if let Some(metadata) = metadata {
 417                snapshot.insert_entry(
 418                    Entry::new(
 419                        Arc::from(Path::new("")),
 420                        &metadata,
 421                        &next_entry_id,
 422                        snapshot.root_char_bag,
 423                        None,
 424                    ),
 425                    fs.as_ref(),
 426                );
 427            }
 428
 429            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
 430            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
 431            let mut worktree = LocalWorktree {
 432                share_private_files: false,
 433                next_entry_id,
 434                snapshot,
 435                is_scanning: watch::channel_with(true),
 436                update_observer: None,
 437                scan_requests_tx,
 438                path_prefixes_to_scan_tx,
 439                _background_scanner_tasks: Vec::new(),
 440                fs,
 441                fs_case_sensitive,
 442                visible,
 443                settings,
 444            };
 445            worktree.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
 446            Worktree::Local(worktree)
 447        })
 448    }
 449
 450    pub fn remote(
 451        project_id: u64,
 452        replica_id: ReplicaId,
 453        worktree: proto::WorktreeMetadata,
 454        client: AnyProtoClient,
 455        cx: &mut AppContext,
 456    ) -> Model<Self> {
 457        cx.new_model(|cx: &mut ModelContext<Self>| {
 458            let snapshot = Snapshot::new(
 459                worktree.id,
 460                worktree.root_name,
 461                Arc::from(PathBuf::from(worktree.abs_path)),
 462            );
 463
 464            let background_snapshot = Arc::new(Mutex::new((snapshot.clone(), Vec::new())));
 465            let (background_updates_tx, mut background_updates_rx) = mpsc::unbounded();
 466            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 467
 468            let worktree = RemoteWorktree {
 469                client,
 470                project_id,
 471                replica_id,
 472                snapshot,
 473                background_snapshot: background_snapshot.clone(),
 474                updates_tx: Some(background_updates_tx),
 475                update_observer: None,
 476                snapshot_subscriptions: Default::default(),
 477                visible: worktree.visible,
 478                disconnected: false,
 479            };
 480
  481            // Apply updates to a separate snapshot in a background task, then
 482            // send them to a foreground task which updates the model.
 483            cx.background_executor()
 484                .spawn(async move {
 485                    while let Some(update) = background_updates_rx.next().await {
 486                        {
 487                            let mut lock = background_snapshot.lock();
 488                            if let Err(error) = lock.0.apply_remote_update(update.clone()) {
 489                                log::error!("error applying worktree update: {}", error);
 490                            }
 491                            lock.1.push(update);
 492                        }
 493                        snapshot_updated_tx.send(()).await.ok();
 494                    }
 495                })
 496                .detach();
 497
 498            // On the foreground task, update to the latest snapshot and notify
 499            // any update observer of all updates that led to that snapshot.
 500            cx.spawn(|this, mut cx| async move {
 501                while (snapshot_updated_rx.recv().await).is_some() {
 502                    this.update(&mut cx, |this, cx| {
 503                        let this = this.as_remote_mut().unwrap();
 504                        {
 505                            let mut lock = this.background_snapshot.lock();
 506                            this.snapshot = lock.0.clone();
 507                            if let Some(tx) = &this.update_observer {
 508                                for update in lock.1.drain(..) {
 509                                    tx.unbounded_send(update).ok();
 510                                }
 511                            }
 512                        };
 513                        cx.emit(Event::UpdatedEntries(Arc::from([])));
 514                        cx.notify();
 515                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 516                            if this.observed_snapshot(*scan_id) {
 517                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 518                                let _ = tx.send(());
 519                            } else {
 520                                break;
 521                            }
 522                        }
 523                    })?;
 524                }
 525                anyhow::Ok(())
 526            })
 527            .detach();
 528
 529            Worktree::Remote(worktree)
 530        })
 531    }
 532
 533    pub fn as_local(&self) -> Option<&LocalWorktree> {
 534        if let Worktree::Local(worktree) = self {
 535            Some(worktree)
 536        } else {
 537            None
 538        }
 539    }
 540
 541    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 542        if let Worktree::Remote(worktree) = self {
 543            Some(worktree)
 544        } else {
 545            None
 546        }
 547    }
 548
 549    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 550        if let Worktree::Local(worktree) = self {
 551            Some(worktree)
 552        } else {
 553            None
 554        }
 555    }
 556
 557    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 558        if let Worktree::Remote(worktree) = self {
 559            Some(worktree)
 560        } else {
 561            None
 562        }
 563    }
 564
 565    pub fn is_local(&self) -> bool {
 566        matches!(self, Worktree::Local(_))
 567    }
 568
 569    pub fn is_remote(&self) -> bool {
 570        !self.is_local()
 571    }
 572
 573    pub fn snapshot(&self) -> Snapshot {
 574        match self {
 575            Worktree::Local(worktree) => worktree.snapshot.snapshot.clone(),
 576            Worktree::Remote(worktree) => worktree.snapshot.clone(),
 577        }
 578    }
 579
 580    pub fn scan_id(&self) -> usize {
 581        match self {
 582            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 583            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 584        }
 585    }
 586
 587    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 588        proto::WorktreeMetadata {
 589            id: self.id().to_proto(),
 590            root_name: self.root_name().to_string(),
 591            visible: self.is_visible(),
 592            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 593        }
 594    }
 595
 596    pub fn completed_scan_id(&self) -> usize {
 597        match self {
 598            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 599            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 600        }
 601    }
 602
 603    pub fn is_visible(&self) -> bool {
 604        match self {
 605            Worktree::Local(worktree) => worktree.visible,
 606            Worktree::Remote(worktree) => worktree.visible,
 607        }
 608    }
 609
 610    pub fn replica_id(&self) -> ReplicaId {
 611        match self {
 612            Worktree::Local(_) => 0,
 613            Worktree::Remote(worktree) => worktree.replica_id,
 614        }
 615    }
 616
 617    pub fn abs_path(&self) -> Arc<Path> {
 618        match self {
 619            Worktree::Local(worktree) => worktree.abs_path.clone(),
 620            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 621        }
 622    }
 623
 624    pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<Arc<File>> {
 625        let entry = self.root_entry()?;
 626        Some(File::for_entry(entry.clone(), cx.handle()))
 627    }
 628
 629    pub fn observe_updates<F, Fut>(
 630        &mut self,
 631        project_id: u64,
 632        cx: &mut ModelContext<Worktree>,
 633        callback: F,
 634    ) where
 635        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
 636        Fut: 'static + Send + Future<Output = bool>,
 637    {
 638        match self {
 639            Worktree::Local(this) => this.observe_updates(project_id, cx, callback),
 640            Worktree::Remote(this) => this.observe_updates(project_id, cx, callback),
 641        }
 642    }
 643
 644    pub fn stop_observing_updates(&mut self) {
 645        match self {
 646            Worktree::Local(this) => {
 647                this.update_observer.take();
 648            }
 649            Worktree::Remote(this) => {
 650                this.update_observer.take();
 651            }
 652        }
 653    }
 654
 655    #[cfg(any(test, feature = "test-support"))]
 656    pub fn has_update_observer(&self) -> bool {
 657        match self {
 658            Worktree::Local(this) => this.update_observer.is_some(),
 659            Worktree::Remote(this) => this.update_observer.is_some(),
 660        }
 661    }
 662
 663    pub fn load_file(
 664        &self,
 665        path: &Path,
 666        cx: &mut ModelContext<Worktree>,
 667    ) -> Task<Result<LoadedFile>> {
 668        match self {
 669            Worktree::Local(this) => this.load_file(path, cx),
 670            Worktree::Remote(_) => {
 671                Task::ready(Err(anyhow!("remote worktrees can't yet load files")))
 672            }
 673        }
 674    }
 675
 676    pub fn write_file(
 677        &self,
 678        path: &Path,
 679        text: Rope,
 680        line_ending: LineEnding,
 681        cx: &mut ModelContext<Worktree>,
 682    ) -> Task<Result<Arc<File>>> {
 683        match self {
 684            Worktree::Local(this) => this.write_file(path, text, line_ending, cx),
 685            Worktree::Remote(_) => {
 686                Task::ready(Err(anyhow!("remote worktree can't yet write files")))
 687            }
 688        }
 689    }
 690
 691    pub fn create_entry(
 692        &mut self,
 693        path: impl Into<Arc<Path>>,
 694        is_directory: bool,
 695        cx: &mut ModelContext<Worktree>,
 696    ) -> Task<Result<CreatedEntry>> {
 697        let path = path.into();
 698        let worktree_id = self.id();
 699        match self {
 700            Worktree::Local(this) => this.create_entry(path, is_directory, cx),
 701            Worktree::Remote(this) => {
 702                let project_id = this.project_id;
 703                let request = this.client.request(proto::CreateProjectEntry {
 704                    worktree_id: worktree_id.to_proto(),
 705                    project_id,
 706                    path: path.to_string_lossy().into(),
 707                    is_directory,
 708                });
 709                cx.spawn(move |this, mut cx| async move {
 710                    let response = request.await?;
 711                    match response.entry {
 712                        Some(entry) => this
 713                            .update(&mut cx, |worktree, cx| {
 714                                worktree.as_remote_mut().unwrap().insert_entry(
 715                                    entry,
 716                                    response.worktree_scan_id as usize,
 717                                    cx,
 718                                )
 719                            })?
 720                            .await
 721                            .map(CreatedEntry::Included),
 722                        None => {
 723                            let abs_path = this.update(&mut cx, |worktree, _| {
 724                                worktree
 725                                    .absolutize(&path)
 726                                    .with_context(|| format!("absolutizing {path:?}"))
 727                            })??;
 728                            Ok(CreatedEntry::Excluded { abs_path })
 729                        }
 730                    }
 731                })
 732            }
 733        }
 734    }
 735
 736    pub fn delete_entry(
 737        &mut self,
 738        entry_id: ProjectEntryId,
 739        trash: bool,
 740        cx: &mut ModelContext<Worktree>,
 741    ) -> Option<Task<Result<()>>> {
 742        let task = match self {
 743            Worktree::Local(this) => this.delete_entry(entry_id, trash, cx),
 744            Worktree::Remote(this) => this.delete_entry(entry_id, trash, cx),
 745        }?;
 746        cx.emit(Event::DeletedEntry(entry_id));
 747        Some(task)
 748    }
 749
 750    pub fn rename_entry(
 751        &mut self,
 752        entry_id: ProjectEntryId,
 753        new_path: impl Into<Arc<Path>>,
 754        cx: &mut ModelContext<Self>,
 755    ) -> Task<Result<CreatedEntry>> {
 756        let new_path = new_path.into();
 757        match self {
 758            Worktree::Local(this) => this.rename_entry(entry_id, new_path, cx),
 759            Worktree::Remote(this) => this.rename_entry(entry_id, new_path, cx),
 760        }
 761    }
 762
 763    pub fn copy_entry(
 764        &mut self,
 765        entry_id: ProjectEntryId,
 766        new_path: impl Into<Arc<Path>>,
 767        cx: &mut ModelContext<Self>,
 768    ) -> Task<Result<Option<Entry>>> {
 769        let new_path = new_path.into();
 770        match self {
 771            Worktree::Local(this) => this.copy_entry(entry_id, new_path, cx),
 772            Worktree::Remote(this) => {
 773                let response = this.client.request(proto::CopyProjectEntry {
 774                    project_id: this.project_id,
 775                    entry_id: entry_id.to_proto(),
 776                    new_path: new_path.to_string_lossy().into(),
 777                });
 778                cx.spawn(move |this, mut cx| async move {
 779                    let response = response.await?;
 780                    match response.entry {
 781                        Some(entry) => this
 782                            .update(&mut cx, |worktree, cx| {
 783                                worktree.as_remote_mut().unwrap().insert_entry(
 784                                    entry,
 785                                    response.worktree_scan_id as usize,
 786                                    cx,
 787                                )
 788                            })?
 789                            .await
 790                            .map(Some),
 791                        None => Ok(None),
 792                    }
 793                })
 794            }
 795        }
 796    }
 797
 798    pub fn copy_external_entries(
 799        &mut self,
 800        target_directory: PathBuf,
 801        paths: Vec<Arc<Path>>,
 802        overwrite_existing_files: bool,
 803        cx: &mut ModelContext<Worktree>,
 804    ) -> Task<Result<Vec<ProjectEntryId>>> {
 805        match self {
 806            Worktree::Local(this) => {
 807                this.copy_external_entries(target_directory, paths, overwrite_existing_files, cx)
 808            }
 809            _ => Task::ready(Err(anyhow!(
 810                "Copying external entries is not supported for remote worktrees"
 811            ))),
 812        }
 813    }
 814
 815    pub fn expand_entry(
 816        &mut self,
 817        entry_id: ProjectEntryId,
 818        cx: &mut ModelContext<Worktree>,
 819    ) -> Option<Task<Result<()>>> {
 820        match self {
 821            Worktree::Local(this) => this.expand_entry(entry_id, cx),
 822            Worktree::Remote(this) => {
 823                let response = this.client.request(proto::ExpandProjectEntry {
 824                    project_id: this.project_id,
 825                    entry_id: entry_id.to_proto(),
 826                });
 827                Some(cx.spawn(move |this, mut cx| async move {
 828                    let response = response.await?;
 829                    this.update(&mut cx, |this, _| {
 830                        this.as_remote_mut()
 831                            .unwrap()
 832                            .wait_for_snapshot(response.worktree_scan_id as usize)
 833                    })?
 834                    .await?;
 835                    Ok(())
 836                }))
 837            }
 838        }
 839    }
 840
 841    pub async fn handle_create_entry(
 842        this: Model<Self>,
 843        request: proto::CreateProjectEntry,
 844        mut cx: AsyncAppContext,
 845    ) -> Result<proto::ProjectEntryResponse> {
 846        let (scan_id, entry) = this.update(&mut cx, |this, cx| {
 847            (
 848                this.scan_id(),
 849                this.create_entry(PathBuf::from(request.path), request.is_directory, cx),
 850            )
 851        })?;
 852        Ok(proto::ProjectEntryResponse {
 853            entry: match &entry.await? {
 854                CreatedEntry::Included(entry) => Some(entry.into()),
 855                CreatedEntry::Excluded { .. } => None,
 856            },
 857            worktree_scan_id: scan_id as u64,
 858        })
 859    }
 860
 861    pub async fn handle_delete_entry(
 862        this: Model<Self>,
 863        request: proto::DeleteProjectEntry,
 864        mut cx: AsyncAppContext,
 865    ) -> Result<proto::ProjectEntryResponse> {
 866        let (scan_id, task) = this.update(&mut cx, |this, cx| {
 867            (
 868                this.scan_id(),
 869                this.delete_entry(
 870                    ProjectEntryId::from_proto(request.entry_id),
 871                    request.use_trash,
 872                    cx,
 873                ),
 874            )
 875        })?;
 876        task.ok_or_else(|| anyhow!("invalid entry"))?.await?;
 877        Ok(proto::ProjectEntryResponse {
 878            entry: None,
 879            worktree_scan_id: scan_id as u64,
 880        })
 881    }
 882
 883    pub async fn handle_expand_entry(
 884        this: Model<Self>,
 885        request: proto::ExpandProjectEntry,
 886        mut cx: AsyncAppContext,
 887    ) -> Result<proto::ExpandProjectEntryResponse> {
 888        let task = this.update(&mut cx, |this, cx| {
 889            this.expand_entry(ProjectEntryId::from_proto(request.entry_id), cx)
 890        })?;
 891        task.ok_or_else(|| anyhow!("no such entry"))?.await?;
 892        let scan_id = this.read_with(&cx, |this, _| this.scan_id())?;
 893        Ok(proto::ExpandProjectEntryResponse {
 894            worktree_scan_id: scan_id as u64,
 895        })
 896    }
 897
 898    pub async fn handle_rename_entry(
 899        this: Model<Self>,
 900        request: proto::RenameProjectEntry,
 901        mut cx: AsyncAppContext,
 902    ) -> Result<proto::ProjectEntryResponse> {
 903        let (scan_id, task) = this.update(&mut cx, |this, cx| {
 904            (
 905                this.scan_id(),
 906                this.rename_entry(
 907                    ProjectEntryId::from_proto(request.entry_id),
 908                    PathBuf::from(request.new_path),
 909                    cx,
 910                ),
 911            )
 912        })?;
 913        Ok(proto::ProjectEntryResponse {
 914            entry: match &task.await? {
 915                CreatedEntry::Included(entry) => Some(entry.into()),
 916                CreatedEntry::Excluded { .. } => None,
 917            },
 918            worktree_scan_id: scan_id as u64,
 919        })
 920    }
 921
 922    pub async fn handle_copy_entry(
 923        this: Model<Self>,
 924        request: proto::CopyProjectEntry,
 925        mut cx: AsyncAppContext,
 926    ) -> Result<proto::ProjectEntryResponse> {
 927        let (scan_id, task) = this.update(&mut cx, |this, cx| {
 928            (
 929                this.scan_id(),
 930                this.copy_entry(
 931                    ProjectEntryId::from_proto(request.entry_id),
 932                    PathBuf::from(request.new_path),
 933                    cx,
 934                ),
 935            )
 936        })?;
 937        Ok(proto::ProjectEntryResponse {
 938            entry: task.await?.as_ref().map(|e| e.into()),
 939            worktree_scan_id: scan_id as u64,
 940        })
 941    }
 942}
 943
 944impl LocalWorktree {
 945    pub fn contains_abs_path(&self, path: &Path) -> bool {
 946        path.starts_with(&self.abs_path)
 947    }
 948
 949    pub fn is_path_private(&self, path: &Path) -> bool {
 950        !self.share_private_files && self.settings.is_path_private(path)
 951    }
 952
 953    fn restart_background_scanners(&mut self, cx: &mut ModelContext<Worktree>) {
 954        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
 955        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
 956        self.scan_requests_tx = scan_requests_tx;
 957        self.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;
 958        self.start_background_scanner(scan_requests_rx, path_prefixes_to_scan_rx, cx);
 959    }
 960
 961    fn start_background_scanner(
 962        &mut self,
 963        scan_requests_rx: channel::Receiver<ScanRequest>,
 964        path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
 965        cx: &mut ModelContext<Worktree>,
 966    ) {
 967        let snapshot = self.snapshot();
 968        let share_private_files = self.share_private_files;
 969        let next_entry_id = self.next_entry_id.clone();
 970        let fs = self.fs.clone();
 971        let settings = self.settings.clone();
 972        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 973        let background_scanner = cx.background_executor().spawn({
 974            let abs_path = &snapshot.abs_path;
 975            let abs_path = if cfg!(target_os = "windows") {
 976                abs_path
 977                    .canonicalize()
 978                    .unwrap_or_else(|_| abs_path.to_path_buf())
 979            } else {
 980                abs_path.to_path_buf()
 981            };
 982            let background = cx.background_executor().clone();
 983            async move {
 984                let (events, watcher) = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
 985                let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
 986                    log::error!("Failed to determine whether filesystem is case sensitive: {e:#}");
 987                    true
 988                });
 989
 990                let mut scanner = BackgroundScanner {
 991                    fs,
 992                    fs_case_sensitive,
 993                    status_updates_tx: scan_states_tx,
 994                    executor: background,
 995                    scan_requests_rx,
 996                    path_prefixes_to_scan_rx,
 997                    next_entry_id,
 998                    state: Mutex::new(BackgroundScannerState {
 999                        prev_snapshot: snapshot.snapshot.clone(),
1000                        snapshot,
1001                        scanned_dirs: Default::default(),
1002                        path_prefixes_to_scan: Default::default(),
1003                        paths_to_scan: Default::default(),
1004                        removed_entry_ids: Default::default(),
1005                        changed_paths: Default::default(),
1006                    }),
1007                    phase: BackgroundScannerPhase::InitialScan,
1008                    share_private_files,
1009                    settings,
1010                    watcher,
1011                };
1012
1013                scanner.run(events).await;
1014            }
1015        });
1016        let scan_state_updater = cx.spawn(|this, mut cx| async move {
1017            while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
1018                this.update(&mut cx, |this, cx| {
1019                    let this = this.as_local_mut().unwrap();
1020                    match state {
1021                        ScanState::Started => {
1022                            *this.is_scanning.0.borrow_mut() = true;
1023                        }
1024                        ScanState::Updated {
1025                            snapshot,
1026                            changes,
1027                            barrier,
1028                            scanning,
1029                        } => {
1030                            *this.is_scanning.0.borrow_mut() = scanning;
1031                            this.set_snapshot(snapshot, changes, cx);
1032                            drop(barrier);
1033                        }
1034                    }
1035                    cx.notify();
1036                })
1037                .ok();
1038            }
1039        });
1040        self._background_scanner_tasks = vec![background_scanner, scan_state_updater];
1041        self.is_scanning = watch::channel_with(true);
1042    }
1043
1044    fn set_snapshot(
1045        &mut self,
1046        new_snapshot: LocalSnapshot,
1047        entry_changes: UpdatedEntriesSet,
1048        cx: &mut ModelContext<Worktree>,
1049    ) {
1050        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
1051        self.snapshot = new_snapshot;
1052
1053        if let Some(share) = self.update_observer.as_mut() {
1054            share
1055                .snapshots_tx
1056                .unbounded_send((
1057                    self.snapshot.clone(),
1058                    entry_changes.clone(),
1059                    repo_changes.clone(),
1060                ))
1061                .ok();
1062        }
1063
1064        if !entry_changes.is_empty() {
1065            cx.emit(Event::UpdatedEntries(entry_changes));
1066        }
1067        if !repo_changes.is_empty() {
1068            cx.emit(Event::UpdatedGitRepositories(repo_changes));
1069        }
1070    }
1071
1072    fn changed_repos(
1073        &self,
1074        old_snapshot: &LocalSnapshot,
1075        new_snapshot: &LocalSnapshot,
1076    ) -> UpdatedGitRepositoriesSet {
1077        let mut changes = Vec::new();
1078        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
1079        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
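        // Walk both sorted repository maps in lock step (a merge join keyed by
        // project entry id), so that added, changed, and removed repositories
        // are each reported exactly once.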
1080        loop {
1081            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
1082                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
1083                    match Ord::cmp(&new_entry_id, &old_entry_id) {
1084                        Ordering::Less => {
1085                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
1086                                changes.push((
1087                                    entry.path.clone(),
1088                                    GitRepositoryChange {
1089                                        old_repository: None,
1090                                    },
1091                                ));
1092                            }
1093                            new_repos.next();
1094                        }
1095                        Ordering::Equal => {
1096                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
1097                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
1098                                    let old_repo = old_snapshot
1099                                        .repository_entries
1100                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
1101                                        .cloned();
1102                                    changes.push((
1103                                        entry.path.clone(),
1104                                        GitRepositoryChange {
1105                                            old_repository: old_repo,
1106                                        },
1107                                    ));
1108                                }
1109                            }
1110                            new_repos.next();
1111                            old_repos.next();
1112                        }
1113                        Ordering::Greater => {
1114                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
1115                                let old_repo = old_snapshot
1116                                    .repository_entries
1117                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
1118                                    .cloned();
1119                                changes.push((
1120                                    entry.path.clone(),
1121                                    GitRepositoryChange {
1122                                        old_repository: old_repo,
1123                                    },
1124                                ));
1125                            }
1126                            old_repos.next();
1127                        }
1128                    }
1129                }
1130                (Some((entry_id, _)), None) => {
1131                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
1132                        changes.push((
1133                            entry.path.clone(),
1134                            GitRepositoryChange {
1135                                old_repository: None,
1136                            },
1137                        ));
1138                    }
1139                    new_repos.next();
1140                }
1141                (None, Some((entry_id, _))) => {
1142                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
1143                        let old_repo = old_snapshot
1144                            .repository_entries
1145                            .get(&RepositoryWorkDirectory(entry.path.clone()))
1146                            .cloned();
1147                        changes.push((
1148                            entry.path.clone(),
1149                            GitRepositoryChange {
1150                                old_repository: old_repo,
1151                            },
1152                        ));
1153                    }
1154                    old_repos.next();
1155                }
1156                (None, None) => break,
1157            }
1158        }
1159
1160        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
1161            (value.0.clone(), value.1.clone())
1162        }
1163
1164        changes.into()
1165    }
1166
1167    pub fn scan_complete(&self) -> impl Future<Output = ()> {
1168        let mut is_scanning_rx = self.is_scanning.1.clone();
1169        async move {
1170            let mut is_scanning = *is_scanning_rx.borrow();
1171            while is_scanning {
1172                if let Some(value) = is_scanning_rx.recv().await {
1173                    is_scanning = value;
1174                } else {
1175                    break;
1176                }
1177            }
1178        }
1179    }
1180
1181    pub fn snapshot(&self) -> LocalSnapshot {
1182        self.snapshot.clone()
1183    }
1184
1185    pub fn settings(&self) -> WorktreeSettings {
1186        self.settings.clone()
1187    }
1188
1189    pub fn local_git_repo(&self, path: &Path) -> Option<Arc<dyn GitRepository>> {
1190        self.repo_for_path(path)
1191            .map(|(_, entry)| entry.repo_ptr.clone())
1192    }
1193
1194    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1195        self.git_repositories.get(&repo.work_directory.0)
1196    }
1197
1198    fn load_file(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<LoadedFile>> {
1199        let path = Arc::from(path);
1200        let abs_path = self.absolutize(&path);
1201        let fs = self.fs.clone();
1202        let entry = self.refresh_entry(path.clone(), None, cx);
1203        let is_private = self.is_path_private(path.as_ref());
1204
1205        cx.spawn(|this, mut cx| async move {
1206            let abs_path = abs_path?;
1207            let text = fs.load(&abs_path).await?;
1208            let mut index_task = None;
1209            let snapshot = this.update(&mut cx, |this, _| this.as_local().unwrap().snapshot())?;
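            // If the file lives inside a git repository, load the file's text
            // from the git index on the background executor so it can serve as
            // the diff base.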
1210            if let Some(repo) = snapshot.repository_for_path(&path) {
1211                if let Some(repo_path) = repo.relativize(&snapshot, &path).log_err() {
1212                    if let Some(git_repo) = snapshot.git_repositories.get(&*repo.work_directory) {
1213                        let git_repo = git_repo.repo_ptr.clone();
1214                        index_task = Some(
1215                            cx.background_executor()
1216                                .spawn(async move { git_repo.load_index_text(&repo_path) }),
1217                        );
1218                    }
1219                }
1220            }
1221
1222            let diff_base = if let Some(index_task) = index_task {
1223                index_task.await
1224            } else {
1225                None
1226            };
1227
1228            let worktree = this
1229                .upgrade()
1230                .ok_or_else(|| anyhow!("worktree was dropped"))?;
1231            let file = match entry.await? {
1232                Some(entry) => File::for_entry(entry, worktree),
1233                None => {
1234                    let metadata = fs
1235                        .metadata(&abs_path)
1236                        .await
1237                        .with_context(|| {
1238                            format!("Loading metadata for excluded file {abs_path:?}")
1239                        })?
1240                        .with_context(|| {
1241                            format!("Excluded file {abs_path:?} got removed during loading")
1242                        })?;
1243                    Arc::new(File {
1244                        entry_id: None,
1245                        worktree,
1246                        path,
1247                        mtime: Some(metadata.mtime),
1248                        is_local: true,
1249                        is_deleted: false,
1250                        is_private,
1251                    })
1252                }
1253            };
1254
1255            Ok(LoadedFile {
1256                file,
1257                text,
1258                diff_base,
1259            })
1260        })
1261    }
1262
 1263    /// Find the lowest path in the worktree's data structures that is an ancestor of the given path.
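    ///
    /// A minimal sketch of the behavior (paths are illustrative): the deepest
    /// indexed ancestor wins, and the worktree root is the fallback.
    ///
    /// ```ignore
    /// // Assuming "src" has an entry but "src/new_dir" does not:
    /// let ancestor = worktree.lowest_ancestor(Path::new("src/new_dir/file.rs"));
    /// assert_eq!(ancestor, PathBuf::from("src"));
    /// ```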
1264    fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1265        let mut lowest_ancestor = None;
1266        for path in path.ancestors() {
1267            if self.entry_for_path(path).is_some() {
1268                lowest_ancestor = Some(path.to_path_buf());
1269                break;
1270            }
1271        }
1272
1273        lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1274    }
1275
1276    fn create_entry(
1277        &self,
1278        path: impl Into<Arc<Path>>,
1279        is_dir: bool,
1280        cx: &mut ModelContext<Worktree>,
1281    ) -> Task<Result<CreatedEntry>> {
1282        let path = path.into();
1283        let abs_path = match self.absolutize(&path) {
1284            Ok(path) => path,
1285            Err(e) => return Task::ready(Err(e.context(format!("absolutizing path {path:?}")))),
1286        };
1287        let path_excluded = self.settings.is_path_excluded(&abs_path);
1288        let fs = self.fs.clone();
1289        let task_abs_path = abs_path.clone();
1290        let write = cx.background_executor().spawn(async move {
1291            if is_dir {
1292                fs.create_dir(&task_abs_path)
1293                    .await
1294                    .with_context(|| format!("creating directory {task_abs_path:?}"))
1295            } else {
1296                fs.save(&task_abs_path, &Rope::default(), LineEnding::default())
1297                    .await
1298                    .with_context(|| format!("creating file {task_abs_path:?}"))
1299            }
1300        });
1301
1302        let lowest_ancestor = self.lowest_ancestor(&path);
1303        cx.spawn(|this, mut cx| async move {
1304            write.await?;
1305            if path_excluded {
1306                return Ok(CreatedEntry::Excluded { abs_path });
1307            }
1308
1309            let (result, refreshes) = this.update(&mut cx, |this, cx| {
1310                let mut refreshes = Vec::new();
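                // Any directories between the lowest pre-existing ancestor and
                // the new path may be new to the worktree, so refresh each of
                // them in addition to the entry itself.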
1311                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
1312                for refresh_path in refresh_paths.ancestors() {
1313                    if refresh_path == Path::new("") {
1314                        continue;
1315                    }
1316                    let refresh_full_path = lowest_ancestor.join(refresh_path);
1317
1318                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
1319                        refresh_full_path.into(),
1320                        None,
1321                        cx,
1322                    ));
1323                }
1324                (
1325                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
1326                    refreshes,
1327                )
1328            })?;
1329            for refresh in refreshes {
1330                refresh.await.log_err();
1331            }
1332
1333            Ok(result
1334                .await?
1335                .map(CreatedEntry::Included)
1336                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
1337        })
1338    }
1339
1340    fn write_file(
1341        &self,
1342        path: impl Into<Arc<Path>>,
1343        text: Rope,
1344        line_ending: LineEnding,
1345        cx: &mut ModelContext<Worktree>,
1346    ) -> Task<Result<Arc<File>>> {
1347        let path = path.into();
1348        let fs = self.fs.clone();
1349        let is_private = self.is_path_private(&path);
1350        let Ok(abs_path) = self.absolutize(&path) else {
1351            return Task::ready(Err(anyhow!("invalid path {path:?}")));
1352        };
1353
1354        let write = cx.background_executor().spawn({
1355            let fs = fs.clone();
1356            let abs_path = abs_path.clone();
1357            async move { fs.save(&abs_path, &text, line_ending).await }
1358        });
1359
1360        cx.spawn(move |this, mut cx| async move {
1361            write.await?;
1362            let entry = this
1363                .update(&mut cx, |this, cx| {
1364                    this.as_local_mut()
1365                        .unwrap()
1366                        .refresh_entry(path.clone(), None, cx)
1367                })?
1368                .await?;
1369            let worktree = this.upgrade().ok_or_else(|| anyhow!("worktree dropped"))?;
1370            if let Some(entry) = entry {
1371                Ok(File::for_entry(entry, worktree))
1372            } else {
1373                let metadata = fs
1374                    .metadata(&abs_path)
1375                    .await
1376                    .with_context(|| {
1377                        format!("Fetching metadata after saving the excluded buffer {abs_path:?}")
1378                    })?
1379                    .with_context(|| {
1380                        format!("Excluded buffer {path:?} got removed during saving")
1381                    })?;
1382                Ok(Arc::new(File {
1383                    worktree,
1384                    path,
1385                    mtime: Some(metadata.mtime),
1386                    entry_id: None,
1387                    is_local: true,
1388                    is_deleted: false,
1389                    is_private,
1390                }))
1391            }
1392        })
1393    }
1394
1395    fn delete_entry(
1396        &self,
1397        entry_id: ProjectEntryId,
1398        trash: bool,
1399        cx: &mut ModelContext<Worktree>,
1400    ) -> Option<Task<Result<()>>> {
1401        let entry = self.entry_for_id(entry_id)?.clone();
1402        let abs_path = self.absolutize(&entry.path);
1403        let fs = self.fs.clone();
1404
1405        let delete = cx.background_executor().spawn(async move {
1406            if entry.is_file() {
1407                if trash {
1408                    fs.trash_file(&abs_path?, Default::default()).await?;
1409                } else {
1410                    fs.remove_file(&abs_path?, Default::default()).await?;
1411                }
1412            } else {
1413                if trash {
1414                    fs.trash_dir(
1415                        &abs_path?,
1416                        RemoveOptions {
1417                            recursive: true,
1418                            ignore_if_not_exists: false,
1419                        },
1420                    )
1421                    .await?;
1422                } else {
1423                    fs.remove_dir(
1424                        &abs_path?,
1425                        RemoveOptions {
1426                            recursive: true,
1427                            ignore_if_not_exists: false,
1428                        },
1429                    )
1430                    .await?;
1431                }
1432            }
1433            anyhow::Ok(entry.path)
1434        });
1435
1436        Some(cx.spawn(|this, mut cx| async move {
1437            let path = delete.await?;
1438            this.update(&mut cx, |this, _| {
1439                this.as_local_mut()
1440                    .unwrap()
1441                    .refresh_entries_for_paths(vec![path])
1442            })?
1443            .recv()
1444            .await;
1445            Ok(())
1446        }))
1447    }
1448
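        /// Renames the entry on disk and then refreshes both the old and new paths. On
        /// a case-insensitive filesystem, a case-only rename is performed with
        /// `overwrite` enabled (see the comment in the spawned task below). If the new
        /// path is excluded from scanning, a `CreatedEntry::Excluded` is returned.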
1449    fn rename_entry(
1450        &self,
1451        entry_id: ProjectEntryId,
1452        new_path: impl Into<Arc<Path>>,
1453        cx: &mut ModelContext<Worktree>,
1454    ) -> Task<Result<CreatedEntry>> {
1455        let old_path = match self.entry_for_id(entry_id) {
1456            Some(entry) => entry.path.clone(),
1457            None => return Task::ready(Err(anyhow!("no entry to rename for id {entry_id:?}"))),
1458        };
1459        let new_path = new_path.into();
1460        let abs_old_path = self.absolutize(&old_path);
1461        let Ok(abs_new_path) = self.absolutize(&new_path) else {
1462            return Task::ready(Err(anyhow!("failed to absolutize path {new_path:?}")));
1463        };
1464        let abs_path = abs_new_path.clone();
1465        let fs = self.fs.clone();
1466        let case_sensitive = self.fs_case_sensitive;
1467        let rename = cx.background_executor().spawn(async move {
1468            let abs_old_path = abs_old_path?;
1469            let abs_new_path = abs_new_path;
1470
1471            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
1472            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());
1473
1474            // If we're on a case-insensitive FS and we're doing a case-only rename (e.g. `foobar` to `FOOBAR`),
1475            // we want to overwrite, because otherwise we run into a file-already-exists error.
1476            let overwrite = !case_sensitive
1477                && abs_old_path != abs_new_path
1478                && abs_old_path_lower == abs_new_path_lower;
1479
1480            fs.rename(
1481                &abs_old_path,
1482                &abs_new_path,
1483                fs::RenameOptions {
1484                    overwrite,
1485                    ..Default::default()
1486                },
1487            )
1488            .await
1489            .with_context(|| format!("Renaming {abs_old_path:?} into {abs_new_path:?}"))
1490        });
1491
1492        cx.spawn(|this, mut cx| async move {
1493            rename.await?;
1494            Ok(this
1495                .update(&mut cx, |this, cx| {
1496                    this.as_local_mut()
1497                        .unwrap()
1498                        .refresh_entry(new_path.clone(), Some(old_path), cx)
1499                })?
1500                .await?
1501                .map(CreatedEntry::Included)
1502                .unwrap_or_else(|| CreatedEntry::Excluded { abs_path }))
1503        })
1504    }
1505
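        /// Copies the entry (recursively, for directories) to `new_path` inside the
        /// worktree and returns the refreshed entry for the copy, or `None` if the
        /// source entry does not exist or the destination is excluded from scanning.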
1506    fn copy_entry(
1507        &self,
1508        entry_id: ProjectEntryId,
1509        new_path: impl Into<Arc<Path>>,
1510        cx: &mut ModelContext<Worktree>,
1511    ) -> Task<Result<Option<Entry>>> {
1512        let old_path = match self.entry_for_id(entry_id) {
1513            Some(entry) => entry.path.clone(),
1514            None => return Task::ready(Ok(None)),
1515        };
1516        let new_path = new_path.into();
1517        let abs_old_path = self.absolutize(&old_path);
1518        let abs_new_path = self.absolutize(&new_path);
1519        let fs = self.fs.clone();
1520        let copy = cx.background_executor().spawn(async move {
1521            copy_recursive(
1522                fs.as_ref(),
1523                &abs_old_path?,
1524                &abs_new_path?,
1525                Default::default(),
1526            )
1527            .await
1528        });
1529
1530        cx.spawn(|this, mut cx| async move {
1531            copy.await?;
1532            this.update(&mut cx, |this, cx| {
1533                this.as_local_mut()
1534                    .unwrap()
1535                    .refresh_entry(new_path.clone(), None, cx)
1536            })?
1537            .await
1538        })
1539    }
1540
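        /// Copies the given absolute paths into `target_directory` within the worktree,
        /// skipping any source whose destination would be the source itself, then
        /// rescans the copied paths and returns the ids of the entries created for them.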
1541    pub fn copy_external_entries(
1542        &mut self,
1543        target_directory: PathBuf,
1544        paths: Vec<Arc<Path>>,
1545        overwrite_existing_files: bool,
1546        cx: &mut ModelContext<Worktree>,
1547    ) -> Task<Result<Vec<ProjectEntryId>>> {
1548        let worktree_path = self.abs_path().clone();
1549        let fs = self.fs.clone();
1550        let paths = paths
1551            .into_iter()
1552            .filter_map(|source| {
1553                let file_name = source.file_name()?;
1554                let mut target = target_directory.clone();
1555                target.push(file_name);
1556
1557                // Do not allow copying the same file to itself.
1558                if source.as_ref() != target.as_path() {
1559                    Some((source, target))
1560                } else {
1561                    None
1562                }
1563            })
1564            .collect::<Vec<_>>();
1565
1566        let paths_to_refresh = paths
1567            .iter()
1568            .filter_map(|(_, target)| Some(target.strip_prefix(&worktree_path).ok()?.into()))
1569            .collect::<Vec<_>>();
1570
1571        cx.spawn(|this, cx| async move {
1572            cx.background_executor()
1573                .spawn(async move {
1574                    for (source, target) in paths {
1575                        copy_recursive(
1576                            fs.as_ref(),
1577                            &source,
1578                            &target,
1579                            fs::CopyOptions {
1580                                overwrite: overwrite_existing_files,
1581                                ..Default::default()
1582                            },
1583                        )
1584                        .await
1585                        .with_context(|| {
1586                            format!("Failed to copy {source:?} to {target:?}")
1587                        })?;
1588                    }
1589                    Ok::<(), anyhow::Error>(())
1590                })
1591                .await
1592                .log_err();
1593            let mut refresh = cx.read_model(
1594                &this.upgrade().with_context(|| "Dropped worktree")?,
1595                |this, _| {
1596                    Ok::<postage::barrier::Receiver, anyhow::Error>(
1597                        this.as_local()
1598                            .with_context(|| "Worktree is not local")?
1599                            .refresh_entries_for_paths(paths_to_refresh.clone()),
1600                    )
1601                },
1602            )??;
1603
1604            cx.background_executor()
1605                .spawn(async move {
1606                    refresh.next().await;
1607                    Ok::<(), anyhow::Error>(())
1608                })
1609                .await
1610                .log_err();
1611
1612            let this = this.upgrade().with_context(|| "Dropped worktree")?;
1613            cx.read_model(&this, |this, _| {
1614                paths_to_refresh
1615                    .iter()
1616                    .filter_map(|path| Some(this.entry_for_path(path)?.id))
1617                    .collect()
1618            })
1619        })
1620    }
1621
1622    fn expand_entry(
1623        &mut self,
1624        entry_id: ProjectEntryId,
1625        cx: &mut ModelContext<Worktree>,
1626    ) -> Option<Task<Result<()>>> {
1627        let path = self.entry_for_id(entry_id)?.path.clone();
1628        let mut refresh = self.refresh_entries_for_paths(vec![path]);
1629        Some(cx.background_executor().spawn(async move {
1630            refresh.next().await;
1631            Ok(())
1632        }))
1633    }
1634
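        /// Asks the background scanner to rescan the given worktree-relative paths.
        /// The returned barrier resolves once the scan request has been processed,
        /// e.g. (a sketch mirroring the callers elsewhere in this file):
        ///
        /// ```ignore
        /// let mut done = this.refresh_entries_for_paths(vec![path]);
        /// done.recv().await;
        /// ```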
1635    fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
1636        let (tx, rx) = barrier::channel();
1637        self.scan_requests_tx
1638            .try_send(ScanRequest {
1639                relative_paths: paths,
1640                done: tx,
1641            })
1642            .ok();
1643        rx
1644    }
1645
1646    pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
1647        self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
1648    }
1649
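        /// Rescans `path` (and `old_path`, when the refresh is part of a rename) and
        /// returns the refreshed entry, or `Ok(None)` when the path is excluded by the
        /// worktree's file-scan exclusion settings.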
1650    fn refresh_entry(
1651        &self,
1652        path: Arc<Path>,
1653        old_path: Option<Arc<Path>>,
1654        cx: &mut ModelContext<Worktree>,
1655    ) -> Task<Result<Option<Entry>>> {
1656        if self.settings.is_path_excluded(&path) {
1657            return Task::ready(Ok(None));
1658        }
1659        let paths = if let Some(old_path) = old_path.as_ref() {
1660            vec![old_path.clone(), path.clone()]
1661        } else {
1662            vec![path.clone()]
1663        };
1664        let t0 = Instant::now();
1665        let mut refresh = self.refresh_entries_for_paths(paths);
1666        cx.spawn(move |this, mut cx| async move {
1667            refresh.recv().await;
1668            log::trace!("refreshed entry {path:?} in {:?}", t0.elapsed());
1669            let new_entry = this.update(&mut cx, |this, _| {
1670                this.entry_for_path(path)
1671                    .cloned()
1672                    .ok_or_else(|| anyhow!("failed to read path after update"))
1673            })??;
1674            Ok(Some(new_entry))
1675        })
1676    }
1677
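        /// Streams worktree updates to `callback`, starting with a full initial
        /// snapshot and following up with incremental diffs. Updates are split into
        /// chunks of at most `MAX_CHUNK_SIZE` entries, and whenever the callback
        /// reports failure the stream pauses until updates are resumed via
        /// `resume_updates`.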
1678    fn observe_updates<F, Fut>(
1679        &mut self,
1680        project_id: u64,
1681        cx: &mut ModelContext<Worktree>,
1682        callback: F,
1683    ) where
1684        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1685        Fut: Send + Future<Output = bool>,
1686    {
1687        #[cfg(any(test, feature = "test-support"))]
1688        const MAX_CHUNK_SIZE: usize = 2;
1689        #[cfg(not(any(test, feature = "test-support")))]
1690        const MAX_CHUNK_SIZE: usize = 256;
1691
1692        if let Some(observer) = self.update_observer.as_mut() {
1693            *observer.resume_updates.borrow_mut() = ();
1694            return;
1695        }
1696
1697        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
1698        let (snapshots_tx, mut snapshots_rx) =
1699            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
1700        snapshots_tx
1701            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
1702            .ok();
1703
1704        let worktree_id = cx.entity_id().as_u64();
1705        let _maintain_remote_snapshot = cx.background_executor().spawn(async move {
1706            let mut is_first = true;
1707            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
1708                let update = if is_first {
1709                    is_first = false;
1710                    snapshot.build_initial_update(project_id, worktree_id)
1711                } else {
1712                    snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes)
1713                };
1716
1717                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1718                    let _ = resume_updates_rx.try_recv();
1719                    loop {
1720                        let result = callback(update.clone());
1721                        if result.await {
1722                            break;
1723                        } else {
1724                            log::info!("waiting to resume updates");
1725                            if resume_updates_rx.next().await.is_none() {
1726                                return Some(());
1727                            }
1728                        }
1729                    }
1730                }
1731            }
1732            Some(())
1733        });
1734
1735        self.update_observer = Some(UpdateObservationState {
1736            snapshots_tx,
1737            resume_updates: resume_updates_tx,
1738            _maintain_remote_snapshot,
1739        });
1740    }
1741
1742    pub fn share_private_files(&mut self, cx: &mut ModelContext<Worktree>) {
1743        self.share_private_files = true;
1744        self.restart_background_scanners(cx);
1745    }
1746}
1747
1748impl RemoteWorktree {
1749    pub fn project_id(&self) -> u64 {
1750        self.project_id
1751    }
1752
1753    pub fn client(&self) -> AnyProtoClient {
1754        self.client.clone()
1755    }
1756
1757    pub fn disconnected_from_host(&mut self) {
1758        self.updates_tx.take();
1759        self.snapshot_subscriptions.clear();
1760        self.disconnected = true;
1761    }
1762
1763    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1764        if let Some(updates_tx) = &self.updates_tx {
1765            updates_tx
1766                .unbounded_send(update)
1767                .expect("consumer runs to completion");
1768        }
1769    }
1770
1771    fn observe_updates<F, Fut>(
1772        &mut self,
1773        project_id: u64,
1774        cx: &mut ModelContext<Worktree>,
1775        callback: F,
1776    ) where
1777        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1778        Fut: 'static + Send + Future<Output = bool>,
1779    {
1780        let (tx, mut rx) = mpsc::unbounded();
1781        let initial_update = self
1782            .snapshot
1783            .build_initial_update(project_id, self.id().to_proto());
1784        self.updates_tx = Some(tx);
1785        cx.spawn(|this, mut cx| async move {
1786            let mut update = initial_update;
1787            loop {
1788                if !callback(update).await {
1789                    break;
1790                }
1791                if let Some(next_update) = rx.next().await {
1792                    update = next_update;
1793                } else {
1794                    break;
1795                }
1796            }
1797            this.update(&mut cx, |this, _| {
1798                let this = this.as_remote_mut().unwrap();
1799                this.updates_tx.take();
1800            })
1801        })
1802        .detach();
1803    }
1804
1805    fn observed_snapshot(&self, scan_id: usize) -> bool {
1806        self.completed_scan_id >= scan_id
1807    }
1808
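        /// Returns a future that resolves once this remote worktree has observed a scan
        /// with an id of at least `scan_id`. If the worktree has disconnected from the
        /// host, the future resolves with an error instead.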
1809    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1810        let (tx, rx) = oneshot::channel();
1811        if self.observed_snapshot(scan_id) {
1812            let _ = tx.send(());
1813        } else if self.disconnected {
1814            drop(tx);
1815        } else {
1816            match self
1817                .snapshot_subscriptions
1818                .binary_search_by_key(&scan_id, |probe| probe.0)
1819            {
1820                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1821            }
1822        }
1823
1824        async move {
1825            rx.await?;
1826            Ok(())
1827        }
1828    }
1829
1830    fn insert_entry(
1831        &mut self,
1832        entry: proto::Entry,
1833        scan_id: usize,
1834        cx: &mut ModelContext<Worktree>,
1835    ) -> Task<Result<Entry>> {
1836        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1837        cx.spawn(|this, mut cx| async move {
1838            wait_for_snapshot.await?;
1839            this.update(&mut cx, |worktree, _| {
1840                let worktree = worktree.as_remote_mut().unwrap();
1841                let snapshot = &mut worktree.background_snapshot.lock().0;
1842                let entry = snapshot.insert_entry(entry);
1843                worktree.snapshot = snapshot.clone();
1844                entry
1845            })?
1846        })
1847    }
1848
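        /// Asks the host to delete the entry, waits until this replica has observed
        /// the host's scan, and then removes the entry from the local snapshot.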
1849    fn delete_entry(
1850        &mut self,
1851        entry_id: ProjectEntryId,
1852        trash: bool,
1853        cx: &mut ModelContext<Worktree>,
1854    ) -> Option<Task<Result<()>>> {
1855        let response = self.client.request(proto::DeleteProjectEntry {
1856            project_id: self.project_id,
1857            entry_id: entry_id.to_proto(),
1858            use_trash: trash,
1859        });
1860        Some(cx.spawn(move |this, mut cx| async move {
1861            let response = response.await?;
1862            let scan_id = response.worktree_scan_id as usize;
1863
1864            this.update(&mut cx, move |this, _| {
1865                this.as_remote_mut().unwrap().wait_for_snapshot(scan_id)
1866            })?
1867            .await?;
1868
1869            this.update(&mut cx, |this, _| {
1870                let this = this.as_remote_mut().unwrap();
1871                let snapshot = &mut this.background_snapshot.lock().0;
1872                snapshot.delete_entry(entry_id);
1873                this.snapshot = snapshot.clone();
1874            })
1875        }))
1876    }
1877
1878    fn rename_entry(
1879        &mut self,
1880        entry_id: ProjectEntryId,
1881        new_path: impl Into<Arc<Path>>,
1882        cx: &mut ModelContext<Worktree>,
1883    ) -> Task<Result<CreatedEntry>> {
1884        let new_path = new_path.into();
1885        let response = self.client.request(proto::RenameProjectEntry {
1886            project_id: self.project_id,
1887            entry_id: entry_id.to_proto(),
1888            new_path: new_path.to_string_lossy().into(),
1889        });
1890        cx.spawn(move |this, mut cx| async move {
1891            let response = response.await?;
1892            match response.entry {
1893                Some(entry) => this
1894                    .update(&mut cx, |this, cx| {
1895                        this.as_remote_mut().unwrap().insert_entry(
1896                            entry,
1897                            response.worktree_scan_id as usize,
1898                            cx,
1899                        )
1900                    })?
1901                    .await
1902                    .map(CreatedEntry::Included),
1903                None => {
1904                    let abs_path = this.update(&mut cx, |worktree, _| {
1905                        worktree
1906                            .absolutize(&new_path)
1907                            .with_context(|| format!("absolutizing {new_path:?}"))
1908                    })??;
1909                    Ok(CreatedEntry::Excluded { abs_path })
1910                }
1911            }
1912        })
1913    }
1914}
1915
1916impl Snapshot {
1917    pub fn new(id: u64, root_name: String, abs_path: Arc<Path>) -> Self {
1918        Snapshot {
1919            id: WorktreeId::from_usize(id as usize),
1920            abs_path,
1921            root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
1922            root_name,
1923            entries_by_path: Default::default(),
1924            entries_by_id: Default::default(),
1925            repository_entries: Default::default(),
1926            scan_id: 1,
1927            completed_scan_id: 0,
1928        }
1929    }
1930
1931    pub fn id(&self) -> WorktreeId {
1932        self.id
1933    }
1934
1935    pub fn abs_path(&self) -> &Arc<Path> {
1936        &self.abs_path
1937    }
1938
1939    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
1940        let mut updated_entries = self
1941            .entries_by_path
1942            .iter()
1943            .map(proto::Entry::from)
1944            .collect::<Vec<_>>();
1945        updated_entries.sort_unstable_by_key(|e| e.id);
1946
1947        let mut updated_repositories = self
1948            .repository_entries
1949            .values()
1950            .map(proto::RepositoryEntry::from)
1951            .collect::<Vec<_>>();
1952        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1953
1954        proto::UpdateWorktree {
1955            project_id,
1956            worktree_id,
1957            abs_path: self.abs_path().to_string_lossy().into(),
1958            root_name: self.root_name().to_string(),
1959            updated_entries,
1960            removed_entries: Vec::new(),
1961            scan_id: self.scan_id as u64,
1962            is_last_update: self.completed_scan_id == self.scan_id,
1963            updated_repositories,
1964            removed_repositories: Vec::new(),
1965        }
1966    }
1967
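        /// Converts a worktree-relative path into an absolute path, rejecting paths
        /// that contain non-normal components such as `..`. A rough sketch of the
        /// behavior:
        ///
        /// ```ignore
        /// snapshot.absolutize(Path::new("src/lib.rs")); // Ok(<worktree abs_path>/src/lib.rs)
        /// snapshot.absolutize(Path::new(""));           // Ok(<worktree abs_path>)
        /// snapshot.absolutize(Path::new("../etc"));     // Err(invalid path)
        /// ```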
1968    pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
1969        if path
1970            .components()
1971            .any(|component| !matches!(component, std::path::Component::Normal(_)))
1972        {
1973            return Err(anyhow!("invalid path {path:?}"));
1974        }
1975        if path.file_name().is_some() {
1976            Ok(self.abs_path.join(path))
1977        } else {
1978            Ok(self.abs_path.to_path_buf())
1979        }
1980    }
1981
1982    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1983        self.entries_by_id.get(&entry_id, &()).is_some()
1984    }
1985
1986    fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1987        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1988        let old_entry = self.entries_by_id.insert_or_replace(
1989            PathEntry {
1990                id: entry.id,
1991                path: entry.path.clone(),
1992                is_ignored: entry.is_ignored,
1993                scan_id: 0,
1994            },
1995            &(),
1996        );
1997        if let Some(old_entry) = old_entry {
1998            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1999        }
2000        self.entries_by_path.insert_or_replace(entry.clone(), &());
2001        Ok(entry)
2002    }
2003
2004    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
2005        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
2006        self.entries_by_path = {
2007            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2008            let mut new_entries_by_path =
2009                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
2010            while let Some(entry) = cursor.item() {
2011                if entry.path.starts_with(&removed_entry.path) {
2012                    self.entries_by_id.remove(&entry.id, &());
2013                    cursor.next(&());
2014                } else {
2015                    break;
2016                }
2017            }
2018            new_entries_by_path.append(cursor.suffix(&()), &());
2019            new_entries_by_path
2020        };
2021
2022        Some(removed_entry.path)
2023    }
2024
2025    #[cfg(any(test, feature = "test-support"))]
2026    pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
2027        let path = path.into();
2028        self.entries_by_path
2029            .get(&PathKey(Arc::from(path)), &())
2030            .and_then(|entry| entry.git_status)
2031    }
2032
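        /// Applies an update received from the host to this snapshot: removed and
        /// updated entries become edits on the path and id trees, repository entries
        /// are reconciled, and the scan id is advanced.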
2033    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
2034        log::trace!(
2035            "applying remote worktree update. {} entries updated, {} removed",
2036            update.updated_entries.len(),
2037            update.removed_entries.len()
2038        );
2039
2040        let mut entries_by_path_edits = Vec::new();
2041        let mut entries_by_id_edits = Vec::new();
2042
2043        for entry_id in update.removed_entries {
2044            let entry_id = ProjectEntryId::from_proto(entry_id);
2045            entries_by_id_edits.push(Edit::Remove(entry_id));
2046            if let Some(entry) = self.entry_for_id(entry_id) {
2047                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
2048            }
2049        }
2050
2051        for entry in update.updated_entries {
2052            let entry = Entry::try_from((&self.root_char_bag, entry))?;
2053            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
2054                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
2055            }
2056            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
2057                if old_entry.id != entry.id {
2058                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
2059                }
2060            }
2061            entries_by_id_edits.push(Edit::Insert(PathEntry {
2062                id: entry.id,
2063                path: entry.path.clone(),
2064                is_ignored: entry.is_ignored,
2065                scan_id: 0,
2066            }));
2067            entries_by_path_edits.push(Edit::Insert(entry));
2068        }
2069
2070        self.entries_by_path.edit(entries_by_path_edits, &());
2071        self.entries_by_id.edit(entries_by_id_edits, &());
2072
2073        update.removed_repositories.sort_unstable();
2074        self.repository_entries.retain(|_, entry| {
2075            update
2076                .removed_repositories
2077                .binary_search(&entry.work_directory.to_proto())
2078                .is_err()
2079        });
2084
2085        for repository in update.updated_repositories {
2086            let work_directory_entry: WorkDirectoryEntry =
2087                ProjectEntryId::from_proto(repository.work_directory_id).into();
2088
2089            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
2090                let work_directory = RepositoryWorkDirectory(entry.path.clone());
2091                if self.repository_entries.get(&work_directory).is_some() {
2092                    self.repository_entries.update(&work_directory, |repo| {
2093                        repo.branch = repository.branch.map(Into::into);
2094                    });
2095                } else {
2096                    self.repository_entries.insert(
2097                        work_directory,
2098                        RepositoryEntry {
2099                            work_directory: work_directory_entry,
2100                            branch: repository.branch.map(Into::into),
2101                            // When syncing repository entries from a peer, we don't need
2102                            // the location_in_repo field, since git operations don't happen locally
2103                            // anyway.
2104                            location_in_repo: None,
2105                        },
2106                    )
2107                }
2108            } else {
2109                log::error!("no work directory entry for repository {:?}", repository)
2110            }
2111        }
2112
2113        self.scan_id = update.scan_id as usize;
2114        if update.is_last_update {
2115            self.completed_scan_id = update.scan_id as usize;
2116        }
2117
2118        Ok(())
2119    }
2120
2121    pub fn file_count(&self) -> usize {
2122        self.entries_by_path.summary().file_count
2123    }
2124
2125    pub fn visible_file_count(&self) -> usize {
2126        self.entries_by_path.summary().non_ignored_file_count
2127    }
2128
2129    fn traverse_from_offset(
2130        &self,
2131        include_files: bool,
2132        include_dirs: bool,
2133        include_ignored: bool,
2134        start_offset: usize,
2135    ) -> Traversal {
2136        let mut cursor = self.entries_by_path.cursor();
2137        cursor.seek(
2138            &TraversalTarget::Count {
2139                count: start_offset,
2140                include_files,
2141                include_dirs,
2142                include_ignored,
2143            },
2144            Bias::Right,
2145            &(),
2146        );
2147        Traversal {
2148            cursor,
2149            include_files,
2150            include_dirs,
2151            include_ignored,
2152        }
2153    }
2154
2155    pub fn traverse_from_path(
2156        &self,
2157        include_files: bool,
2158        include_dirs: bool,
2159        include_ignored: bool,
2160        path: &Path,
2161    ) -> Traversal {
2162        Traversal::new(
2163            &self.entries_by_path,
2164            include_files,
2165            include_dirs,
2166            include_ignored,
2167            path,
2168        )
2169    }
2170
2171    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
2172        self.traverse_from_offset(true, false, include_ignored, start)
2173    }
2174
2175    pub fn directories(&self, include_ignored: bool, start: usize) -> Traversal {
2176        self.traverse_from_offset(false, true, include_ignored, start)
2177    }
2178
2179    pub fn entries(&self, include_ignored: bool, start: usize) -> Traversal {
2180        self.traverse_from_offset(true, true, include_ignored, start)
2181    }
2182
2183    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
2184        self.repository_entries
2185            .iter()
2186            .map(|(path, entry)| (&path.0, entry))
2187    }
2188
2189    /// Get the repository whose work directory contains the given path.
2190    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
2191        self.repository_entries
2192            .get(&RepositoryWorkDirectory(path.into()))
2193            .cloned()
2194    }
2195
2196    /// Get the repository whose work directory contains the given path.
2197    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
2198        self.repository_and_work_directory_for_path(path)
2199            .map(|e| e.1)
2200    }
2201
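        /// Returns the innermost repository whose work directory contains the given
        /// path, along with that work directory. Repository entries are ordered by
        /// work-directory path, so the last match is the deepest one.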
2202    pub fn repository_and_work_directory_for_path(
2203        &self,
2204        path: &Path,
2205    ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
2206        self.repository_entries
2207            .iter()
2208            .filter(|(workdir_path, _)| path.starts_with(workdir_path))
2209            .last()
2210            .map(|(path, repo)| (path.clone(), repo.clone()))
2211    }
2212
2213    /// Given an ordered iterator of entries, returns an iterator of those entries,
2214    /// along with their containing git repository.
2215    pub fn entries_with_repositories<'a>(
2216        &'a self,
2217        entries: impl 'a + Iterator<Item = &'a Entry>,
2218    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
2219        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
2220        let mut repositories = self.repositories().peekable();
2221        entries.map(move |entry| {
2222            while let Some((repo_path, _)) = containing_repos.last() {
2223                if entry.path.starts_with(repo_path) {
2224                    break;
2225                } else {
2226                    containing_repos.pop();
2227                }
2228            }
2229            while let Some((repo_path, _)) = repositories.peek() {
2230                if entry.path.starts_with(repo_path) {
2231                    containing_repos.push(repositories.next().unwrap());
2232                } else {
2233                    break;
2234                }
2235            }
2236            let repo = containing_repos.last().map(|(_, repo)| *repo);
2237            (entry, repo)
2238        })
2239    }
2240
2241    /// Updates the `git_status` of the given entries such that files'
2242    /// statuses bubble up to their ancestor directories.
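    /// Roughly: a cursor over the entry tree accumulates per-subtree `GitStatuses`
    /// counts, and a stack of currently-open ancestor directories is used to assign
    /// each directory the most severe status (conflict > modified > added) found
    /// beneath it.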
2243    pub fn propagate_git_statuses(&self, result: &mut [Entry]) {
2244        let mut cursor = self
2245            .entries_by_path
2246            .cursor::<(TraversalProgress, GitStatuses)>();
2247        let mut entry_stack = Vec::<(usize, GitStatuses)>::new();
2248
2249        let mut result_ix = 0;
2250        loop {
2251            let next_entry = result.get(result_ix);
2252            let containing_entry = entry_stack.last().map(|(ix, _)| &result[*ix]);
2253
2254            let entry_to_finish = match (containing_entry, next_entry) {
2255                (Some(_), None) => entry_stack.pop(),
2256                (Some(containing_entry), Some(next_path)) => {
2257                    if next_path.path.starts_with(&containing_entry.path) {
2258                        None
2259                    } else {
2260                        entry_stack.pop()
2261                    }
2262                }
2263                (None, Some(_)) => None,
2264                (None, None) => break,
2265            };
2266
2267            if let Some((entry_ix, prev_statuses)) = entry_to_finish {
2268                cursor.seek_forward(
2269                    &TraversalTarget::PathSuccessor(&result[entry_ix].path),
2270                    Bias::Left,
2271                    &(),
2272                );
2273
2274                let statuses = cursor.start().1 - prev_statuses;
2275
2276                result[entry_ix].git_status = if statuses.conflict > 0 {
2277                    Some(GitFileStatus::Conflict)
2278                } else if statuses.modified > 0 {
2279                    Some(GitFileStatus::Modified)
2280                } else if statuses.added > 0 {
2281                    Some(GitFileStatus::Added)
2282                } else {
2283                    None
2284                };
2285            } else {
2286                if result[result_ix].is_dir() {
2287                    cursor.seek_forward(
2288                        &TraversalTarget::Path(&result[result_ix].path),
2289                        Bias::Left,
2290                        &(),
2291                    );
2292                    entry_stack.push((result_ix, cursor.start().1));
2293                }
2294                result_ix += 1;
2295            }
2296        }
2297    }
2298
2299    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
2300        let empty_path = Path::new("");
2301        self.entries_by_path
2302            .cursor::<()>()
2303            .filter(move |entry| entry.path.as_ref() != empty_path)
2304            .map(|entry| &entry.path)
2305    }
2306
2307    pub fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
2308        let mut cursor = self.entries_by_path.cursor();
2309        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
2310        let traversal = Traversal {
2311            cursor,
2312            include_files: true,
2313            include_dirs: true,
2314            include_ignored: true,
2315        };
2316        ChildEntriesIter {
2317            traversal,
2318            parent_path,
2319        }
2320    }
2321
2322    pub fn root_entry(&self) -> Option<&Entry> {
2323        self.entry_for_path("")
2324    }
2325
2326    pub fn root_name(&self) -> &str {
2327        &self.root_name
2328    }
2329
2330    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2331        self.repository_entries
2332            .get(&RepositoryWorkDirectory(Path::new("").into()))
2333            .map(|entry| entry.to_owned())
2334    }
2335
2336    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
2337        self.repository_entries.values()
2338    }
2339
2340    pub fn scan_id(&self) -> usize {
2341        self.scan_id
2342    }
2343
2344    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2345        let path = path.as_ref();
2346        self.traverse_from_path(true, true, true, path)
2347            .entry()
2348            .and_then(|entry| {
2349                if entry.path.as_ref() == path {
2350                    Some(entry)
2351                } else {
2352                    None
2353                }
2354            })
2355    }
2356
2357    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
2358        let entry = self.entries_by_id.get(&id, &())?;
2359        self.entry_for_path(&entry.path)
2360    }
2361
2362    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
2363        self.entry_for_path(path.as_ref()).map(|e| e.inode)
2364    }
2365}
2366
2367impl LocalSnapshot {
2368    pub fn repo_for_path(&self, path: &Path) -> Option<(RepositoryEntry, &LocalRepositoryEntry)> {
2369        let (_, repo_entry) = self.repository_and_work_directory_for_path(path)?;
2370        let work_directory_id = repo_entry.work_directory_id();
2371        Some((repo_entry, self.git_repositories.get(&work_directory_id)?))
2372    }
2373
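        /// Builds a `proto::UpdateWorktree` describing the given entry and repository
        /// changes. The change lists are sorted by id (the removed-entries
        /// de-duplication below relies on `updated_entries` being sorted).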
2374    fn build_update(
2375        &self,
2376        project_id: u64,
2377        worktree_id: u64,
2378        entry_changes: UpdatedEntriesSet,
2379        repo_changes: UpdatedGitRepositoriesSet,
2380    ) -> proto::UpdateWorktree {
2381        let mut updated_entries = Vec::new();
2382        let mut removed_entries = Vec::new();
2383        let mut updated_repositories = Vec::new();
2384        let mut removed_repositories = Vec::new();
2385
2386        for (_, entry_id, path_change) in entry_changes.iter() {
2387            if let PathChange::Removed = path_change {
2388                removed_entries.push(entry_id.0 as u64);
2389            } else if let Some(entry) = self.entry_for_id(*entry_id) {
2390                updated_entries.push(proto::Entry::from(entry));
2391            }
2392        }
2393
2394        for (work_dir_path, change) in repo_changes.iter() {
2395            let new_repo = self
2396                .repository_entries
2397                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
2398            match (&change.old_repository, new_repo) {
2399                (Some(old_repo), Some(new_repo)) => {
2400                    updated_repositories.push(new_repo.build_update(old_repo));
2401                }
2402                (None, Some(new_repo)) => {
2403                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
2404                }
2405                (Some(old_repo), None) => {
2406                    removed_repositories.push(old_repo.work_directory.0.to_proto());
2407                }
2408                _ => {}
2409            }
2410        }
2411
2412        removed_entries.sort_unstable();
2413        updated_entries.sort_unstable_by_key(|e| e.id);
2414        removed_repositories.sort_unstable();
2415        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
2416
2417        // TODO - optimize, knowing that `removed_entries` is sorted.
2418        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());
2419
2420        proto::UpdateWorktree {
2421            project_id,
2422            worktree_id,
2423            abs_path: self.abs_path().to_string_lossy().into(),
2424            root_name: self.root_name().to_string(),
2425            updated_entries,
2426            removed_entries,
2427            scan_id: self.scan_id as u64,
2428            is_last_update: self.completed_scan_id == self.scan_id,
2429            updated_repositories,
2430            removed_repositories,
2431        }
2432    }
2433
2434    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2435        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
2436            let abs_path = self.abs_path.join(&entry.path);
2437            match smol::block_on(build_gitignore(&abs_path, fs)) {
2438                Ok(ignore) => {
2439                    self.ignores_by_parent_abs_path
2440                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
2441                }
2442                Err(error) => {
2443                    log::error!(
2444                        "error loading .gitignore file {:?} - {:?}",
2445                        &entry.path,
2446                        error
2447                    );
2448                }
2449            }
2450        }
2451
2452        if entry.kind == EntryKind::PendingDir {
2453            if let Some(existing_entry) =
2454                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
2455            {
2456                entry.kind = existing_entry.kind;
2457            }
2458        }
2459
2460        let scan_id = self.scan_id;
2461        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
2462        if let Some(removed) = removed {
2463            if removed.id != entry.id {
2464                self.entries_by_id.remove(&removed.id, &());
2465            }
2466        }
2467        self.entries_by_id.insert_or_replace(
2468            PathEntry {
2469                id: entry.id,
2470                path: entry.path.clone(),
2471                is_ignored: entry.is_ignored,
2472                scan_id,
2473            },
2474            &(),
2475        );
2476
2477        entry
2478    }
2479
2480    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2481        let mut inodes = TreeSet::default();
2482        for ancestor in path.ancestors().skip(1) {
2483            if let Some(entry) = self.entry_for_path(ancestor) {
2484                inodes.insert(entry.inode);
2485            }
2486        }
2487        inodes
2488    }
2489
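        /// Builds the stack of `.gitignore` rules that applies to `abs_path` by walking
        /// up its ancestors, stopping at the containing repository root (the first
        /// ancestor with a `.git` directory). If any ancestor is itself ignored, the
        /// whole path is treated as ignored.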
2490    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2491        let mut new_ignores = Vec::new();
2492        for (index, ancestor) in abs_path.ancestors().enumerate() {
2493            if index > 0 {
2494                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2495                    new_ignores.push((ancestor, Some(ignore.clone())));
2496                } else {
2497                    new_ignores.push((ancestor, None));
2498                }
2499            }
2500            if ancestor.join(&*DOT_GIT).is_dir() {
2501                break;
2502            }
2503        }
2504
2505        let mut ignore_stack = IgnoreStack::none();
2506        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2507            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2508                ignore_stack = IgnoreStack::all();
2509                break;
2510            } else if let Some(ignore) = ignore {
2511                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2512            }
2513        }
2514
2515        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2516            ignore_stack = IgnoreStack::all();
2517        }
2518
2519        ignore_stack
2520    }
2521
2522    #[cfg(test)]
2523    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
2524        self.entries_by_path
2525            .cursor::<()>()
2526            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
2527    }
2528
2529    #[cfg(test)]
2530    pub fn check_invariants(&self, git_state: bool) {
2531        use pretty_assertions::assert_eq;
2532
2533        assert_eq!(
2534            self.entries_by_path
2535                .cursor::<()>()
2536                .map(|e| (&e.path, e.id))
2537                .collect::<Vec<_>>(),
2538            self.entries_by_id
2539                .cursor::<()>()
2540                .map(|e| (&e.path, e.id))
2541                .collect::<collections::BTreeSet<_>>()
2542                .into_iter()
2543                .collect::<Vec<_>>(),
2544            "entries_by_path and entries_by_id are inconsistent"
2545        );
2546
2547        let mut files = self.files(true, 0);
2548        let mut visible_files = self.files(false, 0);
2549        for entry in self.entries_by_path.cursor::<()>() {
2550            if entry.is_file() {
2551                assert_eq!(files.next().unwrap().inode, entry.inode);
2552                if !entry.is_ignored && !entry.is_external {
2553                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
2554                }
2555            }
2556        }
2557
2558        assert!(files.next().is_none());
2559        assert!(visible_files.next().is_none());
2560
2561        let mut bfs_paths = Vec::new();
2562        let mut stack = self
2563            .root_entry()
2564            .map(|e| e.path.as_ref())
2565            .into_iter()
2566            .collect::<Vec<_>>();
2567        while let Some(path) = stack.pop() {
2568            bfs_paths.push(path);
2569            let ix = stack.len();
2570            for child_entry in self.child_entries(path) {
2571                stack.insert(ix, &child_entry.path);
2572            }
2573        }
2574
2575        let dfs_paths_via_iter = self
2576            .entries_by_path
2577            .cursor::<()>()
2578            .map(|e| e.path.as_ref())
2579            .collect::<Vec<_>>();
2580        assert_eq!(bfs_paths, dfs_paths_via_iter);
2581
2582        let dfs_paths_via_traversal = self
2583            .entries(true, 0)
2584            .map(|e| e.path.as_ref())
2585            .collect::<Vec<_>>();
2586        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
2587
2588        if git_state {
2589            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
2590                let ignore_parent_path =
2591                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
2592                assert!(self.entry_for_path(&ignore_parent_path).is_some());
2593                assert!(self
2594                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
2595                    .is_some());
2596            }
2597        }
2598    }
2599
2600    #[cfg(test)]
2601    pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
2602        let mut paths = Vec::new();
2603        for entry in self.entries_by_path.cursor::<()>() {
2604            if include_ignored || !entry.is_ignored {
2605                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
2606            }
2607        }
2608        paths.sort_by(|a, b| a.0.cmp(b.0));
2609        paths
2610    }
2611}
2612
2613impl BackgroundScannerState {
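    /// Decides whether the background scanner should descend into a directory:
    /// ignored and external directories are skipped unless they were explicitly
    /// requested (previously scanned, queued as a path to scan, or covered by a path
    /// prefix), while `.git` and the local settings folder are always scanned.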
2614    fn should_scan_directory(&self, entry: &Entry) -> bool {
2615        (!entry.is_external && !entry.is_ignored)
2616            || entry.path.file_name() == Some(*DOT_GIT)
2617            || entry.path.file_name() == Some(local_settings_folder_relative_path().as_os_str())
2618            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
2619            || self
2620                .paths_to_scan
2621                .iter()
2622                .any(|p| p.starts_with(&entry.path))
2623            || self
2624                .path_prefixes_to_scan
2625                .iter()
2626                .any(|p| entry.path.starts_with(p))
2627    }
2628
2629    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
2630        let path = entry.path.clone();
2631        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
2632        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
2633        let mut containing_repository = None;
2634        if !ignore_stack.is_abs_path_ignored(&abs_path, true) {
2635            if let Some((repo_entry, repo)) = self.snapshot.repo_for_path(&path) {
2636                if let Some(workdir_path) = repo_entry.work_directory(&self.snapshot) {
2637                    if let Ok(repo_path) = repo_entry.relativize(&self.snapshot, &path) {
2638                        containing_repository = Some(ScanJobContainingRepository {
2639                            work_directory: workdir_path,
2640                            statuses: repo
2641                                .repo_ptr
2642                                .statuses(&repo_path)
2643                                .log_err()
2644                                .unwrap_or_default(),
2645                        });
2646                    }
2647                }
2648            }
2649        }
2650        if !ancestor_inodes.contains(&entry.inode) {
2651            ancestor_inodes.insert(entry.inode);
2652            scan_job_tx
2653                .try_send(ScanJob {
2654                    abs_path,
2655                    path,
2656                    ignore_stack,
2657                    scan_queue: scan_job_tx.clone(),
2658                    ancestor_inodes,
2659                    is_external: entry.is_external,
2660                    containing_repository,
2661                })
2662                .unwrap();
2663        }
2664    }
2665
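        /// Keeps `ProjectEntryId`s stable across rescans: if an entry with the same
        /// inode and mtime was recently removed, its old id is reused (e.g. across a
        /// rename observed as remove + add); otherwise an existing entry at the same
        /// path keeps its id.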
2666    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2667        if let Some(mtime) = entry.mtime {
2668            if let Some(removed_entry_id) = self.removed_entry_ids.remove(&(entry.inode, mtime)) {
2669                entry.id = removed_entry_id;
2670            } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
2671                entry.id = existing_entry.id;
2672            }
2673        }
2674    }
2675
2676    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2677        self.reuse_entry_id(&mut entry);
2678        let entry = self.snapshot.insert_entry(entry, fs);
2679        if entry.path.file_name() == Some(&DOT_GIT) {
2680            self.build_git_repository(entry.path.clone(), fs);
2681        }
2682
2683        #[cfg(test)]
2684        self.snapshot.check_invariants(false);
2685
2686        entry
2687    }
2688
2689    fn populate_dir(
2690        &mut self,
2691        parent_path: &Arc<Path>,
2692        entries: impl IntoIterator<Item = Entry>,
2693        ignore: Option<Arc<Gitignore>>,
2694    ) {
2695        let mut parent_entry = if let Some(parent_entry) = self
2696            .snapshot
2697            .entries_by_path
2698            .get(&PathKey(parent_path.clone()), &())
2699        {
2700            parent_entry.clone()
2701        } else {
2702            log::warn!(
2703                "populating a directory {:?} that has been removed",
2704                parent_path
2705            );
2706            return;
2707        };
2708
2709        match parent_entry.kind {
2710            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
2711            EntryKind::Dir => {}
2712            _ => return,
2713        }
2714
2715        if let Some(ignore) = ignore {
2716            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
2717            self.snapshot
2718                .ignores_by_parent_abs_path
2719                .insert(abs_parent_path, (ignore, false));
2720        }
2721
2722        let parent_entry_id = parent_entry.id;
2723        self.scanned_dirs.insert(parent_entry_id);
2724        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2725        let mut entries_by_id_edits = Vec::new();
2726
2727        for entry in entries {
2728            entries_by_id_edits.push(Edit::Insert(PathEntry {
2729                id: entry.id,
2730                path: entry.path.clone(),
2731                is_ignored: entry.is_ignored,
2732                scan_id: self.snapshot.scan_id,
2733            }));
2734            entries_by_path_edits.push(Edit::Insert(entry));
2735        }
2736
2737        self.snapshot
2738            .entries_by_path
2739            .edit(entries_by_path_edits, &());
2740        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2741
2742        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
2743            self.changed_paths.insert(ix, parent_path.clone());
2744        }
2745
2746        #[cfg(test)]
2747        self.snapshot.check_invariants(false);
2748    }
2749
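        /// Removes `path` and everything beneath it from the snapshot, remembering the
        /// removed ids keyed by `(inode, mtime)` so they can be reused if the same file
        /// reappears, and flags the parent directory's ignore state for reloading when
        /// a `.gitignore` file is removed.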
2750    fn remove_path(&mut self, path: &Path) {
2751        let mut new_entries;
2752        let removed_entries;
2753        {
2754            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
2755            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2756            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2757            new_entries.append(cursor.suffix(&()), &());
2758        }
2759        self.snapshot.entries_by_path = new_entries;
2760
2761        let mut entries_by_id_edits = Vec::new();
2762        for entry in removed_entries.cursor::<()>() {
2763            if let Some(mtime) = entry.mtime {
2764                let removed_entry_id = self
2765                    .removed_entry_ids
2766                    .entry((entry.inode, mtime))
2767                    .or_insert(entry.id);
2768                *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2769            }
2770            entries_by_id_edits.push(Edit::Remove(entry.id));
2771        }
2772        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2773
2774        if path.file_name() == Some(&GITIGNORE) {
2775            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
2776            if let Some((_, needs_update)) = self
2777                .snapshot
2778                .ignores_by_parent_abs_path
2779                .get_mut(abs_parent_path.as_path())
2780            {
2781                *needs_update = true;
2782            }
2783        }
2784
2785        #[cfg(test)]
2786        self.snapshot.check_invariants(false);
2787    }
2788
2789    fn build_git_repository(
2790        &mut self,
2791        dot_git_path: Arc<Path>,
2792        fs: &dyn Fs,
2793    ) -> Option<(RepositoryWorkDirectory, Arc<dyn GitRepository>)> {
2794        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
2795            Some(parent_dir) => {
2796                // Guard against repositories nested inside another repository's `.git` metadata directory
2797                if parent_dir.iter().any(|component| component == *DOT_GIT) {
2798                    log::info!(
2799                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
2800                    );
2801                    return None;
2802                };
2803                log::info!(
2804                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
2805                );
2806
2807                parent_dir.into()
2808            }
2809            None => {
2810                // `dot_git_path.parent().is_none()` means the `.git` directory is the opened worktree itself;
2811                // no files inside that directory are tracked by git, so there is no need to build a repository around it.
2812                log::info!(
2813                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
2814                );
2815                return None;
2816            }
2817        };
2818
2819        self.build_git_repository_for_path(work_dir_path, dot_git_path, None, fs)
2820    }
2821
2822    fn build_git_repository_for_path(
2823        &mut self,
2824        work_dir_path: Arc<Path>,
2825        dot_git_path: Arc<Path>,
2826        location_in_repo: Option<Arc<Path>>,
2827        fs: &dyn Fs,
2828    ) -> Option<(RepositoryWorkDirectory, Arc<dyn GitRepository>)> {
2829        let work_dir_id = self
2830            .snapshot
2831            .entry_for_path(work_dir_path.clone())
2832            .map(|entry| entry.id)?;
2833
2834        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
2835            return None;
2836        }
2837
2838        let abs_path = self.snapshot.abs_path.join(&dot_git_path);
2839        let t0 = Instant::now();
2840        let repository = fs.open_repo(&abs_path)?;
2841        log::trace!("constructed libgit2 repo in {:?}", t0.elapsed());
2842        let work_directory = RepositoryWorkDirectory(work_dir_path.clone());
2843
2844        self.snapshot.repository_entries.insert(
2845            work_directory.clone(),
2846            RepositoryEntry {
2847                work_directory: work_dir_id.into(),
2848                branch: repository.branch_name().map(Into::into),
2849                location_in_repo,
2850            },
2851        );
2852        self.snapshot.git_repositories.insert(
2853            work_dir_id,
2854            LocalRepositoryEntry {
2855                git_dir_scan_id: 0,
2856                repo_ptr: repository.clone(),
2857                git_dir_path: dot_git_path.clone(),
2858            },
2859        );
2860
2861        Some((work_directory, repository))
2862    }
2863}
2864
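    /// Loads a `.gitignore` file from disk and parses it into a `Gitignore` matcher
    /// rooted at the file's parent directory.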
2865async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2866    let contents = fs.load(abs_path).await?;
2867    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2868    let mut builder = GitignoreBuilder::new(parent);
2869    for line in contents.lines() {
2870        builder.add_line(Some(abs_path.into()), line)?;
2871    }
2872    Ok(builder.build()?)
2873}
2874
2875impl WorktreeId {
2876    pub fn from_usize(handle_id: usize) -> Self {
2877        Self(handle_id)
2878    }
2879
2880    pub fn from_proto(id: u64) -> Self {
2881        Self(id as usize)
2882    }
2883
2884    pub fn to_proto(&self) -> u64 {
2885        self.0 as u64
2886    }
2887
2888    pub fn to_usize(&self) -> usize {
2889        self.0
2890    }
2891}
2892
2893impl fmt::Display for WorktreeId {
2894    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2895        self.0.fmt(f)
2896    }
2897}
2898
2899impl Deref for Worktree {
2900    type Target = Snapshot;
2901
2902    fn deref(&self) -> &Self::Target {
2903        match self {
2904            Worktree::Local(worktree) => &worktree.snapshot,
2905            Worktree::Remote(worktree) => &worktree.snapshot,
2906        }
2907    }
2908}
2909
2910impl Deref for LocalWorktree {
2911    type Target = LocalSnapshot;
2912
2913    fn deref(&self) -> &Self::Target {
2914        &self.snapshot
2915    }
2916}
2917
2918impl Deref for RemoteWorktree {
2919    type Target = Snapshot;
2920
2921    fn deref(&self) -> &Self::Target {
2922        &self.snapshot
2923    }
2924}
2925
2926impl fmt::Debug for LocalWorktree {
2927    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2928        self.snapshot.fmt(f)
2929    }
2930}
2931
2932impl fmt::Debug for Snapshot {
2933    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2934        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2935        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2936
2937        impl<'a> fmt::Debug for EntriesByPath<'a> {
2938            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2939                f.debug_map()
2940                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2941                    .finish()
2942            }
2943        }
2944
2945        impl<'a> fmt::Debug for EntriesById<'a> {
2946            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2947                f.debug_list().entries(self.0.iter()).finish()
2948            }
2949        }
2950
2951        f.debug_struct("Snapshot")
2952            .field("id", &self.id)
2953            .field("root_name", &self.root_name)
2954            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2955            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2956            .finish()
2957    }
2958}
2959
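    /// A handle to a file within a worktree, identified by the worktree model and the file's
    /// path inside it. Implements [`language::File`], allowing buffers to refer back to their
    /// backing entry, whether local or remote.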
2960#[derive(Clone, PartialEq)]
2961pub struct File {
2962    pub worktree: Model<Worktree>,
2963    pub path: Arc<Path>,
2964    pub mtime: Option<SystemTime>,
2965    pub entry_id: Option<ProjectEntryId>,
2966    pub is_local: bool,
2967    pub is_deleted: bool,
2968    pub is_private: bool,
2969}
2970
2971impl language::File for File {
2972    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2973        if self.is_local {
2974            Some(self)
2975        } else {
2976            None
2977        }
2978    }
2979
2980    fn mtime(&self) -> Option<SystemTime> {
2981        self.mtime
2982    }
2983
2984    fn path(&self) -> &Arc<Path> {
2985        &self.path
2986    }
2987
2988    fn full_path(&self, cx: &AppContext) -> PathBuf {
2989        let mut full_path = PathBuf::new();
2990        let worktree = self.worktree.read(cx);
2991
2992        if worktree.is_visible() {
2993            full_path.push(worktree.root_name());
2994        } else {
2995            let path = worktree.abs_path();
2996
2997            if worktree.is_local() && path.starts_with(home_dir().as_path()) {
2998                full_path.push("~");
2999                full_path.push(path.strip_prefix(home_dir().as_path()).unwrap());
3000            } else {
3001                full_path.push(path)
3002            }
3003        }
3004
3005        if self.path.components().next().is_some() {
3006            full_path.push(&self.path);
3007        }
3008
3009        full_path
3010    }
3011
3012    /// Returns the last component of this handle's absolute path. If this handle refers to the root
3013    /// of its worktree, then this method will return the name of the worktree itself.
3014    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
3015        self.path
3016            .file_name()
3017            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
3018    }
3019
3020    fn worktree_id(&self) -> usize {
3021        self.worktree.entity_id().as_u64() as usize
3022    }
3023
3024    fn is_deleted(&self) -> bool {
3025        self.is_deleted
3026    }
3027
3028    fn as_any(&self) -> &dyn Any {
3029        self
3030    }
3031
3032    fn to_proto(&self, cx: &AppContext) -> rpc::proto::File {
3033        rpc::proto::File {
3034            worktree_id: self.worktree.read(cx).id().to_proto(),
3035            entry_id: self.entry_id.map(|id| id.to_proto()),
3036            path: self.path.to_string_lossy().into(),
3037            mtime: self.mtime.map(|time| time.into()),
3038            is_deleted: self.is_deleted,
3039        }
3040    }
3041
3042    fn is_private(&self) -> bool {
3043        self.is_private
3044    }
3045}
3046
3047impl language::LocalFile for File {
3048    fn abs_path(&self, cx: &AppContext) -> PathBuf {
3049        let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
3050        if self.path.as_ref() == Path::new("") {
3051            worktree_path.to_path_buf()
3052        } else {
3053            worktree_path.join(&self.path)
3054        }
3055    }
3056
3057    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
3058        let worktree = self.worktree.read(cx).as_local().unwrap();
3059        let abs_path = worktree.absolutize(&self.path);
3060        let fs = worktree.fs.clone();
3061        cx.background_executor()
3062            .spawn(async move { fs.load(&abs_path?).await })
3063    }
3064}
3065
3066impl File {
3067    pub fn for_entry(entry: Entry, worktree: Model<Worktree>) -> Arc<Self> {
3068        Arc::new(Self {
3069            worktree,
3070            path: entry.path.clone(),
3071            mtime: entry.mtime,
3072            entry_id: Some(entry.id),
3073            is_local: true,
3074            is_deleted: false,
3075            is_private: entry.is_private,
3076        })
3077    }
3078
3079    pub fn from_proto(
3080        proto: rpc::proto::File,
3081        worktree: Model<Worktree>,
3082        cx: &AppContext,
3083    ) -> Result<Self> {
3084        let worktree_id = worktree
3085            .read(cx)
3086            .as_remote()
3087            .ok_or_else(|| anyhow!("not remote"))?
3088            .id();
3089
3090        if worktree_id.to_proto() != proto.worktree_id {
3091            return Err(anyhow!("worktree id does not match file"));
3092        }
3093
3094        Ok(Self {
3095            worktree,
3096            path: Path::new(&proto.path).into(),
3097            mtime: proto.mtime.map(|time| time.into()),
3098            entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
3099            is_local: false,
3100            is_deleted: proto.is_deleted,
3101            is_private: false,
3102        })
3103    }
3104
3105    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
3106        file.and_then(|f| f.as_any().downcast_ref())
3107    }
3108
3109    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
3110        self.worktree.read(cx).id()
3111    }
3112
3113    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
3114        if self.is_deleted {
3115            None
3116        } else {
3117            self.entry_id
3118        }
3119    }
3120}
3121
3122#[derive(Clone, Debug, PartialEq, Eq, Hash)]
3123pub struct Entry {
3124    pub id: ProjectEntryId,
3125    pub kind: EntryKind,
3126    pub path: Arc<Path>,
3127    pub inode: u64,
3128    pub mtime: Option<SystemTime>,
3129
3130    pub canonical_path: Option<PathBuf>,
3131    pub is_symlink: bool,
3132    /// Whether this entry is ignored by Git.
3133    ///
3134    /// We only scan ignored entries once the directory is expanded and
3135    /// exclude them from searches.
3136    pub is_ignored: bool,
3137
3138    /// Whether this entry's canonical path is outside of the worktree.
3139    /// This means the entry is only accessible from the worktree root via a
3140    /// symlink.
3141    ///
3142    /// We only scan entries outside of the worktree once the symlinked
3143    /// directory is expanded. External entries are treated like gitignored
3144    /// entries in that they are not included in searches.
3145    pub is_external: bool,
3146    pub git_status: Option<GitFileStatus>,
3147    /// Whether this entry is considered to be a `.env` file.
3148    pub is_private: bool,
3149}
3150
3151#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
3152pub enum EntryKind {
3153    UnloadedDir,
3154    PendingDir,
3155    Dir,
3156    File(CharBag),
3157}
3158
3159#[derive(Clone, Copy, Debug, PartialEq)]
3160pub enum PathChange {
3161    /// A filesystem entry was created.
3162    Added,
3163    /// A filesystem entry was removed.
3164    Removed,
3165    /// A filesystem entry was updated.
3166    Updated,
3167    /// A filesystem entry was either updated or added. We don't know
3168    /// whether or not it already existed, because the path had not
3169    /// been loaded before the event.
3170    AddedOrUpdated,
3171    /// A filesystem entry was found during the initial scan of the worktree.
3172    Loaded,
3173}
3174
3175pub struct GitRepositoryChange {
3176    /// The previous state of the repository, if it already existed.
3177    pub old_repository: Option<RepositoryEntry>,
3178}
3179
3180pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
3181pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3182
3183impl Entry {
3184    fn new(
3185        path: Arc<Path>,
3186        metadata: &fs::Metadata,
3187        next_entry_id: &AtomicUsize,
3188        root_char_bag: CharBag,
3189        canonical_path: Option<PathBuf>,
3190    ) -> Self {
3191        Self {
3192            id: ProjectEntryId::new(next_entry_id),
3193            kind: if metadata.is_dir {
3194                EntryKind::PendingDir
3195            } else {
3196                EntryKind::File(char_bag_for_path(root_char_bag, &path))
3197            },
3198            path,
3199            inode: metadata.inode,
3200            mtime: Some(metadata.mtime),
3201            canonical_path,
3202            is_symlink: metadata.is_symlink,
3203            is_ignored: false,
3204            is_external: false,
3205            is_private: false,
3206            git_status: None,
3207        }
3208    }
3209
3210    pub fn is_created(&self) -> bool {
3211        self.mtime.is_some()
3212    }
3213
3214    pub fn is_dir(&self) -> bool {
3215        self.kind.is_dir()
3216    }
3217
3218    pub fn is_file(&self) -> bool {
3219        self.kind.is_file()
3220    }
3221
3222    pub fn git_status(&self) -> Option<GitFileStatus> {
3223        self.git_status
3224    }
3225}
3226
3227impl EntryKind {
3228    pub fn is_dir(&self) -> bool {
3229        matches!(
3230            self,
3231            EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
3232        )
3233    }
3234
3235    pub fn is_unloaded(&self) -> bool {
3236        matches!(self, EntryKind::UnloadedDir)
3237    }
3238
3239    pub fn is_file(&self) -> bool {
3240        matches!(self, EntryKind::File(_))
3241    }
3242}
3243
3244impl sum_tree::Item for Entry {
3245    type Summary = EntrySummary;
3246
3247    fn summary(&self) -> Self::Summary {
3248        let non_ignored_count = if self.is_ignored || self.is_external {
3249            0
3250        } else {
3251            1
3252        };
3253        let file_count;
3254        let non_ignored_file_count;
3255        if self.is_file() {
3256            file_count = 1;
3257            non_ignored_file_count = non_ignored_count;
3258        } else {
3259            file_count = 0;
3260            non_ignored_file_count = 0;
3261        }
3262
3263        let mut statuses = GitStatuses::default();
3264        match self.git_status {
3265            Some(status) => match status {
3266                GitFileStatus::Added => statuses.added = 1,
3267                GitFileStatus::Modified => statuses.modified = 1,
3268                GitFileStatus::Conflict => statuses.conflict = 1,
3269            },
3270            None => {}
3271        }
3272
3273        EntrySummary {
3274            max_path: self.path.clone(),
3275            count: 1,
3276            non_ignored_count,
3277            file_count,
3278            non_ignored_file_count,
3279            statuses,
3280        }
3281    }
3282}
3283
3284impl sum_tree::KeyedItem for Entry {
3285    type Key = PathKey;
3286
3287    fn key(&self) -> Self::Key {
3288        PathKey(self.path.clone())
3289    }
3290}
3291
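    /// The [`sum_tree::Summary`] aggregated over [`Entry`] items: the maximum path in a
    /// subtree together with entry, file, and non-ignored counts and accumulated git statuses.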
3292#[derive(Clone, Debug)]
3293pub struct EntrySummary {
3294    max_path: Arc<Path>,
3295    count: usize,
3296    non_ignored_count: usize,
3297    file_count: usize,
3298    non_ignored_file_count: usize,
3299    statuses: GitStatuses,
3300}
3301
3302impl Default for EntrySummary {
3303    fn default() -> Self {
3304        Self {
3305            max_path: Arc::from(Path::new("")),
3306            count: 0,
3307            non_ignored_count: 0,
3308            file_count: 0,
3309            non_ignored_file_count: 0,
3310            statuses: Default::default(),
3311        }
3312    }
3313}
3314
3315impl sum_tree::Summary for EntrySummary {
3316    type Context = ();
3317
3318    fn add_summary(&mut self, rhs: &Self, _: &()) {
3319        self.max_path = rhs.max_path.clone();
3320        self.count += rhs.count;
3321        self.non_ignored_count += rhs.non_ignored_count;
3322        self.file_count += rhs.file_count;
3323        self.non_ignored_file_count += rhs.non_ignored_file_count;
3324        self.statuses += rhs.statuses;
3325    }
3326}
3327
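    /// A compact record of an entry keyed by [`ProjectEntryId`], used to look up an entry's
    /// path and ignore status by id.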
3328#[derive(Clone, Debug)]
3329struct PathEntry {
3330    id: ProjectEntryId,
3331    path: Arc<Path>,
3332    is_ignored: bool,
3333    scan_id: usize,
3334}
3335
3336impl sum_tree::Item for PathEntry {
3337    type Summary = PathEntrySummary;
3338
3339    fn summary(&self) -> Self::Summary {
3340        PathEntrySummary { max_id: self.id }
3341    }
3342}
3343
3344impl sum_tree::KeyedItem for PathEntry {
3345    type Key = ProjectEntryId;
3346
3347    fn key(&self) -> Self::Key {
3348        self.id
3349    }
3350}
3351
3352#[derive(Clone, Debug, Default)]
3353struct PathEntrySummary {
3354    max_id: ProjectEntryId,
3355}
3356
3357impl sum_tree::Summary for PathEntrySummary {
3358    type Context = ();
3359
3360    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
3361        self.max_id = summary.max_id;
3362    }
3363}
3364
3365impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
3366    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
3367        *self = summary.max_id;
3368    }
3369}
3370
3371#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
3372pub struct PathKey(Arc<Path>);
3373
3374impl Default for PathKey {
3375    fn default() -> Self {
3376        Self(Path::new("").into())
3377    }
3378}
3379
3380impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
3381    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3382        self.0 = summary.max_path.clone();
3383    }
3384}
3385
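    /// Scans a local worktree on the background executor: it performs the initial directory
    /// walk and then keeps the snapshot up to date in response to file system events, explicit
    /// scan requests, and newly added path prefixes.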
3386struct BackgroundScanner {
3387    state: Mutex<BackgroundScannerState>,
3388    fs: Arc<dyn Fs>,
3389    fs_case_sensitive: bool,
3390    status_updates_tx: UnboundedSender<ScanState>,
3391    executor: BackgroundExecutor,
3392    scan_requests_rx: channel::Receiver<ScanRequest>,
3393    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
3394    next_entry_id: Arc<AtomicUsize>,
3395    phase: BackgroundScannerPhase,
3396    watcher: Arc<dyn Watcher>,
3397    settings: WorktreeSettings,
3398    share_private_files: bool,
3399}
3400
3401#[derive(PartialEq)]
3402enum BackgroundScannerPhase {
3403    InitialScan,
3404    EventsReceivedDuringInitialScan,
3405    Events,
3406}
3407
3408impl BackgroundScanner {
3409    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathBuf>>>>) {
3410        use futures::FutureExt as _;
3411
3412        // If the worktree root does not contain a git repository, then find
3413        // the git repository in an ancestor directory. Find any gitignore files
3414        // in ancestor directories.
3415        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
3416        for (index, ancestor) in root_abs_path.ancestors().enumerate() {
3417            if index != 0 {
3418                if let Ok(ignore) =
3419                    build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
3420                {
3421                    self.state
3422                        .lock()
3423                        .snapshot
3424                        .ignores_by_parent_abs_path
3425                        .insert(ancestor.into(), (ignore.into(), false));
3426                }
3427            }
3428
3429            let ancestor_dot_git = ancestor.join(&*DOT_GIT);
3430            if ancestor_dot_git.is_dir() {
3431                if index != 0 {
3432                    // We canonicalize, since the FS events use the canonicalized path.
3433                    if let Some(ancestor_dot_git) =
3434                        self.fs.canonicalize(&ancestor_dot_git).await.log_err()
3435                    {
3436                        let (ancestor_git_events, _) =
3437                            self.fs.watch(&ancestor_dot_git, FS_WATCH_LATENCY).await;
3438                        fs_events_rx = select(fs_events_rx, ancestor_git_events).boxed();
3439
3440                        // We associate the external git repo with our root folder and
3441                        // also mark where in the git repo the root folder is located.
3442                        self.state.lock().build_git_repository_for_path(
3443                            Path::new("").into(),
3444                            ancestor_dot_git.into(),
3445                            Some(root_abs_path.strip_prefix(ancestor).unwrap().into()),
3446                            self.fs.as_ref(),
3447                        );
3448                    };
3449                }
3450
3451                // Reached root of git repository.
3452                break;
3453            }
3454        }
3455
3456        let (scan_job_tx, scan_job_rx) = channel::unbounded();
3457        {
3458            let mut state = self.state.lock();
3459            state.snapshot.scan_id += 1;
3460            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
3461                let ignore_stack = state
3462                    .snapshot
3463                    .ignore_stack_for_abs_path(&root_abs_path, true);
3464                if ignore_stack.is_abs_path_ignored(&root_abs_path, true) {
3465                    root_entry.is_ignored = true;
3466                    state.insert_entry(root_entry.clone(), self.fs.as_ref());
3467                }
3468                state.enqueue_scan_dir(root_abs_path, &root_entry, &scan_job_tx);
3469            }
3470        };
3471
3472        // Perform an initial scan of the directory.
3473        drop(scan_job_tx);
3474        self.scan_dirs(true, scan_job_rx).await;
3475        {
3476            let mut state = self.state.lock();
3477            state.snapshot.completed_scan_id = state.snapshot.scan_id;
3478        }
3479
3480        self.send_status_update(false, None);
3481
3482        // Process any FS events that occurred while performing the initial scan.
3483        // For these events, the reported changes cannot be as precise, because we didn't
3484        // have the previous state loaded yet.
3485        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
3486        if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
3487            while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
3488                paths.extend(more_paths);
3489            }
3490            self.process_events(paths).await;
3491        }
3492
3493        // Continue processing events until the worktree is dropped.
3494        self.phase = BackgroundScannerPhase::Events;
3495
3496        loop {
3497            select_biased! {
3498                // Process any path refresh requests from the worktree. Prioritize
3499                // these before handling changes reported by the filesystem.
3500                request = self.scan_requests_rx.recv().fuse() => {
3501                    let Ok(request) = request else { break };
3502                    if !self.process_scan_request(request, false).await {
3503                        return;
3504                    }
3505                }
3506
3507                path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
3508                    let Ok(path_prefix) = path_prefix else { break };
3509                    log::trace!("adding path prefix {:?}", path_prefix);
3510
3511                    let did_scan = self.forcibly_load_paths(&[path_prefix.clone()]).await;
3512                    if did_scan {
3513                        let abs_path =
3514                        {
3515                            let mut state = self.state.lock();
3516                            state.path_prefixes_to_scan.insert(path_prefix.clone());
3517                            state.snapshot.abs_path.join(&path_prefix)
3518                        };
3519
3520                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
3521                            self.process_events(vec![abs_path]).await;
3522                        }
3523                    }
3524                }
3525
3526                paths = fs_events_rx.next().fuse() => {
3527                    let Some(mut paths) = paths else { break };
3528                    while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
3529                        paths.extend(more_paths);
3530                    }
3531                    self.process_events(paths.clone()).await;
3532                }
3533            }
3534        }
3535    }
3536
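        /// Reloads the entries for the requested paths and then reports a status update.
        /// Returns `false` when the update can no longer be delivered, which callers treat as
        /// a signal to stop scanning.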
3537    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
3538        log::debug!("rescanning paths {:?}", request.relative_paths);
3539
3540        request.relative_paths.sort_unstable();
3541        self.forcibly_load_paths(&request.relative_paths).await;
3542
3543        let root_path = self.state.lock().snapshot.abs_path.clone();
3544        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
3545            Ok(path) => path,
3546            Err(err) => {
3547                log::error!("failed to canonicalize root path: {}", err);
3548                return true;
3549            }
3550        };
3551        let abs_paths = request
3552            .relative_paths
3553            .iter()
3554            .map(|path| {
3555                if path.file_name().is_some() {
3556                    root_canonical_path.join(path)
3557                } else {
3558                    root_canonical_path.clone()
3559                }
3560            })
3561            .collect::<Vec<_>>();
3562
3563        {
3564            let mut state = self.state.lock();
3565            let is_idle = state.snapshot.completed_scan_id == state.snapshot.scan_id;
3566            state.snapshot.scan_id += 1;
3567            if is_idle {
3568                state.snapshot.completed_scan_id = state.snapshot.scan_id;
3569            }
3570        }
3571
3572        self.reload_entries_for_paths(
3573            root_path,
3574            root_canonical_path,
3575            &request.relative_paths,
3576            abs_paths,
3577            None,
3578        )
3579        .await;
3580
3581        self.send_status_update(scanning, Some(request.done))
3582    }
3583
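        /// Handles a batch of file system events: maps the absolute paths back into the
        /// worktree, reloads the affected entries, refreshes ignore statuses, rescans any
        /// changed git repositories, and finally broadcasts a status update.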
3584    async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
3585        let root_path = self.state.lock().snapshot.abs_path.clone();
3586        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
3587            Ok(path) => path,
3588            Err(err) => {
3589                log::error!("failed to canonicalize root path: {}", err);
3590                return;
3591            }
3592        };
3593
3594        let mut relative_paths = Vec::with_capacity(abs_paths.len());
3595        let mut dot_git_paths = Vec::new();
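            // Sort the paths and drop duplicates as well as any path whose ancestor is also
            // in the batch, since rescanning the ancestor already covers its descendants.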
3596        abs_paths.sort_unstable();
3597        abs_paths.dedup_by(|a, b| a.starts_with(&b));
3598        abs_paths.retain(|abs_path| {
3599            let snapshot = &self.state.lock().snapshot;
3600            {
3601                let mut is_git_related = false;
3602                if let Some(dot_git_dir) = abs_path
3603                    .ancestors()
3604                    .find(|ancestor| ancestor.file_name() == Some(*DOT_GIT))
3605                {
3606                    let dot_git_path = dot_git_dir
3607                        .strip_prefix(&root_canonical_path)
3608                        .unwrap_or(dot_git_dir)
3609                        .to_path_buf();
3610                    if !dot_git_paths.contains(&dot_git_path) {
3611                        dot_git_paths.push(dot_git_path);
3612                    }
3613                    is_git_related = true;
3614                }
3615
3616                let relative_path: Arc<Path> =
3617                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3618                        path.into()
3619                    } else {
3620                        if is_git_related {
3621                            log::debug!(
3622                              "ignoring event {abs_path:?}, since it's in git dir outside of root path {root_canonical_path:?}",
3623                            );
3624                        } else {
3625                            log::error!(
3626                              "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
3627                            );
3628                        }
3629                        return false;
3630                    };
3631
3632                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
3633                    snapshot
3634                        .entry_for_path(parent)
3635                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
3636                });
3637                if !parent_dir_is_loaded {
3638                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
3639                    return false;
3640                }
3641
3642                if self.settings.is_path_excluded(&relative_path) {
3643                    if !is_git_related {
3644                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
3645                    }
3646                    return false;
3647                }
3648
3649                relative_paths.push(relative_path);
3650                true
3651            }
3652        });
3653
3654        if relative_paths.is_empty() && dot_git_paths.is_empty() {
3655            return;
3656        }
3657
3658        self.state.lock().snapshot.scan_id += 1;
3659
3660        let (scan_job_tx, scan_job_rx) = channel::unbounded();
3661        log::debug!("received fs events {:?}", relative_paths);
3662        self.reload_entries_for_paths(
3663            root_path,
3664            root_canonical_path,
3665            &relative_paths,
3666            abs_paths,
3667            Some(scan_job_tx.clone()),
3668        )
3669        .await;
3670
3671        self.update_ignore_statuses(scan_job_tx).await;
3672        self.scan_dirs(false, scan_job_rx).await;
3673
3674        if !dot_git_paths.is_empty() {
3675            self.update_git_repositories(dot_git_paths).await;
3676        }
3677
3678        {
3679            let mut state = self.state.lock();
3680            state.snapshot.completed_scan_id = state.snapshot.scan_id;
3681            for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
3682                state.scanned_dirs.remove(&entry_id);
3683            }
3684        }
3685
3686        self.send_status_update(false, None);
3687    }
3688
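        /// Ensures that the given paths (and any unloaded ancestor directories) are scanned,
        /// draining the resulting scan jobs before returning. Returns `true` if any paths
        /// actually required loading.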
3689    async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
3690        let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
3691        {
3692            let mut state = self.state.lock();
3693            let root_path = state.snapshot.abs_path.clone();
3694            for path in paths {
3695                for ancestor in path.ancestors() {
3696                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
3697                        if entry.kind == EntryKind::UnloadedDir {
3698                            let abs_path = root_path.join(ancestor);
3699                            state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
3700                            state.paths_to_scan.insert(path.clone());
3701                            break;
3702                        }
3703                    }
3704                }
3705            }
3706            drop(scan_job_tx);
3707        }
3708        while let Some(job) = scan_job_rx.next().await {
3709            self.scan_dir(&job).await.log_err();
3710        }
3711
3712        !mem::take(&mut self.state.lock().paths_to_scan).is_empty()
3713    }
3714
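        /// Drains the scan queue using one worker per CPU, interleaving path refresh requests
        /// and (optionally) periodic progress updates while the scan is in progress.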
3715    async fn scan_dirs(
3716        &self,
3717        enable_progress_updates: bool,
3718        scan_jobs_rx: channel::Receiver<ScanJob>,
3719    ) {
3720        use futures::FutureExt as _;
3721
3722        if self
3723            .status_updates_tx
3724            .unbounded_send(ScanState::Started)
3725            .is_err()
3726        {
3727            return;
3728        }
3729
3730        let progress_update_count = AtomicUsize::new(0);
3731        self.executor
3732            .scoped(|scope| {
3733                for _ in 0..self.executor.num_cpus() {
3734                    scope.spawn(async {
3735                        let mut last_progress_update_count = 0;
3736                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
3737                        futures::pin_mut!(progress_update_timer);
3738
3739                        loop {
3740                            select_biased! {
3741                                // Process any path refresh requests before moving on to process
3742                                // the scan queue, so that user operations are prioritized.
3743                                request = self.scan_requests_rx.recv().fuse() => {
3744                                    let Ok(request) = request else { break };
3745                                    if !self.process_scan_request(request, true).await {
3746                                        return;
3747                                    }
3748                                }
3749
3750                                // Send periodic progress updates to the worktree. Use an atomic counter
3751                                // to ensure that only one of the workers sends a progress update after
3752                                // the update interval elapses.
3753                                _ = progress_update_timer => {
3754                                    match progress_update_count.compare_exchange(
3755                                        last_progress_update_count,
3756                                        last_progress_update_count + 1,
3757                                        SeqCst,
3758                                        SeqCst
3759                                    ) {
3760                                        Ok(_) => {
3761                                            last_progress_update_count += 1;
3762                                            self.send_status_update(true, None);
3763                                        }
3764                                        Err(count) => {
3765                                            last_progress_update_count = count;
3766                                        }
3767                                    }
3768                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
3769                                }
3770
3771                                // Recursively load directories from the file system.
3772                                job = scan_jobs_rx.recv().fuse() => {
3773                                    let Ok(job) = job else { break };
3774                                    if let Err(err) = self.scan_dir(&job).await {
3775                                        if job.path.as_ref() != Path::new("") {
3776                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
3777                                        }
3778                                    }
3779                                }
3780                            }
3781                        }
3782                    })
3783                }
3784            })
3785            .await;
3786    }
3787
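        /// Publishes the current snapshot, along with the changes since the previously
        /// published snapshot, to the worktree. Returns `false` if the receiving side has
        /// been dropped.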
3788    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
3789        let mut state = self.state.lock();
3790        if state.changed_paths.is_empty() && scanning {
3791            return true;
3792        }
3793
3794        let new_snapshot = state.snapshot.clone();
3795        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
3796        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
3797        state.changed_paths.clear();
3798
3799        self.status_updates_tx
3800            .unbounded_send(ScanState::Updated {
3801                snapshot: new_snapshot,
3802                changes,
3803                scanning,
3804                barrier,
3805            })
3806            .is_ok()
3807    }
3808
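        /// Reads the children of the directory described by `job`, creating entries for them,
        /// loading any `.git` repository or `.gitignore` found there, and enqueuing scan jobs
        /// for child directories that should also be scanned.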
3809    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
3810        let root_abs_path;
3811        let root_char_bag;
3812        {
3813            let snapshot = &self.state.lock().snapshot;
3814            if self.settings.is_path_excluded(&job.path) {
3815                log::error!("skipping excluded directory {:?}", job.path);
3816                return Ok(());
3817            }
3818            log::debug!("scanning directory {:?}", job.path);
3819            root_abs_path = snapshot.abs_path().clone();
3820            root_char_bag = snapshot.root_char_bag;
3821        }
3822
3823        let next_entry_id = self.next_entry_id.clone();
3824        let mut ignore_stack = job.ignore_stack.clone();
3825        let mut containing_repository = job.containing_repository.clone();
3826        let mut new_ignore = None;
3827        let mut root_canonical_path = None;
3828        let mut new_entries: Vec<Entry> = Vec::new();
3829        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
3830        let mut child_paths = self
3831            .fs
3832            .read_dir(&job.abs_path)
3833            .await?
3834            .filter_map(|entry| async {
3835                match entry {
3836                    Ok(entry) => Some(entry),
3837                    Err(error) => {
3838                        log::error!("error processing entry {:?}", error);
3839                        None
3840                    }
3841                }
3842            })
3843            .collect::<Vec<_>>()
3844            .await;
3845
3846        // Ensure that .git and .gitignore are processed first.
3847        swap_to_front(&mut child_paths, *GITIGNORE);
3848        swap_to_front(&mut child_paths, *DOT_GIT);
3849
3850        for child_abs_path in child_paths {
3851            let child_abs_path: Arc<Path> = child_abs_path.into();
3852            let child_name = child_abs_path.file_name().unwrap();
3853            let child_path: Arc<Path> = job.path.join(child_name).into();
3854
3855            if child_name == *DOT_GIT {
3856                let repo = self
3857                    .state
3858                    .lock()
3859                    .build_git_repository(child_path.clone(), self.fs.as_ref());
3860                if let Some((work_directory, repository)) = repo {
3861                    let t0 = Instant::now();
3862                    let statuses = repository
3863                        .statuses(Path::new(""))
3864                        .log_err()
3865                        .unwrap_or_default();
3866                    log::trace!("computed git status in {:?}", t0.elapsed());
3867                    containing_repository = Some(ScanJobContainingRepository {
3868                        work_directory,
3869                        statuses,
3870                    });
3871                }
3872                self.watcher.add(child_abs_path.as_ref()).log_err();
3873            } else if child_name == *GITIGNORE {
3874                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
3875                    Ok(ignore) => {
3876                        let ignore = Arc::new(ignore);
3877                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3878                        new_ignore = Some(ignore);
3879                    }
3880                    Err(error) => {
3881                        log::error!(
3882                            "error loading .gitignore file {:?} - {:?}",
3883                            child_name,
3884                            error
3885                        );
3886                    }
3887                }
3888            }
3889
3890            if self.settings.is_path_excluded(&child_path) {
3891                log::debug!("skipping excluded child entry {child_path:?}");
3892                self.state.lock().remove_path(&child_path);
3893                continue;
3894            }
3895
3896            let child_metadata = match self.fs.metadata(&child_abs_path).await {
3897                Ok(Some(metadata)) => metadata,
3898                Ok(None) => continue,
3899                Err(err) => {
3900                    log::error!("error processing {child_abs_path:?}: {err:?}");
3901                    continue;
3902                }
3903            };
3904
3905            let mut child_entry = Entry::new(
3906                child_path.clone(),
3907                &child_metadata,
3908                &next_entry_id,
3909                root_char_bag,
3910                None,
3911            );
3912
3913            if job.is_external {
3914                child_entry.is_external = true;
3915            } else if child_metadata.is_symlink {
3916                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
3917                    Ok(path) => path,
3918                    Err(err) => {
3919                        log::error!(
3920                            "error reading target of symlink {:?}: {:?}",
3921                            child_abs_path,
3922                            err
3923                        );
3924                        continue;
3925                    }
3926                };
3927
3928                // lazily canonicalize the root path in order to determine if
3929                // symlinks point outside of the worktree.
3930                let root_canonical_path = match &root_canonical_path {
3931                    Some(path) => path,
3932                    None => match self.fs.canonicalize(&root_abs_path).await {
3933                        Ok(path) => root_canonical_path.insert(path),
3934                        Err(err) => {
3935                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
3936                            continue;
3937                        }
3938                    },
3939                };
3940
3941                if !canonical_path.starts_with(root_canonical_path) {
3942                    child_entry.is_external = true;
3943                }
3944
3945                child_entry.canonical_path = Some(canonical_path);
3946            }
3947
3948            if child_entry.is_dir() {
3949                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
3950
3951                // Avoid recursing indefinitely (and eventually crashing) in the case of a recursive symlink
3952                if job.ancestor_inodes.contains(&child_entry.inode) {
3953                    new_jobs.push(None);
3954                } else {
3955                    let mut ancestor_inodes = job.ancestor_inodes.clone();
3956                    ancestor_inodes.insert(child_entry.inode);
3957
3958                    new_jobs.push(Some(ScanJob {
3959                        abs_path: child_abs_path.clone(),
3960                        path: child_path,
3961                        is_external: child_entry.is_external,
3962                        ignore_stack: if child_entry.is_ignored {
3963                            IgnoreStack::all()
3964                        } else {
3965                            ignore_stack.clone()
3966                        },
3967                        ancestor_inodes,
3968                        scan_queue: job.scan_queue.clone(),
3969                        containing_repository: containing_repository.clone(),
3970                    }));
3971                }
3972            } else {
3973                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
3974                if !child_entry.is_ignored {
3975                    if let Some(repo) = &containing_repository {
3976                        if let Ok(repo_path) = child_entry.path.strip_prefix(&repo.work_directory) {
3977                            let repo_path = RepoPath(repo_path.into());
3978                            child_entry.git_status = repo.statuses.get(&repo_path);
3979                        }
3980                    }
3981                }
3982            }
3983
3984            {
3985                let relative_path = job.path.join(child_name);
3986                if self.is_path_private(&relative_path) {
3987                    log::debug!("detected private file: {relative_path:?}");
3988                    child_entry.is_private = true;
3989                }
3990            }
3991
3992            new_entries.push(child_entry);
3993        }
3994
3995        let mut state = self.state.lock();
3996
3997        // Identify any subdirectories that should not be scanned.
3998        let mut job_ix = 0;
3999        for entry in &mut new_entries {
4000            state.reuse_entry_id(entry);
4001            if entry.is_dir() {
4002                if state.should_scan_directory(entry) {
4003                    job_ix += 1;
4004                } else {
4005                    log::debug!("defer scanning directory {:?}", entry.path);
4006                    entry.kind = EntryKind::UnloadedDir;
4007                    new_jobs.remove(job_ix);
4008                }
4009            }
4010        }
4011
4012        state.populate_dir(&job.path, new_entries, new_ignore);
4013        self.watcher.add(job.abs_path.as_ref()).log_err();
4014
4015        for new_job in new_jobs.into_iter().flatten() {
4016            job.scan_queue
4017                .try_send(new_job)
4018                .expect("channel is unbounded");
4019        }
4020
4021        Ok(())
4022    }
4023
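        /// Refreshes the entries for an explicit set of paths: stale entries are removed first
        /// so that renames can be detected, then fresh metadata is read from the file system
        /// and new entries are inserted, optionally enqueuing directories for a recursive
        /// rescan.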
4024    async fn reload_entries_for_paths(
4025        &self,
4026        root_abs_path: Arc<Path>,
4027        root_canonical_path: PathBuf,
4028        relative_paths: &[Arc<Path>],
4029        abs_paths: Vec<PathBuf>,
4030        scan_queue_tx: Option<Sender<ScanJob>>,
4031    ) {
4032        let metadata = futures::future::join_all(
4033            abs_paths
4034                .iter()
4035                .map(|abs_path| async move {
4036                    let metadata = self.fs.metadata(abs_path).await?;
4037                    if let Some(metadata) = metadata {
4038                        let canonical_path = self.fs.canonicalize(abs_path).await?;
4039
4040                        // On a case-insensitive filesystem (the default on macOS), we only want
4041                        // to keep the metadata for a non-symlink file if its absolute path matches
4042                        // the canonical path.
4043                        // If they differ, this is likely a case-only rename (`mv test.txt TEST.TXT`),
4044                        // and we want to discard the metadata for the old path (`test.txt`) so it's
4045                        // treated as removed.
4046                        if !self.fs_case_sensitive && !metadata.is_symlink {
4047                            let canonical_file_name = canonical_path.file_name();
4048                            let file_name = abs_path.file_name();
4049                            if canonical_file_name != file_name {
4050                                return Ok(None);
4051                            }
4052                        }
4053
4054                        anyhow::Ok(Some((metadata, canonical_path)))
4055                    } else {
4056                        Ok(None)
4057                    }
4058                })
4059                .collect::<Vec<_>>(),
4060        )
4061        .await;
4062
4063        let mut state = self.state.lock();
4064        let doing_recursive_update = scan_queue_tx.is_some();
4065
4066        // Remove any entries for paths that no longer exist or are being recursively
4067        // refreshed. Do this before adding any new entries, so that renames can be
4068        // detected regardless of the order of the paths.
4069        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
4070            if matches!(metadata, Ok(None)) || doing_recursive_update {
4071                log::trace!("remove path {:?}", path);
4072                state.remove_path(path);
4073            }
4074        }
4075
4076        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
4077            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
4078            match metadata {
4079                Ok(Some((metadata, canonical_path))) => {
4080                    let ignore_stack = state
4081                        .snapshot
4082                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
4083
4084                    let mut fs_entry = Entry::new(
4085                        path.clone(),
4086                        metadata,
4087                        self.next_entry_id.as_ref(),
4088                        state.snapshot.root_char_bag,
4089                        if metadata.is_symlink {
4090                            Some(canonical_path.to_path_buf())
4091                        } else {
4092                            None
4093                        },
4094                    );
4095
4096                    let is_dir = fs_entry.is_dir();
4097                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
4098
4099                    fs_entry.is_external = !canonical_path.starts_with(&root_canonical_path);
4100                    fs_entry.is_private = self.is_path_private(path);
4101
4102                    if !is_dir && !fs_entry.is_ignored && !fs_entry.is_external {
4103                        if let Some((repo_entry, repo)) = state.snapshot.repo_for_path(path) {
4104                            if let Ok(repo_path) = repo_entry.relativize(&state.snapshot, path) {
4105                                fs_entry.git_status = repo.repo_ptr.status(&repo_path);
4106                            }
4107                        }
4108                    }
4109
4110                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, fs_entry.is_dir()) {
4111                        if state.should_scan_directory(&fs_entry)
4112                            || (fs_entry.path.as_os_str().is_empty()
4113                                && abs_path.file_name() == Some(*DOT_GIT))
4114                        {
4115                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
4116                        } else {
4117                            fs_entry.kind = EntryKind::UnloadedDir;
4118                        }
4119                    }
4120
4121                    state.insert_entry(fs_entry, self.fs.as_ref());
4122                }
4123                Ok(None) => {
4124                    self.remove_repo_path(path, &mut state.snapshot);
4125                }
4126                Err(err) => {
4127                    // TODO - create a special 'error' entry in the entries tree to mark this
4128                    log::error!("error reading file {abs_path:?} on event: {err:#}");
4129                }
4130            }
4131        }
4132
4133        util::extend_sorted(
4134            &mut state.changed_paths,
4135            relative_paths.iter().cloned(),
4136            usize::MAX,
4137            Ord::cmp,
4138        );
4139    }
4140
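        /// If `path` was the working directory of a tracked git repository, removes that
        /// repository from the snapshot.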
4141    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
4142        if !path
4143            .components()
4144            .any(|component| component.as_os_str() == *DOT_GIT)
4145        {
4146            if let Some(repository) = snapshot.repository_for_work_directory(path) {
4147                let entry = repository.work_directory.0;
4148                snapshot.git_repositories.remove(&entry);
4149                snapshot
4150                    .snapshot
4151                    .repository_entries
4152                    .remove(&RepositoryWorkDirectory(path.into()));
4153                return Some(());
4154            }
4155        }
4156
4157        // TODO statuses
4158        // Track when a .git is removed and iterate over the file system there
4159
4160        Some(())
4161    }
4162
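        /// Recomputes ignore statuses beneath every `.gitignore` file that changed, fanning
        /// the work out across background workers and enqueuing scans for directories that
        /// are no longer ignored.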
4163    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
4164        use futures::FutureExt as _;
4165
4166        let mut ignores_to_update = Vec::new();
4167        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
4168        let prev_snapshot;
4169        {
4170            let snapshot = &mut self.state.lock().snapshot;
4171            let abs_path = snapshot.abs_path.clone();
4172            snapshot
4173                .ignores_by_parent_abs_path
4174                .retain(|parent_abs_path, (_, needs_update)| {
4175                    if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
4176                        if *needs_update {
4177                            *needs_update = false;
4178                            if snapshot.snapshot.entry_for_path(parent_path).is_some() {
4179                                ignores_to_update.push(parent_abs_path.clone());
4180                            }
4181                        }
4182
4183                        let ignore_path = parent_path.join(&*GITIGNORE);
4184                        if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
4185                            return false;
4186                        }
4187                    }
4188                    true
4189                });
4190
4191            ignores_to_update.sort_unstable();
4192            let mut ignores_to_update = ignores_to_update.into_iter().peekable();
4193            while let Some(parent_abs_path) = ignores_to_update.next() {
4194                while ignores_to_update
4195                    .peek()
4196                    .map_or(false, |p| p.starts_with(&parent_abs_path))
4197                {
4198                    ignores_to_update.next().unwrap();
4199                }
4200
4201                let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
4202                ignore_queue_tx
4203                    .send_blocking(UpdateIgnoreStatusJob {
4204                        abs_path: parent_abs_path,
4205                        ignore_stack,
4206                        ignore_queue: ignore_queue_tx.clone(),
4207                        scan_queue: scan_job_tx.clone(),
4208                    })
4209                    .unwrap();
4210            }
4211
4212            prev_snapshot = snapshot.clone();
4213        }
4214        drop(ignore_queue_tx);
4215
4216        self.executor
4217            .scoped(|scope| {
4218                for _ in 0..self.executor.num_cpus() {
4219                    scope.spawn(async {
4220                        loop {
4221                            select_biased! {
4222                                // Process any path refresh requests before moving on to process
4223                                // the queue of ignore statuses.
4224                                request = self.scan_requests_rx.recv().fuse() => {
4225                                    let Ok(request) = request else { break };
4226                                    if !self.process_scan_request(request, true).await {
4227                                        return;
4228                                    }
4229                                }
4230
4231                                // Recursively process directories whose ignores have changed.
4232                                job = ignore_queue_rx.recv().fuse() => {
4233                                    let Ok(job) = job else { break };
4234                                    self.update_ignore_status(job, &prev_snapshot).await;
4235                                }
4236                            }
4237                        }
4238                    });
4239                }
4240            })
4241            .await;
4242    }
4243
4244    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
4245        log::trace!("update ignore status {:?}", job.abs_path);
4246
4247        let mut ignore_stack = job.ignore_stack;
4248        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
4249            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
4250        }
4251
4252        let mut entries_by_id_edits = Vec::new();
4253        let mut entries_by_path_edits = Vec::new();
4254        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
4255        let repo = snapshot.repo_for_path(path);
4256        for mut entry in snapshot.child_entries(path).cloned() {
4257            let was_ignored = entry.is_ignored;
4258            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
4259            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
4260
4261            if entry.is_dir() {
4262                let child_ignore_stack = if entry.is_ignored {
4263                    IgnoreStack::all()
4264                } else {
4265                    ignore_stack.clone()
4266                };
4267
4268                // Scan any directories that were previously ignored and weren't previously scanned.
4269                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
4270                    let state = self.state.lock();
4271                    if state.should_scan_directory(&entry) {
4272                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
4273                    }
4274                }
4275
4276                job.ignore_queue
4277                    .send(UpdateIgnoreStatusJob {
4278                        abs_path: abs_path.clone(),
4279                        ignore_stack: child_ignore_stack,
4280                        ignore_queue: job.ignore_queue.clone(),
4281                        scan_queue: job.scan_queue.clone(),
4282                    })
4283                    .await
4284                    .unwrap();
4285            }
4286
4287            if entry.is_ignored != was_ignored {
4288                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
4289                path_entry.scan_id = snapshot.scan_id;
4290                path_entry.is_ignored = entry.is_ignored;
4291                if !entry.is_dir() && !entry.is_ignored && !entry.is_external {
4292                    if let Some((ref repo_entry, local_repo)) = repo {
4293                        if let Ok(repo_path) = repo_entry.relativize(&snapshot, &entry.path) {
4294                            entry.git_status = local_repo.repo_ptr.status(&repo_path);
4295                        }
4296                    }
4297                }
4298                entries_by_id_edits.push(Edit::Insert(path_entry));
4299                entries_by_path_edits.push(Edit::Insert(entry));
4300            }
4301        }
4302
4303        let state = &mut self.state.lock();
4304        for edit in &entries_by_path_edits {
4305            if let Edit::Insert(entry) = edit {
4306                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
4307                    state.changed_paths.insert(ix, entry.path.clone());
4308                }
4309            }
4310        }
4311
4312        state
4313            .snapshot
4314            .entries_by_path
4315            .edit(entries_by_path_edits, &());
4316        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
4317    }
4318
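    /// Reloads the git repositories whose `.git` directories correspond to the given paths,
    /// drops repositories whose `.git` entries no longer exist, and then refreshes git statuses
    /// for the affected work directories while continuing to service scan requests.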
4319    async fn update_git_repositories(&self, dot_git_paths: Vec<PathBuf>) {
4320        log::debug!("reloading repositories: {dot_git_paths:?}");
4321
4322        let mut repo_updates = Vec::new();
4323        {
4324            let mut state = self.state.lock();
4325            let scan_id = state.snapshot.scan_id;
4326            for dot_git_dir in dot_git_paths {
4327                let existing_repository_entry =
4328                    state
4329                        .snapshot
4330                        .git_repositories
4331                        .iter()
4332                        .find_map(|(entry_id, repo)| {
4333                            (repo.git_dir_path.as_ref() == dot_git_dir)
4334                                .then(|| (*entry_id, repo.clone()))
4335                        });
4336
4337                let (work_directory, repository) = match existing_repository_entry {
4338                    None => {
4339                        match state.build_git_repository(dot_git_dir.into(), self.fs.as_ref()) {
4340                            Some(output) => output,
4341                            None => continue,
4342                        }
4343                    }
4344                    Some((entry_id, repository)) => {
4345                        if repository.git_dir_scan_id == scan_id {
4346                            continue;
4347                        }
4348                        let Some(work_dir) = state
4349                            .snapshot
4350                            .entry_for_id(entry_id)
4351                            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
4352                        else {
4353                            continue;
4354                        };
4355
4356                        let repo = &repository.repo_ptr;
4357                        let branch = repo.branch_name();
4358                        repo.reload_index();
4359
4360                        state
4361                            .snapshot
4362                            .git_repositories
4363                            .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
4364                        state
4365                            .snapshot
4366                            .snapshot
4367                            .repository_entries
4368                            .update(&work_dir, |entry| entry.branch = branch.map(Into::into));
4369                        (work_dir, repository.repo_ptr.clone())
4370                    }
4371                };
4372
4373                repo_updates.push(UpdateGitStatusesJob {
4374                    location_in_repo: state
4375                        .snapshot
4376                        .repository_entries
4377                        .get(&work_directory)
4378                        .and_then(|repo| repo.location_in_repo.clone()),
4380                    work_directory,
4381                    repository,
4382                });
4383            }
4384
4385            // Remove any git repositories whose .git entry no longer exists.
4386            let snapshot = &mut state.snapshot;
4387            let mut ids_to_preserve = HashSet::default();
4388            for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
4389                let exists_in_snapshot = snapshot
4390                    .entry_for_id(work_directory_id)
4391                    .map_or(false, |entry| {
4392                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
4393                    });
4394                if exists_in_snapshot {
4395                    ids_to_preserve.insert(work_directory_id);
4396                } else {
4397                    let git_dir_abs_path = snapshot.abs_path().join(&entry.git_dir_path);
4398                    let git_dir_excluded = self.settings.is_path_excluded(&entry.git_dir_path);
4399                    if git_dir_excluded
4400                        && !matches!(
4401                            smol::block_on(self.fs.metadata(&git_dir_abs_path)),
4402                            Ok(None)
4403                        )
4404                    {
4405                        ids_to_preserve.insert(work_directory_id);
4406                    }
4407                }
4408            }
4409
4410            snapshot
4411                .git_repositories
4412                .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
4413            snapshot
4414                .repository_entries
4415                .retain(|_, entry| ids_to_preserve.contains(&entry.work_directory.0));
4416        }
4417
4418        let (mut updates_done_tx, mut updates_done_rx) = barrier::channel();
4419        self.executor
4420            .scoped(|scope| {
4421                scope.spawn(async {
4422                    for repo_update in repo_updates {
4423                        self.update_git_statuses(repo_update);
4424                    }
4425                    updates_done_tx.blocking_send(()).ok();
4426                });
4427
4428                scope.spawn(async {
4429                    loop {
4430                        select_biased! {
4431                            // Process any path refresh requests before moving on to process
4432                            // the queue of git statuses.
4433                            request = self.scan_requests_rx.recv().fuse() => {
4434                                let Ok(request) = request else { break };
4435                                if !self.process_scan_request(request, true).await {
4436                                    return;
4437                                }
4438                            }
4439                            _ = updates_done_rx.recv().fuse() => break,
4440                        }
4441                    }
4442                });
4443            })
4444            .await;
4445    }
4446
4447    /// Update the git statuses for a given batch of entries.
4448    fn update_git_statuses(&self, job: UpdateGitStatusesJob) {
4449        log::trace!("updating git statuses for repo {:?}", job.work_directory.0);
4450        let t0 = Instant::now();
4451        let Some(statuses) = job.repository.statuses(Path::new("")).log_err() else {
4452            return;
4453        };
4454        log::trace!(
4455            "computed git statuses for repo {:?} in {:?}",
4456            job.work_directory.0,
4457            t0.elapsed()
4458        );
4459
4460        let t0 = Instant::now();
4461        let mut changes = Vec::new();
4462        let snapshot = self.state.lock().snapshot.snapshot.clone();
4463        for file in snapshot.traverse_from_path(true, false, false, job.work_directory.0.as_ref()) {
4464            let Ok(repo_path) = file.path.strip_prefix(&job.work_directory.0) else {
4465                break;
4466            };
4467            let git_status = if let Some(location) = &job.location_in_repo {
4468                statuses.get(&location.join(repo_path))
4469            } else {
4470                statuses.get(&repo_path)
4471            };
4472            if file.git_status != git_status {
4473                let mut entry = file.clone();
4474                entry.git_status = git_status;
4475                changes.push((entry.path, git_status));
4476            }
4477        }
4478
4479        let mut state = self.state.lock();
4480        let edits = changes
4481            .iter()
4482            .filter_map(|(path, git_status)| {
4483                let entry = state.snapshot.entry_for_path(path)?.clone();
4484                Some(Edit::Insert(Entry {
4485                    git_status: *git_status,
4486                    ..entry
4487                }))
4488            })
4489            .collect();
4490
4491        // Apply the git status changes.
4492        util::extend_sorted(
4493            &mut state.changed_paths,
4494            changes.iter().map(|p| p.0.clone()),
4495            usize::MAX,
4496            Ord::cmp,
4497        );
4498        state.snapshot.entries_by_path.edit(edits, &());
4499        log::trace!(
4500            "applied git status updates for repo {:?} in {:?}",
4501            job.work_directory.0,
4502            t0.elapsed(),
4503        );
4504    }
4505
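    /// Computes the set of entry changes between two snapshots for the given event paths,
    /// walking both `entries_by_path` trees in lockstep and classifying each difference as
    /// added, removed, updated, or newly loaded.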
4506    fn build_change_set(
4507        &self,
4508        old_snapshot: &Snapshot,
4509        new_snapshot: &Snapshot,
4510        event_paths: &[Arc<Path>],
4511    ) -> UpdatedEntriesSet {
4512        use BackgroundScannerPhase::*;
4513        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};
4514
4515        // Identify which paths have changed. Use the known set of changed
4516        // parent paths to optimize the search.
4517        let mut changes = Vec::new();
4518        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
4519        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
4520        let mut last_newly_loaded_dir_path = None;
4521        old_paths.next(&());
4522        new_paths.next(&());
4523        for path in event_paths {
4524            let path = PathKey(path.clone());
4525            if old_paths.item().map_or(false, |e| e.path < path.0) {
4526                old_paths.seek_forward(&path, Bias::Left, &());
4527            }
4528            if new_paths.item().map_or(false, |e| e.path < path.0) {
4529                new_paths.seek_forward(&path, Bias::Left, &());
4530            }
4531            loop {
4532                match (old_paths.item(), new_paths.item()) {
4533                    (Some(old_entry), Some(new_entry)) => {
4534                        if old_entry.path > path.0
4535                            && new_entry.path > path.0
4536                            && !old_entry.path.starts_with(&path.0)
4537                            && !new_entry.path.starts_with(&path.0)
4538                        {
4539                            break;
4540                        }
4541
4542                        match Ord::cmp(&old_entry.path, &new_entry.path) {
4543                            Ordering::Less => {
4544                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
4545                                old_paths.next(&());
4546                            }
4547                            Ordering::Equal => {
4548                                if self.phase == EventsReceivedDuringInitialScan {
4549                                    if old_entry.id != new_entry.id {
4550                                        changes.push((
4551                                            old_entry.path.clone(),
4552                                            old_entry.id,
4553                                            Removed,
4554                                        ));
4555                                    }
4556                                    // If the worktree was not fully initialized when this event was generated,
4557                                    // we can't know whether this entry was added during the scan or whether
4558                                    // it was merely updated.
4559                                    changes.push((
4560                                        new_entry.path.clone(),
4561                                        new_entry.id,
4562                                        AddedOrUpdated,
4563                                    ));
4564                                } else if old_entry.id != new_entry.id {
4565                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
4566                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
4567                                } else if old_entry != new_entry {
4568                                    if old_entry.kind.is_unloaded() {
4569                                        last_newly_loaded_dir_path = Some(&new_entry.path);
4570                                        changes.push((
4571                                            new_entry.path.clone(),
4572                                            new_entry.id,
4573                                            Loaded,
4574                                        ));
4575                                    } else {
4576                                        changes.push((
4577                                            new_entry.path.clone(),
4578                                            new_entry.id,
4579                                            Updated,
4580                                        ));
4581                                    }
4582                                }
4583                                old_paths.next(&());
4584                                new_paths.next(&());
4585                            }
4586                            Ordering::Greater => {
4587                                let is_newly_loaded = self.phase == InitialScan
4588                                    || last_newly_loaded_dir_path
4589                                        .as_ref()
4590                                        .map_or(false, |dir| new_entry.path.starts_with(&dir));
4591                                changes.push((
4592                                    new_entry.path.clone(),
4593                                    new_entry.id,
4594                                    if is_newly_loaded { Loaded } else { Added },
4595                                ));
4596                                new_paths.next(&());
4597                            }
4598                        }
4599                    }
4600                    (Some(old_entry), None) => {
4601                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
4602                        old_paths.next(&());
4603                    }
4604                    (None, Some(new_entry)) => {
4605                        let is_newly_loaded = self.phase == InitialScan
4606                            || last_newly_loaded_dir_path
4607                                .as_ref()
4608                                .map_or(false, |dir| new_entry.path.starts_with(&dir));
4609                        changes.push((
4610                            new_entry.path.clone(),
4611                            new_entry.id,
4612                            if is_newly_loaded { Loaded } else { Added },
4613                        ));
4614                        new_paths.next(&());
4615                    }
4616                    (None, None) => break,
4617                }
4618            }
4619        }
4620
4621        changes.into()
4622    }
4623
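    /// Waits for the FS watch latency between progress updates, pending forever when scanning
    /// is not running. With a fake filesystem in tests, a random delay is simulated instead.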
4624    async fn progress_timer(&self, running: bool) {
4625        if !running {
4626            return futures::future::pending().await;
4627        }
4628
4629        #[cfg(any(test, feature = "test-support"))]
4630        if self.fs.is_fake() {
4631            return self.executor.simulate_random_delay().await;
4632        }
4633
4634        smol::Timer::after(FS_WATCH_LATENCY).await;
4635    }
4636
4637    fn is_path_private(&self, path: &Path) -> bool {
4638        !self.share_private_files && self.settings.is_path_private(path)
4639    }
4640}
4641
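/// Moves the path with the given file name, if present, to the front of `child_paths`.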
4642fn swap_to_front(child_paths: &mut Vec<PathBuf>, file: &OsStr) {
4643    let position = child_paths
4644        .iter()
4645        .position(|path| path.file_name().unwrap() == file);
4646    if let Some(position) = position {
4647        let temp = child_paths.remove(position);
4648        child_paths.insert(0, temp);
4649    }
4650}
4651
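/// Extends the worktree root's `CharBag` with the lowercased characters of `path`, for use in
/// fuzzy matching.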
4652fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
4653    let mut result = root_char_bag;
4654    result.extend(
4655        path.to_string_lossy()
4656            .chars()
4657            .map(|c| c.to_ascii_lowercase()),
4658    );
4659    result
4660}
4661
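/// A unit of work for the background scanner: a directory to scan, along with the ignore stack
/// and repository context that apply to it.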
4662struct ScanJob {
4663    abs_path: Arc<Path>,
4664    path: Arc<Path>,
4665    ignore_stack: Arc<IgnoreStack>,
4666    scan_queue: Sender<ScanJob>,
4667    ancestor_inodes: TreeSet<u64>,
4668    is_external: bool,
4669    containing_repository: Option<ScanJobContainingRepository>,
4670}
4671
4672#[derive(Clone)]
4673struct ScanJobContainingRepository {
4674    work_directory: RepositoryWorkDirectory,
4675    statuses: GitStatus,
4676}
4677
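/// A request to re-evaluate the ignore status of the entries under `abs_path`.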
4678struct UpdateIgnoreStatusJob {
4679    abs_path: Arc<Path>,
4680    ignore_stack: Arc<IgnoreStack>,
4681    ignore_queue: Sender<UpdateIgnoreStatusJob>,
4682    scan_queue: Sender<ScanJob>,
4683}
4684
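/// A request to recompute git statuses for all entries under a repository's work directory.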
4685struct UpdateGitStatusesJob {
4686    work_directory: RepositoryWorkDirectory,
4687    location_in_repo: Option<Arc<Path>>,
4688    repository: Arc<dyn GitRepository>,
4689}
4690
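/// Test-support helpers for flushing pending file-system events through a worktree model.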
4691pub trait WorktreeModelHandle {
4692    #[cfg(any(test, feature = "test-support"))]
4693    fn flush_fs_events<'a>(
4694        &self,
4695        cx: &'a mut gpui::TestAppContext,
4696    ) -> futures::future::LocalBoxFuture<'a, ()>;
4697
4698    #[cfg(any(test, feature = "test-support"))]
4699    fn flush_fs_events_in_root_git_repository<'a>(
4700        &self,
4701        cx: &'a mut gpui::TestAppContext,
4702    ) -> futures::future::LocalBoxFuture<'a, ()>;
4703}
4704
4705impl WorktreeModelHandle for Model<Worktree> {
4706    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
4707    // occurred before the worktree was constructed. These events can cause the worktree to perform
4708    // extra directory scans and emit extra scan-state notifications.
4709    //
4710    // This function mutates the worktree's directory and waits for those mutations to be picked up,
4711    // to ensure that all redundant FS events have already been processed.
4712    #[cfg(any(test, feature = "test-support"))]
4713    fn flush_fs_events<'a>(
4714        &self,
4715        cx: &'a mut gpui::TestAppContext,
4716    ) -> futures::future::LocalBoxFuture<'a, ()> {
4717        let file_name = "fs-event-sentinel";
4718
4719        let tree = self.clone();
4720        let (fs, root_path) = self.update(cx, |tree, _| {
4721            let tree = tree.as_local().unwrap();
4722            (tree.fs.clone(), tree.abs_path().clone())
4723        });
4724
4725        async move {
4726            fs.create_file(&root_path.join(file_name), Default::default())
4727                .await
4728                .unwrap();
4729
4730            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
4731                .await;
4732
4733            fs.remove_file(&root_path.join(file_name), Default::default())
4734                .await
4735                .unwrap();
4736            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
4737                .await;
4738
4739            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4740                .await;
4741        }
4742        .boxed_local()
4743    }
4744
4745    // This function is similar to flush_fs_events, except that it waits for events to be flushed in
4746    // the .git folder of the root repository.
4747    // The reason for its existence is that a repository's .git folder might live *outside* of the
4748    // worktree and thus its FS events might go through a different path.
4749    // In order to flush those, we need to create artificial events in the .git folder and wait
4750    // for the repository to be reloaded.
4751    #[cfg(any(test, feature = "test-support"))]
4752    fn flush_fs_events_in_root_git_repository<'a>(
4753        &self,
4754        cx: &'a mut gpui::TestAppContext,
4755    ) -> futures::future::LocalBoxFuture<'a, ()> {
4756        let file_name = "fs-event-sentinel";
4757
4758        let tree = self.clone();
4759        let (fs, root_path, mut git_dir_scan_id) = self.update(cx, |tree, _| {
4760            let tree = tree.as_local().unwrap();
4761            let root_entry = tree.root_git_entry().unwrap();
4762            let local_repo_entry = tree.get_local_repo(&root_entry).unwrap();
4763            (
4764                tree.fs.clone(),
4765                local_repo_entry.git_dir_path.clone(),
4766                local_repo_entry.git_dir_scan_id,
4767            )
4768        });
4769
4770        let scan_id_increased = |tree: &mut Worktree, git_dir_scan_id: &mut usize| {
4771            let root_entry = tree.root_git_entry().unwrap();
4772            let local_repo_entry = tree
4773                .as_local()
4774                .unwrap()
4775                .get_local_repo(&root_entry)
4776                .unwrap();
4777
4778            if local_repo_entry.git_dir_scan_id > *git_dir_scan_id {
4779                *git_dir_scan_id = local_repo_entry.git_dir_scan_id;
4780                true
4781            } else {
4782                false
4783            }
4784        };
4785
4786        async move {
4787            fs.create_file(&root_path.join(file_name), Default::default())
4788                .await
4789                .unwrap();
4790
4791            cx.condition(&tree, |tree, _| {
4792                scan_id_increased(tree, &mut git_dir_scan_id)
4793            })
4794            .await;
4795
4796            fs.remove_file(&root_path.join(file_name), Default::default())
4797                .await
4798                .unwrap();
4799
4800            cx.condition(&tree, |tree, _| {
4801                scan_id_increased(tree, &mut git_dir_scan_id)
4802            })
4803            .await;
4804
4805            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4806                .await;
4807        }
4808        .boxed_local()
4809    }
4810}
4811
4812#[derive(Clone, Debug)]
4813struct TraversalProgress<'a> {
4814    max_path: &'a Path,
4815    count: usize,
4816    non_ignored_count: usize,
4817    file_count: usize,
4818    non_ignored_file_count: usize,
4819}
4820
4821impl<'a> TraversalProgress<'a> {
4822    fn count(&self, include_files: bool, include_dirs: bool, include_ignored: bool) -> usize {
4823        match (include_files, include_dirs, include_ignored) {
4824            (true, true, true) => self.count,
4825            (true, true, false) => self.non_ignored_count,
4826            (true, false, true) => self.file_count,
4827            (true, false, false) => self.non_ignored_file_count,
4828            (false, true, true) => self.count - self.file_count,
4829            (false, true, false) => self.non_ignored_count - self.non_ignored_file_count,
4830            (false, false, _) => 0,
4831        }
4832    }
4833}
4834
4835impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
4836    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
4837        self.max_path = summary.max_path.as_ref();
4838        self.count += summary.count;
4839        self.non_ignored_count += summary.non_ignored_count;
4840        self.file_count += summary.file_count;
4841        self.non_ignored_file_count += summary.non_ignored_file_count;
4842    }
4843}
4844
4845impl<'a> Default for TraversalProgress<'a> {
4846    fn default() -> Self {
4847        Self {
4848            max_path: Path::new(""),
4849            count: 0,
4850            non_ignored_count: 0,
4851            file_count: 0,
4852            non_ignored_file_count: 0,
4853        }
4854    }
4855}
4856
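/// Counts of added, modified, and conflicted entries, accumulated as a sum-tree dimension over
/// `EntrySummary`.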
4857#[derive(Clone, Debug, Default, Copy)]
4858struct GitStatuses {
4859    added: usize,
4860    modified: usize,
4861    conflict: usize,
4862}
4863
4864impl AddAssign for GitStatuses {
4865    fn add_assign(&mut self, rhs: Self) {
4866        self.added += rhs.added;
4867        self.modified += rhs.modified;
4868        self.conflict += rhs.conflict;
4869    }
4870}
4871
4872impl Sub for GitStatuses {
4873    type Output = GitStatuses;
4874
4875    fn sub(self, rhs: Self) -> Self::Output {
4876        GitStatuses {
4877            added: self.added - rhs.added,
4878            modified: self.modified - rhs.modified,
4879            conflict: self.conflict - rhs.conflict,
4880        }
4881    }
4882}
4883
4884impl<'a> sum_tree::Dimension<'a, EntrySummary> for GitStatuses {
4885    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
4886        *self += summary.statuses
4887    }
4888}
4889
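/// A cursor over a snapshot's entries that iterates in path order and can optionally skip
/// files, directories, or ignored entries.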
4890pub struct Traversal<'a> {
4891    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
4892    include_ignored: bool,
4893    include_files: bool,
4894    include_dirs: bool,
4895}
4896
4897impl<'a> Traversal<'a> {
4898    fn new(
4899        entries: &'a SumTree<Entry>,
4900        include_files: bool,
4901        include_dirs: bool,
4902        include_ignored: bool,
4903        start_path: &Path,
4904    ) -> Self {
4905        let mut cursor = entries.cursor();
4906        cursor.seek(&TraversalTarget::Path(start_path), Bias::Left, &());
4907        let mut traversal = Self {
4908            cursor,
4909            include_files,
4910            include_dirs,
4911            include_ignored,
4912        };
4913        if traversal.end_offset() == traversal.start_offset() {
4914            traversal.next();
4915        }
4916        traversal
4917    }

4918    pub fn advance(&mut self) -> bool {
4919        self.advance_by(1)
4920    }
4921
4922    pub fn advance_by(&mut self, count: usize) -> bool {
4923        self.cursor.seek_forward(
4924            &TraversalTarget::Count {
4925                count: self.end_offset() + count,
4926                include_dirs: self.include_dirs,
4927                include_files: self.include_files,
4928                include_ignored: self.include_ignored,
4929            },
4930            Bias::Left,
4931            &(),
4932        )
4933    }
4934
4935    pub fn advance_to_sibling(&mut self) -> bool {
4936        while let Some(entry) = self.cursor.item() {
4937            self.cursor.seek_forward(
4938                &TraversalTarget::PathSuccessor(&entry.path),
4939                Bias::Left,
4940                &(),
4941            );
4942            if let Some(entry) = self.cursor.item() {
4943                if (self.include_files || !entry.is_file())
4944                    && (self.include_dirs || !entry.is_dir())
4945                    && (self.include_ignored || !entry.is_ignored)
4946                {
4947                    return true;
4948                }
4949            }
4950        }
4951        false
4952    }
4953
4954    pub fn back_to_parent(&mut self) -> bool {
4955        let Some(parent_path) = self.cursor.item().and_then(|entry| entry.path.parent()) else {
4956            return false;
4957        };
4958        self.cursor
4959            .seek(&TraversalTarget::Path(parent_path), Bias::Left, &())
4960    }
4961
4962    pub fn entry(&self) -> Option<&'a Entry> {
4963        self.cursor.item()
4964    }
4965
4966    pub fn start_offset(&self) -> usize {
4967        self.cursor
4968            .start()
4969            .count(self.include_files, self.include_dirs, self.include_ignored)
4970    }
4971
4972    pub fn end_offset(&self) -> usize {
4973        self.cursor
4974            .end(&())
4975            .count(self.include_files, self.include_dirs, self.include_ignored)
4976    }
4977}
4978
4979impl<'a> Iterator for Traversal<'a> {
4980    type Item = &'a Entry;
4981
4982    fn next(&mut self) -> Option<Self::Item> {
4983        if let Some(item) = self.entry() {
4984            self.advance();
4985            Some(item)
4986        } else {
4987            None
4988        }
4989    }
4990}
4991
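/// A seek target for `Traversal`: an exact path, the position just past a path and all of its
/// descendants, or a target count of visited entries.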
4992#[derive(Debug)]
4993enum TraversalTarget<'a> {
4994    Path(&'a Path),
4995    PathSuccessor(&'a Path),
4996    Count {
4997        count: usize,
4998        include_files: bool,
4999        include_ignored: bool,
5000        include_dirs: bool,
5001    },
5002}
5003
5004impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
5005    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
5006        match self {
5007            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
5008            TraversalTarget::PathSuccessor(path) => {
5009                if cursor_location.max_path.starts_with(path) {
5010                    Ordering::Greater
5011                } else {
5012                    Ordering::Equal
5013                }
5014            }
5015            TraversalTarget::Count {
5016                count,
5017                include_files,
5018                include_dirs,
5019                include_ignored,
5020            } => Ord::cmp(
5021                count,
5022                &cursor_location.count(*include_files, *include_dirs, *include_ignored),
5023            ),
5024        }
5025    }
5026}
5027
5028impl<'a, 'b> SeekTarget<'a, EntrySummary, (TraversalProgress<'a>, GitStatuses)>
5029    for TraversalTarget<'b>
5030{
5031    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitStatuses), _: &()) -> Ordering {
5032        self.cmp(&cursor_location.0, &())
5033    }
5034}
5035
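/// An iterator over the direct children of `parent_path`, advancing the underlying traversal
/// to each successive sibling.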
5036pub struct ChildEntriesIter<'a> {
5037    parent_path: &'a Path,
5038    traversal: Traversal<'a>,
5039}
5040
5041impl<'a> Iterator for ChildEntriesIter<'a> {
5042    type Item = &'a Entry;
5043
5044    fn next(&mut self) -> Option<Self::Item> {
5045        if let Some(item) = self.traversal.entry() {
5046            if item.path.starts_with(&self.parent_path) {
5047                self.traversal.advance_to_sibling();
5048                return Some(item);
5049            }
5050        }
5051        None
5052    }
5053}
5054
5055impl<'a> From<&'a Entry> for proto::Entry {
5056    fn from(entry: &'a Entry) -> Self {
5057        Self {
5058            id: entry.id.to_proto(),
5059            is_dir: entry.is_dir(),
5060            path: entry.path.to_string_lossy().into(),
5061            inode: entry.inode,
5062            mtime: entry.mtime.map(|time| time.into()),
5063            is_symlink: entry.is_symlink,
5064            is_ignored: entry.is_ignored,
5065            is_external: entry.is_external,
5066            git_status: entry.git_status.map(git_status_to_proto),
5067        }
5068    }
5069}
5070
5071impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
5072    type Error = anyhow::Error;
5073
5074    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
5075        let kind = if entry.is_dir {
5076            EntryKind::Dir
5077        } else {
5078            let mut char_bag = *root_char_bag;
5079            char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
5080            EntryKind::File(char_bag)
5081        };
5082        let path: Arc<Path> = PathBuf::from(entry.path).into();
5083        Ok(Entry {
5084            id: ProjectEntryId::from_proto(entry.id),
5085            kind,
5086            path,
5087            inode: entry.inode,
5088            mtime: entry.mtime.map(|time| time.into()),
5089            canonical_path: None,
5090            is_ignored: entry.is_ignored,
5091            is_external: entry.is_external,
5092            git_status: git_status_from_proto(entry.git_status),
5093            is_private: false,
5094            is_symlink: entry.is_symlink,
5095        })
5096    }
5097}
5098
5099fn git_status_from_proto(git_status: Option<i32>) -> Option<GitFileStatus> {
5100    git_status.and_then(|status| {
5101        proto::GitStatus::from_i32(status).map(|status| match status {
5102            proto::GitStatus::Added => GitFileStatus::Added,
5103            proto::GitStatus::Modified => GitFileStatus::Modified,
5104            proto::GitStatus::Conflict => GitFileStatus::Conflict,
5105        })
5106    })
5107}
5108
5109fn git_status_to_proto(status: GitFileStatus) -> i32 {
5110    match status {
5111        GitFileStatus::Added => proto::GitStatus::Added as i32,
5112        GitFileStatus::Modified => proto::GitStatus::Modified as i32,
5113        GitFileStatus::Conflict => proto::GitStatus::Conflict as i32,
5114    }
5115}
5116
5117#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
5118pub struct ProjectEntryId(usize);
5119
5120impl ProjectEntryId {
5121    pub const MAX: Self = Self(usize::MAX);
5122    pub const MIN: Self = Self(usize::MIN);
5123
5124    pub fn new(counter: &AtomicUsize) -> Self {
5125        Self(counter.fetch_add(1, SeqCst))
5126    }
5127
5128    pub fn from_proto(id: u64) -> Self {
5129        Self(id as usize)
5130    }
5131
5132    pub fn to_proto(&self) -> u64 {
5133        self.0 as u64
5134    }
5135
5136    pub fn to_usize(&self) -> usize {
5137        self.0
5138    }
5139}
5140
5141#[cfg(any(test, feature = "test-support"))]
5142impl CreatedEntry {
5143    pub fn to_included(self) -> Option<Entry> {
5144        match self {
5145            CreatedEntry::Included(entry) => Some(entry),
5146            CreatedEntry::Excluded { .. } => None,
5147        }
5148    }
5149}