worktree.rs

   1mod ignore;
   2mod worktree_settings;
   3#[cfg(test)]
   4mod worktree_tests;
   5
   6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   7use anyhow::{anyhow, Context as _, Result};
   8use client::{proto, Client};
   9use clock::ReplicaId;
  10use collections::{HashMap, HashSet, VecDeque};
  11use fs::{copy_recursive, RemoveOptions};
  12use fs::{
  13    repository::{GitFileStatus, GitRepository, RepoPath},
  14    Fs,
  15};
  16use futures::{
  17    channel::{
  18        mpsc::{self, UnboundedSender},
  19        oneshot,
  20    },
  21    select_biased,
  22    task::Poll,
  23    FutureExt as _, Stream, StreamExt,
  24};
  25use fuzzy::CharBag;
  26use git::{DOT_GIT, GITIGNORE};
  27use gpui::{
  28    AppContext, AsyncAppContext, BackgroundExecutor, Context, EventEmitter, Model, ModelContext,
  29    Task,
  30};
  31use ignore::IgnoreStack;
  32use itertools::Itertools;
  33use language::{
  34    proto::{deserialize_version, serialize_fingerprint, serialize_line_ending, serialize_version},
  35    Buffer, Capability, DiagnosticEntry, File as _, LineEnding, PointUtf16, Rope, RopeFingerprint,
  36    Unclipped,
  37};
  38use lsp::{DiagnosticSeverity, LanguageServerId};
  39use parking_lot::Mutex;
  40use postage::{
  41    barrier,
  42    prelude::{Sink as _, Stream as _},
  43    watch,
  44};
  45use serde::Serialize;
  46use settings::{Settings, SettingsLocation, SettingsStore};
  47use smol::channel::{self, Sender};
  48use std::{
  49    any::Any,
  50    cmp::{self, Ordering},
  51    convert::TryFrom,
  52    ffi::OsStr,
  53    fmt,
  54    future::Future,
  55    mem,
  56    ops::{AddAssign, Deref, DerefMut, Sub},
  57    path::{Path, PathBuf},
  58    pin::Pin,
  59    sync::{
  60        atomic::{AtomicUsize, Ordering::SeqCst},
  61        Arc,
  62    },
  63    time::{Duration, SystemTime},
  64};
  65use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  66use text::BufferId;
  67use util::{
  68    paths::{PathMatcher, HOME},
  69    ResultExt,
  70};
  71
  72pub use worktree_settings::WorktreeSettings;
  73
  74#[cfg(feature = "test-support")]
  75pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
  76#[cfg(not(feature = "test-support"))]
  77pub const FS_WATCH_LATENCY: Duration = Duration::from_millis(100);
  78
/// A stable identifier for a worktree within a project.
/// Derived from the entity id of the model that owns the worktree
/// (see `WorktreeId::from_usize` usage in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
  81
/// A set of local or remote files that are being opened as part of a project.
/// Responsible for tracking related FS (for local)/collab (for remote) events and corresponding updates.
/// Stores git repositories data and the diagnostics for the file(s).
///
/// Has an absolute path, and may be set to be visible in Zed UI or not.
/// May correspond to a directory or a single file.
/// Possible examples:
/// * a drag and dropped file — may be added as an invisible, "ephemeral" entry to the current worktree
/// * a directory opened in Zed — may be added as a visible entry to the current worktree
///
/// Uses [`Entry`] to track the state of each file/directory, can look up absolute paths for entries.
pub enum Worktree {
    /// A worktree backed by the local filesystem, kept up to date by a
    /// background scanner (see [`LocalWorktree`]).
    Local(LocalWorktree),
    /// A worktree mirrored from a collaborator; its snapshot is maintained by
    /// applying remote `UpdateWorktree` messages (see [`RemoteWorktree`]).
    Remote(RemoteWorktree),
}
  97
/// State for a worktree backed by the local filesystem.
pub struct LocalWorktree {
    /// The latest snapshot applied from the background scanner.
    snapshot: LocalSnapshot,
    /// Sends requests asking the background scanner to rescan specific paths.
    scan_requests_tx: channel::Sender<ScanRequest>,
    /// Sends path prefixes for the background scanner to include in its scans.
    path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
    /// Watch channel holding `true` while a scan is in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    /// Keeps the background scanner tasks alive; replaced when the scanner is
    /// restarted (e.g. after a settings change) and dropped with the worktree.
    _background_scanner_tasks: Vec<Task<()>>,
    /// Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Full diagnostic entries per worktree-relative path, grouped per
    /// language server (kept sorted by server id — see the binary searches in
    /// the diagnostics methods below).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether the underlying filesystem treats paths as case sensitive
    /// (falls back to `true` when detection fails — see `Worktree::local`).
    fs_case_sensitive: bool,
    /// Whether this worktree is shown in the UI.
    visible: bool,
}
 118
/// A request for the background scanner to rescan a set of paths.
struct ScanRequest {
    /// The paths to rescan, relative to the worktree root.
    relative_paths: Vec<Arc<Path>>,
    /// Dropped once the scan's results have been applied, releasing any
    /// waiters blocked on the corresponding barrier receiver.
    done: barrier::Sender,
}
 123
/// State for a worktree that mirrors a collaborator's worktree over collab.
pub struct RemoteWorktree {
    /// The snapshot last observed on the foreground; refreshed from
    /// `background_snapshot` after each applied update (see `Worktree::remote`).
    snapshot: Snapshot,
    /// The snapshot that incoming remote updates are applied to on a
    /// background task.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Feeds remote `UpdateWorktree` messages to the background task;
    /// `None` once the worktree has been disconnected.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters notified once a given scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    /// Error/warning counts per path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    /// Whether this worktree is shown in the UI.
    visible: bool,
    /// Set when the collaboration session ends; see `updates_tx`.
    disconnected: bool,
}
 136
/// A point-in-time view of a worktree's entries and git repositories.
/// Cloneable so it can be handed to background tasks.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root.
    abs_path: Arc<Path>,
    /// File name of the worktree root, used as the display name.
    root_name: String,
    /// Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    /// Entries indexed by worktree-relative path.
    entries_by_path: SumTree<Entry>,
    /// The same entries indexed by id.
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories discovered in the worktree, keyed by work directory.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
 159
/// Metadata about a git repository discovered within the worktree.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// The project entry for the repository's working directory
    /// (the folder containing `.git`).
    pub(crate) work_directory: WorkDirectoryEntry,
    /// The currently checked-out branch name, if known.
    pub(crate) branch: Option<Arc<str>>,
}
 165
 166impl RepositoryEntry {
 167    pub fn branch(&self) -> Option<Arc<str>> {
 168        self.branch.clone()
 169    }
 170
 171    pub fn work_directory_id(&self) -> ProjectEntryId {
 172        *self.work_directory
 173    }
 174
 175    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 176        snapshot
 177            .entry_for_id(self.work_directory_id())
 178            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 179    }
 180
 181    pub fn build_update(&self, _: &Self) -> proto::RepositoryEntry {
 182        proto::RepositoryEntry {
 183            work_directory_id: self.work_directory_id().to_proto(),
 184            branch: self.branch.as_ref().map(|str| str.to_string()),
 185        }
 186    }
 187}
 188
 189impl From<&RepositoryEntry> for proto::RepositoryEntry {
 190    fn from(value: &RepositoryEntry) -> Self {
 191        proto::RepositoryEntry {
 192            work_directory_id: value.work_directory.to_proto(),
 193            branch: value.branch.as_ref().map(|str| str.to_string()),
 194        }
 195    }
 196}
 197
 198/// This path corresponds to the 'content path' (the folder that contains the .git)
 199#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 200pub struct RepositoryWorkDirectory(pub(crate) Arc<Path>);
 201
 202impl Default for RepositoryWorkDirectory {
 203    fn default() -> Self {
 204        RepositoryWorkDirectory(Arc::from(Path::new("")))
 205    }
 206}
 207
 208impl AsRef<Path> for RepositoryWorkDirectory {
 209    fn as_ref(&self) -> &Path {
 210        self.0.as_ref()
 211    }
 212}
 213
/// Newtype over the [`ProjectEntryId`] of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
 216
 217impl WorkDirectoryEntry {
 218    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Result<RepoPath> {
 219        let entry = worktree
 220            .entry_for_id(self.0)
 221            .ok_or_else(|| anyhow!("entry not found"))?;
 222        let path = path
 223            .strip_prefix(&entry.path)
 224            .map_err(|_| anyhow!("could not relativize {:?} against {:?}", path, entry.path))?;
 225        Ok(path.into())
 226    }
 227}
 228
// Allows a `WorkDirectoryEntry` to be used anywhere a `ProjectEntryId` is
// expected (e.g. `*self.work_directory` in `RepositoryEntry`).
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
 236
 237impl From<ProjectEntryId> for WorkDirectoryEntry {
 238    fn from(value: ProjectEntryId) -> Self {
 239        WorkDirectoryEntry(value)
 240    }
 241}
 242
/// A [`Snapshot`] augmented with local-only state used while scanning:
/// gitignore files, git repositories, and the path filters from settings.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    // NOTE(review): the field name says the key is the *absolute* path of the
    // gitignore's parent directory, which contradicts "relative path" in the
    // comment above — confirm which is correct against the scanner code.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    /// Compiled matchers for the `file_scan_exclusions` setting.
    file_scan_exclusions: Vec<PathMatcher>,
    /// Compiled matchers for the `private_files` setting.
    private_files: Vec<PathMatcher>,
}
 255
/// Mutable state owned by the background scanner while it runs.
struct BackgroundScannerState {
    snapshot: LocalSnapshot,
    // Presumably the set of directories whose contents have been fully
    // scanned — confirm against the scanner implementation.
    scanned_dirs: HashSet<ProjectEntryId>,
    path_prefixes_to_scan: HashSet<Arc<Path>>,
    paths_to_scan: HashSet<Arc<Path>>,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Paths that changed as part of the current update.
    changed_paths: Vec<Arc<Path>>,
    /// The snapshot from before the current update, used to compute deltas.
    prev_snapshot: Snapshot,
}
 269
/// A git repository on disk, as tracked by a local worktree.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// The worktree scan id at which the `.git` directory was last scanned.
    pub(crate) git_dir_scan_id: usize,
    /// Shared handle to the underlying git repository implementation.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
 278
impl LocalRepositoryEntry {
    /// Returns the shared handle to the underlying git repository.
    pub fn repo(&self) -> &Arc<Mutex<dyn GitRepository>> {
        &self.repo_ptr
    }
}
 284
// Allow a `LocalSnapshot` to be used anywhere a `Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
 298
/// Progress messages sent from the background scanner to the foreground
/// (see `start_background_scan_tasks`).
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Entries that changed relative to the previous snapshot.
        changes: UpdatedEntriesSet,
        /// When present, dropped after the update is applied so the scan's
        /// requester can observe completion.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}
 308
/// State kept while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Sends new snapshots, along with their entry and git-repository deltas,
    /// to the task that relays updates to collaborators.
    snapshots_tx:
        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
    /// Signalled to ask the relay task to resume sending updates.
    resume_updates: watch::Sender<()>,
    /// Keeps the relay task alive for the duration of the share.
    _maintain_remote_snapshot: Task<Option<()>>,
}
 316
/// Events emitted by a [`Worktree`] as its contents change.
#[derive(Clone)]
pub enum Event {
    /// Entries were added, removed, or modified.
    UpdatedEntries(UpdatedEntriesSet),
    /// Git repository state changed for some work directories.
    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
}

impl EventEmitter<Event> for Worktree {}
 324
 325impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem.
    ///
    /// Stats the root path, builds an initial snapshot seeded with the root
    /// entry, and spawns the background tasks that scan the filesystem and
    /// feed results back to the model. Also installs a global-settings
    /// observer that restarts the background scanner whenever
    /// `file_scan_exclusions` or `private_files` change.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<Model<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();

        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        // Case sensitivity affects path comparisons during scanning; fall back
        // to assuming a case-sensitive filesystem if detection fails.
        let fs_case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });

        // Clones captured by the settings observer below, which may need to
        // restart the background scanner long after this function returns.
        let closure_fs = Arc::clone(&fs);
        let closure_next_entry_id = Arc::clone(&next_entry_id);
        let closure_abs_path = abs_path.to_path_buf();
        cx.new_model(move |cx: &mut ModelContext<Worktree>| {
            cx.observe_global::<SettingsStore>(move |this, cx| {
                if let Self::Local(this) = this {
                    let new_file_scan_exclusions = path_matchers(
                        WorktreeSettings::get_global(cx)
                            .file_scan_exclusions
                            .as_deref(),
                        "file_scan_exclusions",
                    );
                    let new_private_files = path_matchers(
                        WorktreeSettings::get(Some(settings::SettingsLocation {
                            worktree_id: cx.handle().entity_id().as_u64() as usize,
                            path: Path::new("")
                        }), cx).private_files.as_deref(),
                        "private_files",
                    );

                    // Only restart scanning when either setting actually changed.
                    if new_file_scan_exclusions != this.snapshot.file_scan_exclusions
                        || new_private_files != this.snapshot.private_files
                    {
                        this.snapshot.file_scan_exclusions = new_file_scan_exclusions;
                        this.snapshot.private_files = new_private_files;

                        log::info!(
                            "Re-scanning directories, new scan exclude files: {:?}, new dotenv files: {:?}",
                            this.snapshot
                                .file_scan_exclusions
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>(),
                            this.snapshot
                                .private_files
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>()
                        );

                        // Replace the scanner's channels and tasks; dropping
                        // the previous ones shuts the old scanner down.
                        let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
                        let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) =
                            channel::unbounded();
                        this.scan_requests_tx = scan_requests_tx;
                        this.path_prefixes_to_scan_tx = path_prefixes_to_scan_tx;
                        this._background_scanner_tasks = start_background_scan_tasks(
                            &closure_abs_path,
                            this.snapshot(),
                            scan_requests_rx,
                            path_prefixes_to_scan_rx,
                            Arc::clone(&closure_next_entry_id),
                            Arc::clone(&closure_fs),
                            cx,
                        );
                        this.is_scanning = watch::channel_with(true);
                    }
                }
            })
            .detach();

            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                file_scan_exclusions: path_matchers(
                    WorktreeSettings::get_global(cx)
                        .file_scan_exclusions
                        .as_deref(),
                    "file_scan_exclusions",
                ),
                private_files: path_matchers(
                    WorktreeSettings::get(Some(SettingsLocation {
                        worktree_id: cx.handle().entity_id().as_u64() as usize,
                        path: Path::new(""),
                    }), cx).private_files.as_deref(),
                    "private_files",
                ),
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.entity_id().as_u64() as usize),
                    abs_path: abs_path.to_path_buf().into(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry when the path exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
            let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
            let task_snapshot = snapshot.clone();
            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                scan_requests_tx,
                path_prefixes_to_scan_tx,
                _background_scanner_tasks: start_background_scan_tasks(
                    &abs_path,
                    task_snapshot,
                    scan_requests_rx,
                    path_prefixes_to_scan_rx,
                    Arc::clone(&next_entry_id),
                    Arc::clone(&fs),
                    cx,
                ),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                fs_case_sensitive,
                visible,
            })
        })
    }
 482
    /// Creates a worktree that mirrors a collaborator's worktree, described by
    /// the `worktree` metadata received over the wire.
    ///
    /// Spawns one background task that applies incoming `UpdateWorktree`
    /// messages to a shared snapshot, and one foreground task that copies the
    /// updated snapshot back into the model, emits an `UpdatedEntries` event,
    /// and wakes subscribers waiting for particular scan ids.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> Model<Self> {
        cx.new_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates to the shared snapshot off the main thread;
            // failures are logged but do not stop the stream.
            cx.background_executor()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Mirror the background snapshot into the model after each update.
            cx.spawn(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_remote_mut().unwrap();
                        this.snapshot = this.background_snapshot.lock().clone();
                        cx.emit(Event::UpdatedEntries(Arc::from([])));
                        cx.notify();
                        // Resolve subscriptions (ordered by scan id) whose scan
                        // has now been observed.
                        while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                            if this.observed_snapshot(*scan_id) {
                                let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                let _ = tx.send(());
                            } else {
                                break;
                            }
                        }
                    })?;
                }
                anyhow::Ok(())
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
 562
 563    pub fn as_local(&self) -> Option<&LocalWorktree> {
 564        if let Worktree::Local(worktree) = self {
 565            Some(worktree)
 566        } else {
 567            None
 568        }
 569    }
 570
 571    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 572        if let Worktree::Remote(worktree) = self {
 573            Some(worktree)
 574        } else {
 575            None
 576        }
 577    }
 578
 579    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 580        if let Worktree::Local(worktree) = self {
 581            Some(worktree)
 582        } else {
 583            None
 584        }
 585    }
 586
 587    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 588        if let Worktree::Remote(worktree) = self {
 589            Some(worktree)
 590        } else {
 591            None
 592        }
 593    }
 594
 595    pub fn is_local(&self) -> bool {
 596        matches!(self, Worktree::Local(_))
 597    }
 598
 599    pub fn is_remote(&self) -> bool {
 600        !self.is_local()
 601    }
 602
 603    pub fn snapshot(&self) -> Snapshot {
 604        match self {
 605            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 606            Worktree::Remote(worktree) => worktree.snapshot(),
 607        }
 608    }
 609
 610    pub fn scan_id(&self) -> usize {
 611        match self {
 612            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 613            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 614        }
 615    }
 616
 617    pub fn completed_scan_id(&self) -> usize {
 618        match self {
 619            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 620            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 621        }
 622    }
 623
 624    pub fn is_visible(&self) -> bool {
 625        match self {
 626            Worktree::Local(worktree) => worktree.visible,
 627            Worktree::Remote(worktree) => worktree.visible,
 628        }
 629    }
 630
 631    pub fn replica_id(&self) -> ReplicaId {
 632        match self {
 633            Worktree::Local(_) => 0,
 634            Worktree::Remote(worktree) => worktree.replica_id,
 635        }
 636    }
 637
 638    pub fn diagnostic_summaries(
 639        &self,
 640    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 641        match self {
 642            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 643            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 644        }
 645        .iter()
 646        .flat_map(|(path, summaries)| {
 647            summaries
 648                .iter()
 649                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 650        })
 651    }
 652
 653    pub fn abs_path(&self) -> Arc<Path> {
 654        match self {
 655            Worktree::Local(worktree) => worktree.abs_path.clone(),
 656            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 657        }
 658    }
 659
 660    pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<Arc<File>> {
 661        let entry = self.root_entry()?;
 662        Some(File::for_entry(entry.clone(), cx.handle()))
 663    }
 664}
 665
/// Spawns the two tasks that keep a local worktree up to date: a background
/// task running the [`BackgroundScanner`] over filesystem events, and a
/// foreground task that applies each reported [`ScanState`] to the worktree
/// model.
///
/// Returns both task handles; dropping them stops the scanning.
fn start_background_scan_tasks(
    abs_path: &Path,
    snapshot: LocalSnapshot,
    scan_requests_rx: channel::Receiver<ScanRequest>,
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    next_entry_id: Arc<AtomicUsize>,
    fs: Arc<dyn Fs>,
    cx: &mut ModelContext<'_, Worktree>,
) -> Vec<Task<()>> {
    let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
    let background_scanner = cx.background_executor().spawn({
        let abs_path = abs_path.to_path_buf();
        let background = cx.background_executor().clone();
        async move {
            // Subscribe to filesystem events before running the scanner so no
            // changes slip through in between.
            let events = fs.watch(&abs_path, FS_WATCH_LATENCY).await;
            // Fall back to case-sensitive comparisons if detection fails.
            let case_sensitive = fs.is_case_sensitive().await.unwrap_or_else(|e| {
                log::error!(
                    "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
                );
                true
            });

            BackgroundScanner::new(
                snapshot,
                next_entry_id,
                fs,
                case_sensitive,
                scan_states_tx,
                background,
                scan_requests_rx,
                path_prefixes_to_scan_rx,
            )
            .run(events)
            .await;
        }
    });
    // Forward scan states into the model until either the scanner's channel
    // closes or the model is dropped.
    let scan_state_updater = cx.spawn(|this, mut cx| async move {
        while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade()) {
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                match state {
                    ScanState::Started => {
                        *this.is_scanning.0.borrow_mut() = true;
                    }
                    ScanState::Updated {
                        snapshot,
                        changes,
                        barrier,
                        scanning,
                    } => {
                        *this.is_scanning.0.borrow_mut() = scanning;
                        this.set_snapshot(snapshot, changes, cx);
                        // Dropping the barrier signals any requester that this
                        // scan's results have been applied.
                        drop(barrier);
                    }
                }
                cx.notify();
            })
            .ok();
        }
    });
    vec![background_scanner, scan_state_updater]
}
 728
 729fn path_matchers(values: Option<&[String]>, context: &'static str) -> Vec<PathMatcher> {
 730    values
 731        .unwrap_or(&[])
 732        .iter()
 733        .sorted()
 734        .filter_map(|pattern| {
 735            PathMatcher::new(pattern)
 736                .map(Some)
 737                .unwrap_or_else(|e| {
 738                    log::error!(
 739                        "Skipping pattern {pattern} in `{}` project settings due to parsing error: {e:#}", context
 740                    );
 741                    None
 742                })
 743        })
 744        .collect()
 745}
 746
 747impl LocalWorktree {
    /// Returns whether the absolute path `path` lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
 751
    /// Loads the file at `path` (relative to the worktree root) and builds a
    /// [`Buffer`] for it, including its diff base when `load` provides one.
    pub fn load_buffer(
        &mut self,
        id: BufferId,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Model<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))?
                .await?;
            // Construct the text buffer on the background executor, since the
            // file contents may be large.
            let text_buffer = cx
                .background_executor()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            cx.new_model(|_| {
                Buffer::build(
                    text_buffer,
                    diff_base,
                    Some(Arc::new(file)),
                    Capability::ReadWrite,
                )
            })
        })
    }
 777
 778    pub fn new_buffer(
 779        &mut self,
 780        buffer_id: BufferId,
 781        path: Arc<Path>,
 782        cx: &mut ModelContext<Worktree>,
 783    ) -> Model<Buffer> {
 784        let text_buffer = text::Buffer::new(0, buffer_id, "".into());
 785        let worktree = cx.handle();
 786        cx.new_model(|_| {
 787            Buffer::build(
 788                text_buffer,
 789                None,
 790                Some(Arc::new(File {
 791                    worktree,
 792                    path,
 793                    mtime: None,
 794                    entry_id: None,
 795                    is_local: true,
 796                    is_deleted: false,
 797                    is_private: false,
 798                })),
 799                Capability::ReadWrite,
 800            )
 801        })
 802    }
 803
 804    pub fn diagnostics_for_path(
 805        &self,
 806        path: &Path,
 807    ) -> Vec<(
 808        LanguageServerId,
 809        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 810    )> {
 811        self.diagnostics.get(path).cloned().unwrap_or_default()
 812    }
 813
    /// Removes all diagnostics and diagnostic summaries produced by `server_id`.
    ///
    /// If the worktree is currently shared, an all-zero summary is sent to
    /// collaborators for each affected path so they clear their state too.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    if let Some(share) = self.share.as_ref() {
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    // Drop the path entirely when no other server has summaries for it.
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        // The binary search assumes each per-path vec is kept sorted by
        // server id; remove this server's entry and drop the path when the
        // vec becomes empty.
        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
 852
    /// Replaces this server's diagnostics for `worktree_path` and recomputes
    /// the corresponding summary, broadcasting the new summary to peers when
    /// the worktree is shared. Returns `Ok(true)` when either the old or the
    /// new summary is non-empty (i.e. observers should react).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: remove its entry, and
            // drop the path from the map entirely once it becomes empty.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // Entries are kept sorted by server id; replace in place or
            // insert at the position found by the binary search.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                // Keep collaborators' summaries in sync with ours.
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
 912
    /// Installs a new snapshot produced by the background scanner, forwards
    /// it (with the change sets) to any remote collaborators, and emits
    /// change events for local observers.
    fn set_snapshot(
        &mut self,
        new_snapshot: LocalSnapshot,
        entry_changes: UpdatedEntriesSet,
        cx: &mut ModelContext<Worktree>,
    ) {
        // Diff the repositories before replacing the snapshot — the
        // comparison needs both the old and the new state.
        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);

        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            share
                .snapshots_tx
                .unbounded_send((
                    self.snapshot.clone(),
                    entry_changes.clone(),
                    repo_changes.clone(),
                ))
                .ok();
        }

        if !entry_changes.is_empty() {
            cx.emit(Event::UpdatedEntries(entry_changes));
        }
        if !repo_changes.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(repo_changes));
        }
    }
 941
    /// Compares the git repositories of two snapshots, returning the
    /// work-directory paths whose repository state changed (added, removed,
    /// or re-scanned), each paired with the repository entry from the old
    /// snapshot, if one existed.
    fn changed_repos(
        &self,
        old_snapshot: &LocalSnapshot,
        new_snapshot: &LocalSnapshot,
    ) -> UpdatedGitRepositoriesSet {
        let mut changes = Vec::new();
        // Both maps iterate in entry-id order, so a sorted merge-join finds
        // additions, removals, and matches in a single pass.
        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
        loop {
            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
                    match Ord::cmp(&new_entry_id, &old_entry_id) {
                        Ordering::Less => {
                            // Repository exists only in the new snapshot: added.
                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: None,
                                    },
                                ));
                            }
                            new_repos.next();
                        }
                        Ordering::Equal => {
                            // Present in both: report only if the .git dir
                            // was re-scanned since the old snapshot.
                            if new_repo.git_dir_scan_id != old_repo.git_dir_scan_id {
                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
                                    let old_repo = old_snapshot
                                        .repository_entries
                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
                                        .cloned();
                                    changes.push((
                                        entry.path.clone(),
                                        GitRepositoryChange {
                                            old_repository: old_repo,
                                        },
                                    ));
                                }
                            }
                            new_repos.next();
                            old_repos.next();
                        }
                        Ordering::Greater => {
                            // Repository exists only in the old snapshot: removed.
                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
                                let old_repo = old_snapshot
                                    .repository_entries
                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
                                    .cloned();
                                changes.push((
                                    entry.path.clone(),
                                    GitRepositoryChange {
                                        old_repository: old_repo,
                                    },
                                ));
                            }
                            old_repos.next();
                        }
                    }
                }
                // Remaining new-only repositories: all added.
                (Some((entry_id, _)), None) => {
                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: None,
                            },
                        ));
                    }
                    new_repos.next();
                }
                // Remaining old-only repositories: all removed.
                (None, Some((entry_id, _))) => {
                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
                        let old_repo = old_snapshot
                            .repository_entries
                            .get(&RepositoryWorkDirectory(entry.path.clone()))
                            .cloned();
                        changes.push((
                            entry.path.clone(),
                            GitRepositoryChange {
                                old_repository: old_repo,
                            },
                        ));
                    }
                    old_repos.next();
                }
                (None, None) => break,
            }
        }

        // Helper to turn the peeked `(&K, &V)` pair into an owned `(K, V)`.
        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
            (value.0.clone(), value.1.clone())
        }

        changes.into()
    }
1036
1037    pub fn scan_complete(&self) -> impl Future<Output = ()> {
1038        let mut is_scanning_rx = self.is_scanning.1.clone();
1039        async move {
1040            let mut is_scanning = *is_scanning_rx.borrow();
1041            while is_scanning {
1042                if let Some(value) = is_scanning_rx.recv().await {
1043                    is_scanning = value;
1044                } else {
1045                    break;
1046                }
1047            }
1048        }
1049    }
1050
    /// Returns a clone of the worktree's current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
1054
1055    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
1056        proto::WorktreeMetadata {
1057            id: self.id().to_proto(),
1058            root_name: self.root_name().to_string(),
1059            visible: self.visible,
1060            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
1061        }
1062    }
1063
    /// Loads the file at `path` from disk, returning its `File` metadata, its
    /// text contents, and — when the file belongs to a git repository — the
    /// git index text to diff against. Also refreshes the path's worktree
    /// entry so the snapshot stays current.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let entry = self.refresh_entry(path.clone(), None, cx);

        cx.spawn(|this, mut cx| async move {
            let abs_path = abs_path?;
            let text = fs.load(&abs_path).await?;
            let mut index_task = None;
            let snapshot = this.update(&mut cx, |this, _| this.as_local().unwrap().snapshot())?;
            if let Some(repo) = snapshot.repository_for_path(&path) {
                if let Some(repo_path) = repo.work_directory.relativize(&snapshot, &path).log_err()
                {
                    if let Some(git_repo) = snapshot.git_repositories.get(&*repo.work_directory) {
                        let git_repo = git_repo.repo_ptr.clone();
                        // Read the index text on a background thread while
                        // the rest of the load proceeds.
                        index_task = Some(
                            cx.background_executor()
                                .spawn(async move { git_repo.lock().load_index_text(&repo_path) }),
                        );
                    }
                }
            }

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            let worktree = this
                .upgrade()
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            match entry.await? {
                Some(entry) => Ok((
                    File {
                        entry_id: Some(entry.id),
                        worktree,
                        path: entry.path,
                        mtime: entry.mtime,
                        is_local: true,
                        is_deleted: false,
                        is_private: entry.is_private,
                    },
                    text,
                    diff_base,
                )),
                None => {
                    // The path is excluded from the worktree, so no entry
                    // exists; fall back to raw filesystem metadata.
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!("Loading metadata for excluded file {abs_path:?}")
                        })?
                        .with_context(|| {
                            format!("Excluded file {abs_path:?} got removed during loading")
                        })?;
                    let is_private = snapshot.is_path_private(path.as_ref());
                    Ok((
                        File {
                            entry_id: None,
                            worktree,
                            path,
                            mtime: Some(metadata.mtime),
                            is_local: true,
                            is_deleted: false,
                            is_private,
                        },
                        text,
                        diff_base,
                    ))
                }
            }
        })
    }
1143
    /// Writes the buffer's contents to `path` and updates the buffer's file
    /// metadata afterwards, notifying collaborators of the save (and of the
    /// new file metadata when the buffer was renamed or newly created).
    pub fn save_buffer(
        &self,
        buffer_handle: Model<Buffer>,
        path: Arc<Path>,
        mut has_changed_file: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);

        let rpc = self.client.clone();
        let buffer_id: u64 = buffer.remote_id().into();
        let project_id = self.share.as_ref().map(|share| share.project_id);

        // A buffer that was never written to disk always needs its file
        // metadata refreshed after this save.
        if buffer.file().is_some_and(|file| !file.is_created()) {
            has_changed_file = true;
        }

        let text = buffer.as_rope().clone();
        let fingerprint = text.fingerprint();
        let version = buffer.version();
        let save = self.write_file(path.as_ref(), text, buffer.line_ending(), cx);
        let fs = Arc::clone(&self.fs);
        let abs_path = self.absolutize(&path);
        let is_private = self.snapshot.is_path_private(&path);

        cx.spawn(move |this, mut cx| async move {
            let entry = save.await?;
            let abs_path = abs_path?;
            let this = this.upgrade().context("worktree dropped")?;

            // Excluded paths produce no worktree entry; in that case fall
            // back to filesystem metadata for the mtime.
            let (entry_id, mtime, path, is_dotenv) = match entry {
                Some(entry) => (Some(entry.id), entry.mtime, entry.path, entry.is_private),
                None => {
                    let metadata = fs
                        .metadata(&abs_path)
                        .await
                        .with_context(|| {
                            format!(
                                "Fetching metadata after saving the excluded buffer {abs_path:?}"
                            )
                        })?
                        .with_context(|| {
                            format!("Excluded buffer {path:?} got removed during saving")
                        })?;
                    (None, Some(metadata.mtime), path, is_private)
                }
            };

            if has_changed_file {
                let new_file = Arc::new(File {
                    entry_id,
                    worktree: this,
                    path,
                    mtime,
                    is_local: true,
                    is_deleted: false,
                    is_private: is_dotenv,
                });

                if let Some(project_id) = project_id {
                    rpc.send(proto::UpdateBufferFile {
                        project_id,
                        buffer_id,
                        file: Some(new_file.to_proto()),
                    })
                    .log_err();
                }

                buffer_handle.update(&mut cx, |buffer, cx| {
                    if has_changed_file {
                        buffer.file_updated(new_file, cx);
                    }
                })?;
            }

            if let Some(project_id) = project_id {
                rpc.send(proto::BufferSaved {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                    mtime: mtime.map(|time| time.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                })?;
            }

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            })?;

            Ok(())
        })
    }
1236
1237    /// Find the lowest path in the worktree's datastructures that is an ancestor
1238    fn lowest_ancestor(&self, path: &Path) -> PathBuf {
1239        let mut lowest_ancestor = None;
1240        for path in path.ancestors() {
1241            if self.entry_for_path(path).is_some() {
1242                lowest_ancestor = Some(path.to_path_buf());
1243                break;
1244            }
1245        }
1246
1247        lowest_ancestor.unwrap_or_else(|| PathBuf::from(""))
1248    }
1249
    /// Creates a file or directory at `path` (relative to the worktree
    /// root), then refreshes the new path and every intermediate directory
    /// created along the way, returning the entry for `path` itself.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let path = path.into();
        let lowest_ancestor = self.lowest_ancestor(&path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background_executor().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path?).await
            } else {
                // Creating an empty file with default contents/line ending.
                fs.save(&abs_path?, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let (result, refreshes) = this.update(&mut cx, |this, cx| {
                let mut refreshes = Vec::new();
                // `lowest_ancestor` is an ancestor of `path` by construction,
                // so stripping the prefix cannot fail.
                let refresh_paths = path.strip_prefix(&lowest_ancestor).unwrap();
                for refresh_path in refresh_paths.ancestors() {
                    if refresh_path == Path::new("") {
                        continue;
                    }
                    let refresh_full_path = lowest_ancestor.join(refresh_path);

                    // Refresh each newly-created intermediate directory so
                    // the snapshot learns about them too.
                    refreshes.push(this.as_local_mut().unwrap().refresh_entry(
                        refresh_full_path.into(),
                        None,
                        cx,
                    ));
                }
                (
                    this.as_local_mut().unwrap().refresh_entry(path, None, cx),
                    refreshes,
                )
            })?;
            for refresh in refreshes {
                refresh.await.log_err();
            }

            result.await
        })
    }
1298
1299    pub(crate) fn write_file(
1300        &self,
1301        path: impl Into<Arc<Path>>,
1302        text: Rope,
1303        line_ending: LineEnding,
1304        cx: &mut ModelContext<Worktree>,
1305    ) -> Task<Result<Option<Entry>>> {
1306        let path: Arc<Path> = path.into();
1307        let abs_path = self.absolutize(&path);
1308        let fs = self.fs.clone();
1309        let write = cx
1310            .background_executor()
1311            .spawn(async move { fs.save(&abs_path?, &text, line_ending).await });
1312
1313        cx.spawn(|this, mut cx| async move {
1314            write.await?;
1315            this.update(&mut cx, |this, cx| {
1316                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1317            })?
1318            .await
1319        })
1320    }
1321
    /// Deletes the entry with `entry_id` from disk (recursively for
    /// directories) and waits for the scanner to observe the removal.
    /// Returns `None` when the entry no longer exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let fs = self.fs.clone();

        // Remove the file or directory on a background thread.
        let delete = cx.background_executor().spawn(async move {
            if entry.is_file() {
                fs.remove_file(&abs_path?, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path?,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(entry.path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let path = delete.await?;
            // Block until the scanner has re-scanned the deleted path so the
            // snapshot reflects the removal before we resolve.
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entries_for_paths(vec![path])
            })?
            .recv()
            .await;
            Ok(())
        }))
    }
1359
    /// Moves the entry with `entry_id` to `new_path` on disk, then refreshes
    /// both the old and new locations and returns the entry at the new path.
    /// Returns `Ok(None)` when the source entry no longer exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Option<Entry>>> {
        let old_path = match self.entry_for_id(entry_id) {
            Some(entry) => entry.path.clone(),
            None => return Task::ready(Ok(None)),
        };
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let case_sensitive = self.fs_case_sensitive;
        let rename = cx.background_executor().spawn(async move {
            let abs_old_path = abs_old_path?;
            let abs_new_path = abs_new_path?;

            // Lowercased forms are used only to detect case-only renames;
            // `to_str` may fail for non-UTF-8 paths, yielding `None`.
            let abs_old_path_lower = abs_old_path.to_str().map(|p| p.to_lowercase());
            let abs_new_path_lower = abs_new_path.to_str().map(|p| p.to_lowercase());

            // If we're on a case-insensitive FS and we're doing a case-only rename (i.e. `foobar` to `FOOBAR`)
            // we want to overwrite, because otherwise we run into a file-already-exists error.
            let overwrite = !case_sensitive
                && abs_old_path != abs_new_path
                && abs_old_path_lower == abs_new_path_lower;

            fs.rename(
                &abs_old_path,
                &abs_new_path,
                fs::RenameOptions {
                    overwrite,
                    ..Default::default()
                },
            )
            .await
        });

        cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Refresh both locations so the snapshot reflects the move.
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })?
            .await
        })
    }
1409
1410    pub fn copy_entry(
1411        &self,
1412        entry_id: ProjectEntryId,
1413        new_path: impl Into<Arc<Path>>,
1414        cx: &mut ModelContext<Worktree>,
1415    ) -> Task<Result<Option<Entry>>> {
1416        let old_path = match self.entry_for_id(entry_id) {
1417            Some(entry) => entry.path.clone(),
1418            None => return Task::ready(Ok(None)),
1419        };
1420        let new_path = new_path.into();
1421        let abs_old_path = self.absolutize(&old_path);
1422        let abs_new_path = self.absolutize(&new_path);
1423        let fs = self.fs.clone();
1424        let copy = cx.background_executor().spawn(async move {
1425            copy_recursive(
1426                fs.as_ref(),
1427                &abs_old_path?,
1428                &abs_new_path?,
1429                Default::default(),
1430            )
1431            .await
1432        });
1433
1434        cx.spawn(|this, mut cx| async move {
1435            copy.await?;
1436            this.update(&mut cx, |this, cx| {
1437                this.as_local_mut()
1438                    .unwrap()
1439                    .refresh_entry(new_path.clone(), None, cx)
1440            })?
1441            .await
1442        })
1443    }
1444
1445    pub fn expand_entry(
1446        &mut self,
1447        entry_id: ProjectEntryId,
1448        cx: &mut ModelContext<Worktree>,
1449    ) -> Option<Task<Result<()>>> {
1450        let path = self.entry_for_id(entry_id)?.path.clone();
1451        let mut refresh = self.refresh_entries_for_paths(vec![path]);
1452        Some(cx.background_executor().spawn(async move {
1453            refresh.next().await;
1454            Ok(())
1455        }))
1456    }
1457
1458    pub fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
1459        let (tx, rx) = barrier::channel();
1460        self.scan_requests_tx
1461            .try_send(ScanRequest {
1462                relative_paths: paths,
1463                done: tx,
1464            })
1465            .ok();
1466        rx
1467    }
1468
1469    pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
1470        self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
1471    }
1472
1473    fn refresh_entry(
1474        &self,
1475        path: Arc<Path>,
1476        old_path: Option<Arc<Path>>,
1477        cx: &mut ModelContext<Worktree>,
1478    ) -> Task<Result<Option<Entry>>> {
1479        if self.is_path_excluded(path.to_path_buf()) {
1480            return Task::ready(Ok(None));
1481        }
1482        let paths = if let Some(old_path) = old_path.as_ref() {
1483            vec![old_path.clone(), path.clone()]
1484        } else {
1485            vec![path.clone()]
1486        };
1487        let mut refresh = self.refresh_entries_for_paths(paths);
1488        cx.spawn(move |this, mut cx| async move {
1489            refresh.recv().await;
1490            let new_entry = this.update(&mut cx, |this, _| {
1491                this.entry_for_path(path)
1492                    .cloned()
1493                    .ok_or_else(|| anyhow!("failed to read path after update"))
1494            })??;
1495            Ok(Some(new_entry))
1496        })
1497    }
1498
    /// Begins streaming worktree updates through `callback`, starting with a
    /// full initial snapshot followed by incremental updates. If updates are
    /// already being observed, the existing stream is simply resumed. The
    /// returned receiver fires when the update stream terminates.
    pub fn observe_updates<F, Fut>(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
        callback: F,
    ) -> oneshot::Receiver<()>
    where
        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
        Fut: Send + Future<Output = bool>,
    {
        // Use a tiny chunk size in tests to exercise the splitting logic.
        #[cfg(any(test, feature = "test-support"))]
        const MAX_CHUNK_SIZE: usize = 2;
        #[cfg(not(any(test, feature = "test-support")))]
        const MAX_CHUNK_SIZE: usize = 256;

        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: signal readiness and nudge the existing
            // maintenance task to resume sending updates.
            share_tx.send(()).ok();
            *share.resume_updates.borrow_mut() = ();
            return share_rx;
        }

        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
        let (snapshots_tx, mut snapshots_rx) =
            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
        // Seed the stream with the current snapshot so the very first update
        // sent to the callback is a complete one.
        snapshots_tx
            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
            .ok();

        let worktree_id = cx.entity_id().as_u64();
        let _maintain_remote_snapshot = cx.background_executor().spawn(async move {
            let mut is_first = true;
            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
                let update;
                if is_first {
                    update = snapshot.build_initial_update(project_id, worktree_id);
                    is_first = false;
                } else {
                    update =
                        snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
                }

                // Large updates are split into bounded chunks; each chunk is
                // retried until the callback reports success, waiting on the
                // resume channel between attempts.
                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                    let _ = resume_updates_rx.try_recv();
                    loop {
                        let result = callback(update.clone());
                        if result.await {
                            break;
                        } else {
                            log::info!("waiting to resume updates");
                            if resume_updates_rx.next().await.is_none() {
                                return Some(());
                            }
                        }
                    }
                }
            }
            share_tx.send(()).ok();
            Some(())
        });

        self.share = Some(ShareState {
            project_id,
            snapshots_tx,
            resume_updates: resume_updates_tx,
            _maintain_remote_snapshot,
        });
        share_rx
    }
1569
    /// Shares this worktree under `project_id`: first replays every known
    /// diagnostic summary to the collaboration server, then starts streaming
    /// worktree updates via `observe_updates`. The returned task resolves
    /// when sharing ends, or errors if the share terminates unexpectedly.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let client = self.client.clone();

        for (path, summaries) in &self.diagnostic_summaries {
            for (&server_id, summary) in summaries {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id: cx.entity_id().as_u64(),
                    summary: Some(summary.to_proto(server_id, path)),
                }) {
                    // Abort sharing on the first failed send.
                    return Task::ready(Err(e));
                }
            }
        }

        let rx = self.observe_updates(project_id, cx, move |update| {
            client.request(update).map(|result| result.is_ok())
        });
        cx.background_executor()
            .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
    }
1591
1592    pub fn unshare(&mut self) {
1593        self.share.take();
1594    }
1595
    /// Whether this worktree is currently being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1599}
1600
1601impl RemoteWorktree {
    /// Returns a clone of the worktree's current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1605
1606    pub fn disconnected_from_host(&mut self) {
1607        self.updates_tx.take();
1608        self.snapshot_subscriptions.clear();
1609        self.disconnected = true;
1610    }
1611
    /// Saves a buffer by asking the host to persist it over RPC, then records
    /// the host-reported version and mtime on the buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: Model<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id().into();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.spawn(move |_, mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            // NOTE(review): no fingerprint is taken from the host's response;
            // a default fingerprint is recorded instead — confirm intentional.
            let fingerprint = RopeFingerprint::default();
            let mtime = response.mtime.map(|mtime| mtime.into());

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            })?;

            Ok(())
        })
    }
1641
1642    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1643        if let Some(updates_tx) = &self.updates_tx {
1644            updates_tx
1645                .unbounded_send(update)
1646                .expect("consumer runs to completion");
1647        }
1648    }
1649
1650    fn observed_snapshot(&self, scan_id: usize) -> bool {
1651        self.completed_scan_id >= scan_id
1652    }
1653
1654    pub fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1655        let (tx, rx) = oneshot::channel();
1656        if self.observed_snapshot(scan_id) {
1657            let _ = tx.send(());
1658        } else if self.disconnected {
1659            drop(tx);
1660        } else {
1661            match self
1662                .snapshot_subscriptions
1663                .binary_search_by_key(&scan_id, |probe| probe.0)
1664            {
1665                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1666            }
1667        }
1668
1669        async move {
1670            rx.await?;
1671            Ok(())
1672        }
1673    }
1674
1675    pub fn update_diagnostic_summary(
1676        &mut self,
1677        path: Arc<Path>,
1678        summary: &proto::DiagnosticSummary,
1679    ) {
1680        let server_id = LanguageServerId(summary.language_server_id as usize);
1681        let summary = DiagnosticSummary {
1682            error_count: summary.error_count as usize,
1683            warning_count: summary.warning_count as usize,
1684        };
1685
1686        if summary.is_empty() {
1687            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1688                summaries.remove(&server_id);
1689                if summaries.is_empty() {
1690                    self.diagnostic_summaries.remove(&path);
1691                }
1692            }
1693        } else {
1694            self.diagnostic_summaries
1695                .entry(path)
1696                .or_default()
1697                .insert(server_id, summary);
1698        }
1699    }
1700
1701    pub fn insert_entry(
1702        &mut self,
1703        entry: proto::Entry,
1704        scan_id: usize,
1705        cx: &mut ModelContext<Worktree>,
1706    ) -> Task<Result<Entry>> {
1707        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1708        cx.spawn(|this, mut cx| async move {
1709            wait_for_snapshot.await?;
1710            this.update(&mut cx, |worktree, _| {
1711                let worktree = worktree.as_remote_mut().unwrap();
1712                let mut snapshot = worktree.background_snapshot.lock();
1713                let entry = snapshot.insert_entry(entry);
1714                worktree.snapshot = snapshot.clone();
1715                entry
1716            })?
1717        })
1718    }
1719
1720    pub fn delete_entry(
1721        &mut self,
1722        id: ProjectEntryId,
1723        scan_id: usize,
1724        cx: &mut ModelContext<Worktree>,
1725    ) -> Task<Result<()>> {
1726        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1727        cx.spawn(move |this, mut cx| async move {
1728            wait_for_snapshot.await?;
1729            this.update(&mut cx, |worktree, _| {
1730                let worktree = worktree.as_remote_mut().unwrap();
1731                let mut snapshot = worktree.background_snapshot.lock();
1732                snapshot.delete_entry(id);
1733                worktree.snapshot = snapshot.clone();
1734            })?;
1735            Ok(())
1736        })
1737    }
1738}
1739
1740impl Snapshot {
1741    pub fn id(&self) -> WorktreeId {
1742        self.id
1743    }
1744
1745    pub fn abs_path(&self) -> &Arc<Path> {
1746        &self.abs_path
1747    }
1748
1749    pub fn absolutize(&self, path: &Path) -> Result<PathBuf> {
1750        if path
1751            .components()
1752            .any(|component| !matches!(component, std::path::Component::Normal(_)))
1753        {
1754            return Err(anyhow!("invalid path"));
1755        }
1756        if path.file_name().is_some() {
1757            Ok(self.abs_path.join(path))
1758        } else {
1759            Ok(self.abs_path.to_path_buf())
1760        }
1761    }
1762
1763    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1764        self.entries_by_id.get(&entry_id, &()).is_some()
1765    }
1766
1767    fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1768        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1769        let old_entry = self.entries_by_id.insert_or_replace(
1770            PathEntry {
1771                id: entry.id,
1772                path: entry.path.clone(),
1773                is_ignored: entry.is_ignored,
1774                scan_id: 0,
1775            },
1776            &(),
1777        );
1778        if let Some(old_entry) = old_entry {
1779            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1780        }
1781        self.entries_by_path.insert_or_replace(entry.clone(), &());
1782        Ok(entry)
1783    }
1784
1785    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1786        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1787        self.entries_by_path = {
1788            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1789            let mut new_entries_by_path =
1790                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1791            while let Some(entry) = cursor.item() {
1792                if entry.path.starts_with(&removed_entry.path) {
1793                    self.entries_by_id.remove(&entry.id, &());
1794                    cursor.next(&());
1795                } else {
1796                    break;
1797                }
1798            }
1799            new_entries_by_path.append(cursor.suffix(&()), &());
1800            new_entries_by_path
1801        };
1802
1803        Some(removed_entry.path)
1804    }
1805
1806    #[cfg(any(test, feature = "test-support"))]
1807    pub fn status_for_file(&self, path: impl Into<PathBuf>) -> Option<GitFileStatus> {
1808        let path = path.into();
1809        self.entries_by_path
1810            .get(&PathKey(Arc::from(path)), &())
1811            .and_then(|entry| entry.git_status)
1812    }
1813
1814    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1815        let mut entries_by_path_edits = Vec::new();
1816        let mut entries_by_id_edits = Vec::new();
1817
1818        for entry_id in update.removed_entries {
1819            let entry_id = ProjectEntryId::from_proto(entry_id);
1820            entries_by_id_edits.push(Edit::Remove(entry_id));
1821            if let Some(entry) = self.entry_for_id(entry_id) {
1822                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1823            }
1824        }
1825
1826        for entry in update.updated_entries {
1827            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1828            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1829                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1830            }
1831            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1832                if old_entry.id != entry.id {
1833                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
1834                }
1835            }
1836            entries_by_id_edits.push(Edit::Insert(PathEntry {
1837                id: entry.id,
1838                path: entry.path.clone(),
1839                is_ignored: entry.is_ignored,
1840                scan_id: 0,
1841            }));
1842            entries_by_path_edits.push(Edit::Insert(entry));
1843        }
1844
1845        self.entries_by_path.edit(entries_by_path_edits, &());
1846        self.entries_by_id.edit(entries_by_id_edits, &());
1847
1848        update.removed_repositories.sort_unstable();
1849        self.repository_entries.retain(|_, entry| {
1850            if let Ok(_) = update
1851                .removed_repositories
1852                .binary_search(&entry.work_directory.to_proto())
1853            {
1854                false
1855            } else {
1856                true
1857            }
1858        });
1859
1860        for repository in update.updated_repositories {
1861            let work_directory_entry: WorkDirectoryEntry =
1862                ProjectEntryId::from_proto(repository.work_directory_id).into();
1863
1864            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1865                let work_directory = RepositoryWorkDirectory(entry.path.clone());
1866                if self.repository_entries.get(&work_directory).is_some() {
1867                    self.repository_entries.update(&work_directory, |repo| {
1868                        repo.branch = repository.branch.map(Into::into);
1869                    });
1870                } else {
1871                    self.repository_entries.insert(
1872                        work_directory,
1873                        RepositoryEntry {
1874                            work_directory: work_directory_entry,
1875                            branch: repository.branch.map(Into::into),
1876                        },
1877                    )
1878                }
1879            } else {
1880                log::error!("no work directory entry for repository {:?}", repository)
1881            }
1882        }
1883
1884        self.scan_id = update.scan_id as usize;
1885        if update.is_last_update {
1886            self.completed_scan_id = update.scan_id as usize;
1887        }
1888
1889        Ok(())
1890    }
1891
1892    pub fn file_count(&self) -> usize {
1893        self.entries_by_path.summary().file_count
1894    }
1895
1896    pub fn visible_file_count(&self) -> usize {
1897        self.entries_by_path.summary().non_ignored_file_count
1898    }
1899
1900    fn traverse_from_offset(
1901        &self,
1902        include_dirs: bool,
1903        include_ignored: bool,
1904        start_offset: usize,
1905    ) -> Traversal {
1906        let mut cursor = self.entries_by_path.cursor();
1907        cursor.seek(
1908            &TraversalTarget::Count {
1909                count: start_offset,
1910                include_dirs,
1911                include_ignored,
1912            },
1913            Bias::Right,
1914            &(),
1915        );
1916        Traversal {
1917            cursor,
1918            include_dirs,
1919            include_ignored,
1920        }
1921    }
1922
1923    fn traverse_from_path(
1924        &self,
1925        include_dirs: bool,
1926        include_ignored: bool,
1927        path: &Path,
1928    ) -> Traversal {
1929        let mut cursor = self.entries_by_path.cursor();
1930        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1931        Traversal {
1932            cursor,
1933            include_dirs,
1934            include_ignored,
1935        }
1936    }
1937
1938    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1939        self.traverse_from_offset(false, include_ignored, start)
1940    }
1941
1942    pub fn entries(&self, include_ignored: bool) -> Traversal {
1943        self.traverse_from_offset(true, include_ignored, 0)
1944    }
1945
1946    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1947        self.repository_entries
1948            .iter()
1949            .map(|(path, entry)| (&path.0, entry))
1950    }
1951
1952    /// Get the repository whose work directory contains the given path.
1953    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1954        self.repository_entries
1955            .get(&RepositoryWorkDirectory(path.into()))
1956            .cloned()
1957    }
1958
1959    /// Get the repository whose work directory contains the given path.
1960    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1961        self.repository_and_work_directory_for_path(path)
1962            .map(|e| e.1)
1963    }
1964
1965    pub fn repository_and_work_directory_for_path(
1966        &self,
1967        path: &Path,
1968    ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
1969        self.repository_entries
1970            .iter()
1971            .filter(|(workdir_path, _)| path.starts_with(workdir_path))
1972            .last()
1973            .map(|(path, repo)| (path.clone(), repo.clone()))
1974    }
1975
1976    /// Given an ordered iterator of entries, returns an iterator of those entries,
1977    /// along with their containing git repository.
1978    pub fn entries_with_repositories<'a>(
1979        &'a self,
1980        entries: impl 'a + Iterator<Item = &'a Entry>,
1981    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1982        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1983        let mut repositories = self.repositories().peekable();
1984        entries.map(move |entry| {
1985            while let Some((repo_path, _)) = containing_repos.last() {
1986                if !entry.path.starts_with(repo_path) {
1987                    containing_repos.pop();
1988                } else {
1989                    break;
1990                }
1991            }
1992            while let Some((repo_path, _)) = repositories.peek() {
1993                if entry.path.starts_with(repo_path) {
1994                    containing_repos.push(repositories.next().unwrap());
1995                } else {
1996                    break;
1997                }
1998            }
1999            let repo = containing_repos.last().map(|(_, repo)| *repo);
2000            (entry, repo)
2001        })
2002    }
2003
2004    /// Updates the `git_status` of the given entries such that files'
2005    /// statuses bubble up to their ancestor directories.
2006    pub fn propagate_git_statuses(&self, result: &mut [Entry]) {
2007        let mut cursor = self
2008            .entries_by_path
2009            .cursor::<(TraversalProgress, GitStatuses)>();
2010        let mut entry_stack = Vec::<(usize, GitStatuses)>::new();
2011
2012        let mut result_ix = 0;
2013        loop {
2014            let next_entry = result.get(result_ix);
2015            let containing_entry = entry_stack.last().map(|(ix, _)| &result[*ix]);
2016
2017            let entry_to_finish = match (containing_entry, next_entry) {
2018                (Some(_), None) => entry_stack.pop(),
2019                (Some(containing_entry), Some(next_path)) => {
2020                    if !next_path.path.starts_with(&containing_entry.path) {
2021                        entry_stack.pop()
2022                    } else {
2023                        None
2024                    }
2025                }
2026                (None, Some(_)) => None,
2027                (None, None) => break,
2028            };
2029
2030            if let Some((entry_ix, prev_statuses)) = entry_to_finish {
2031                cursor.seek_forward(
2032                    &TraversalTarget::PathSuccessor(&result[entry_ix].path),
2033                    Bias::Left,
2034                    &(),
2035                );
2036
2037                let statuses = cursor.start().1 - prev_statuses;
2038
2039                result[entry_ix].git_status = if statuses.conflict > 0 {
2040                    Some(GitFileStatus::Conflict)
2041                } else if statuses.modified > 0 {
2042                    Some(GitFileStatus::Modified)
2043                } else if statuses.added > 0 {
2044                    Some(GitFileStatus::Added)
2045                } else {
2046                    None
2047                };
2048            } else {
2049                if result[result_ix].is_dir() {
2050                    cursor.seek_forward(
2051                        &TraversalTarget::Path(&result[result_ix].path),
2052                        Bias::Left,
2053                        &(),
2054                    );
2055                    entry_stack.push((result_ix, cursor.start().1));
2056                }
2057                result_ix += 1;
2058            }
2059        }
2060    }
2061
2062    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
2063        let empty_path = Path::new("");
2064        self.entries_by_path
2065            .cursor::<()>()
2066            .filter(move |entry| entry.path.as_ref() != empty_path)
2067            .map(|entry| &entry.path)
2068    }
2069
2070    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
2071        let mut cursor = self.entries_by_path.cursor();
2072        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
2073        let traversal = Traversal {
2074            cursor,
2075            include_dirs: true,
2076            include_ignored: true,
2077        };
2078        ChildEntriesIter {
2079            traversal,
2080            parent_path,
2081        }
2082    }
2083
2084    pub fn descendent_entries<'a>(
2085        &'a self,
2086        include_dirs: bool,
2087        include_ignored: bool,
2088        parent_path: &'a Path,
2089    ) -> DescendentEntriesIter<'a> {
2090        let mut cursor = self.entries_by_path.cursor();
2091        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
2092        let mut traversal = Traversal {
2093            cursor,
2094            include_dirs,
2095            include_ignored,
2096        };
2097
2098        if traversal.end_offset() == traversal.start_offset() {
2099            traversal.advance();
2100        }
2101
2102        DescendentEntriesIter {
2103            traversal,
2104            parent_path,
2105        }
2106    }
2107
2108    pub fn root_entry(&self) -> Option<&Entry> {
2109        self.entry_for_path("")
2110    }
2111
2112    pub fn root_name(&self) -> &str {
2113        &self.root_name
2114    }
2115
2116    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
2117        self.repository_entries
2118            .get(&RepositoryWorkDirectory(Path::new("").into()))
2119            .map(|entry| entry.to_owned())
2120    }
2121
2122    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
2123        self.repository_entries.values()
2124    }
2125
2126    pub fn scan_id(&self) -> usize {
2127        self.scan_id
2128    }
2129
2130    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2131        let path = path.as_ref();
2132        self.traverse_from_path(true, true, path)
2133            .entry()
2134            .and_then(|entry| {
2135                if entry.path.as_ref() == path {
2136                    Some(entry)
2137                } else {
2138                    None
2139                }
2140            })
2141    }
2142
2143    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
2144        let entry = self.entries_by_id.get(&id, &())?;
2145        self.entry_for_path(&entry.path)
2146    }
2147
2148    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
2149        self.entry_for_path(path.as_ref()).map(|e| e.inode)
2150    }
2151}
2152
impl LocalSnapshot {
    /// Returns the local (on-disk) repository state for the given
    /// repository entry.
    pub fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }

    /// Finds the repository containing `path`, returning its work directory
    /// and the corresponding local repository state.
    pub(crate) fn local_repo_for_path(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, &LocalRepositoryEntry)> {
        let (path, repo) = self.repository_and_work_directory_for_path(path)?;
        Some((path, self.git_repositories.get(&repo.work_directory_id())?))
    }

    /// Returns a handle to the git repository containing `path`, if any.
    pub fn local_git_repo(&self, path: &Path) -> Option<Arc<Mutex<dyn GitRepository>>> {
        self.local_repo_for_path(path)
            .map(|(_, entry)| entry.repo_ptr.clone())
    }

    /// Builds an `UpdateWorktree` message describing the given entry and
    /// repository changes, for transmission to remote collaborators.
    fn build_update(
        &self,
        project_id: u64,
        worktree_id: u64,
        entry_changes: UpdatedEntriesSet,
        repo_changes: UpdatedGitRepositoriesSet,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut updated_repositories = Vec::new();
        let mut removed_repositories = Vec::new();

        for (_, entry_id, path_change) in entry_changes.iter() {
            if let PathChange::Removed = path_change {
                removed_entries.push(entry_id.0 as u64);
            } else if let Some(entry) = self.entry_for_id(*entry_id) {
                updated_entries.push(proto::Entry::from(entry));
            }
        }

        for (work_dir_path, change) in repo_changes.iter() {
            let new_repo = self
                .repository_entries
                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
            // (old, new): (Some, Some) = changed, (None, Some) = added,
            // (Some, None) = removed.
            match (&change.old_repository, new_repo) {
                (Some(old_repo), Some(new_repo)) => {
                    updated_repositories.push(new_repo.build_update(old_repo));
                }
                (None, Some(new_repo)) => {
                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
                }
                (Some(old_repo), None) => {
                    removed_repositories.push(old_repo.work_directory.0.to_proto());
                }
                _ => {}
            }
        }

        // Sort for deterministic output; `updated_entries` must be sorted
        // by id for the binary search below.
        removed_entries.sort_unstable();
        updated_entries.sort_unstable_by_key(|e| e.id);
        removed_repositories.sort_unstable();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        // An entry that was both removed and re-added counts as updated only.
        // TODO - optimize, knowing that removed_entries are sorted.
        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }

    /// Builds an `UpdateWorktree` message containing the entire snapshot,
    /// used when a collaborator first joins.
    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
        let mut updated_entries = self
            .entries_by_path
            .iter()
            .map(proto::Entry::from)
            .collect::<Vec<_>>();
        updated_entries.sort_unstable_by_key(|e| e.id);

        let mut updated_repositories = self
            .repository_entries
            .values()
            .map(proto::RepositoryEntry::from)
            .collect::<Vec<_>>();
        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries: Vec::new(),
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories: Vec::new(),
        }
    }

    /// Inserts an entry into the snapshot, keeping derived state in sync:
    /// `.gitignore` files are parsed into `ignores_by_parent_abs_path`, and
    /// both the by-path and by-id maps are updated consistently.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // Blocking parse of the gitignore file. NOTE(review): the `true`
            // flag's meaning isn't visible here — presumably it marks the
            // ignore as updated in this scan; confirm at the declaration.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't downgrade a directory that was already fully scanned back
        // to a pending state.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // The path was previously occupied by a different entry; drop
            // the stale id mapping.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Collects the inodes of all known ancestors of `path` (excluding the
    /// entry at `path` itself).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of `.gitignore` files applying to `abs_path`,
    /// walking up from the path to the nearest ancestor containing a
    /// `.git` directory.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for (index, ancestor) in abs_path.ancestors().enumerate() {
            // Skip index 0 (the path itself); only ancestors can contribute
            // an applicable .gitignore.
            if index > 0 {
                if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                    new_ignores.push((ancestor, Some(ignore.clone())));
                } else {
                    new_ignores.push((ancestor, None));
                }
            }
            // Stop at the repository root.
            if ancestor.join(&*DOT_GIT).is_dir() {
                break;
            }
        }

        // Build the stack outermost-first; if any ancestor directory is
        // itself ignored, everything below it is ignored too.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    #[cfg(test)]
    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
        self.entries_by_path
            .cursor::<()>()
            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
    }

    /// Test-only consistency check: verifies the by-path and by-id maps
    /// agree, file counts match, traversal orders are consistent, and (when
    /// `git_state` is set) every tracked gitignore has a backing entry.
    #[cfg(test)]
    pub fn check_invariants(&self, git_state: bool) {
        use pretty_assertions::assert_eq;

        assert_eq!(
            self.entries_by_path
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>() {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if !entry.is_ignored && !entry.is_external {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Recompute the expected ordering via an explicit traversal and
        // compare it against cursor iteration and `entries(true)`.
        let mut bfs_paths = Vec::new();
        let mut stack = self
            .root_entry()
            .map(|e| e.path.as_ref())
            .into_iter()
            .collect::<Vec<_>>();
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>()
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        let dfs_paths_via_traversal = self
            .entries(true)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        if git_state {
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }
    }

    /// Test-only helper: collects `(path, inode, is_ignored)` for all
    /// entries, sorted by path.
    #[cfg(test)]
    pub fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
        let mut paths = Vec::new();
        for entry in self.entries_by_path.cursor::<()>() {
            if include_ignored || !entry.is_ignored {
                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
            }
        }
        paths.sort_by(|a, b| a.0.cmp(b.0));
        paths
    }

    /// Whether `path` or any of its ancestors matches one of the configured
    /// private-file patterns.
    pub fn is_path_private(&self, path: &Path) -> bool {
        path.ancestors().any(|ancestor| {
            self.private_files
                .iter()
                .any(|exclude_matcher| exclude_matcher.is_match(&ancestor))
        })
    }

    /// Whether `path` or any of its ancestors matches one of the configured
    /// file-scan exclusion patterns.
    pub fn is_path_excluded(&self, mut path: PathBuf) -> bool {
        loop {
            if self
                .file_scan_exclusions
                .iter()
                .any(|exclude_matcher| exclude_matcher.is_match(&path))
            {
                return true;
            }
            // Walk up to the parent; stop once the path is exhausted.
            if !path.pop() {
                return false;
            }
        }
    }
}
2461
2462impl BackgroundScannerState {
2463    fn should_scan_directory(&self, entry: &Entry) -> bool {
2464        (!entry.is_external && !entry.is_ignored)
2465            || entry.path.file_name() == Some(*DOT_GIT)
2466            || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
2467            || self
2468                .paths_to_scan
2469                .iter()
2470                .any(|p| p.starts_with(&entry.path))
2471            || self
2472                .path_prefixes_to_scan
2473                .iter()
2474                .any(|p| entry.path.starts_with(p))
2475    }
2476
    /// Queues a directory for scanning by sending a `ScanJob` on `scan_job_tx`.
    ///
    /// Builds the directory's ignore stack and, if the directory is not
    /// ignored, captures its containing git repository together with the
    /// staged statuses for the repo-relative path so the scan can annotate
    /// entries with git state.
    fn enqueue_scan_dir(&self, abs_path: Arc<Path>, entry: &Entry, scan_job_tx: &Sender<ScanJob>) {
        let path = entry.path.clone();
        let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true);
        let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path);
        let mut containing_repository = None;
        if !ignore_stack.is_abs_path_ignored(&abs_path, true) {
            if let Some((workdir_path, repo)) = self.snapshot.local_repo_for_path(&path) {
                if let Ok(repo_path) = path.strip_prefix(&workdir_path.0) {
                    containing_repository = Some((
                        workdir_path,
                        repo.repo_ptr.clone(),
                        repo.repo_ptr.lock().staged_statuses(repo_path),
                    ));
                }
            }
        }
        // Skip the job when this directory's inode already appears among its
        // own ancestors — this looks like a symlink-cycle guard (TODO confirm
        // against `ancestor_inodes_for_path`).
        if !ancestor_inodes.contains(&entry.inode) {
            ancestor_inodes.insert(entry.inode);
            scan_job_tx
                .try_send(ScanJob {
                    abs_path,
                    path,
                    ignore_stack,
                    scan_queue: scan_job_tx.clone(),
                    ancestor_inodes,
                    is_external: entry.is_external,
                    containing_repository,
                })
                .unwrap();
        }
    }
2508
2509    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2510        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2511            entry.id = removed_entry_id;
2512        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
2513            entry.id = existing_entry.id;
2514        }
2515    }
2516
    /// Inserts `entry` into the snapshot, reusing a previously-assigned entry
    /// id when possible. If the inserted path is a `.git` directory, a git
    /// repository is built for it. Returns the entry as stored.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        let entry = self.snapshot.insert_entry(entry, fs);
        if entry.path.file_name() == Some(&DOT_GIT) {
            self.build_git_repository(entry.path.clone(), fs);
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);

        entry
    }
2529
    /// Records the children of a just-scanned directory in the snapshot.
    ///
    /// Marks the parent as a fully-loaded `Dir`, registers the directory's
    /// `.gitignore` (if one was found), inserts all child entries into both
    /// the by-path and by-id trees, and notes the parent path as changed.
    fn populate_dir(
        &mut self,
        parent_path: &Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        // The parent may have been removed by a concurrent event since this
        // scan was queued; in that case there is nothing to populate.
        let mut parent_entry = if let Some(parent_entry) = self
            .snapshot
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        // Only directories can be populated; pending/unloaded ones become
        // loaded now.
        match parent_entry.kind {
            EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
            self.snapshot
                .ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        let parent_entry_id = parent_entry.id;
        self.scanned_dirs.insert(parent_entry_id);
        // Re-insert the parent (now marked as a loaded Dir) alongside its
        // children.
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for entry in entries {
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.snapshot.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.snapshot
            .entries_by_path
            .edit(entries_by_path_edits, &());
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        // Keep `changed_paths` sorted; only insert if not already present.
        if let Err(ix) = self.changed_paths.binary_search(parent_path) {
            self.changed_paths.insert(ix, parent_path.clone());
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }
2590
    /// Removes the entry at `path` and all of its descendants from the
    /// snapshot.
    ///
    /// Removed entry ids are remembered keyed by inode so they can be reused
    /// if the same file reappears. If the removed path is a `.gitignore`
    /// file, the parent directory's ignore state is flagged for update.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into: entries before `path`, the
            // subtree rooted at `path`, and the entries after it.
            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.append(cursor.suffix(&()), &());
        }
        self.snapshot.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Track the highest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .snapshot
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }

        #[cfg(test)]
        self.snapshot.check_invariants(false);
    }
2627
    /// Reloads git state for the given `.git` directories and prunes
    /// repositories whose `.git` entry no longer exists in the snapshot.
    fn reload_repositories(&mut self, dot_git_dirs_to_reload: &HashSet<PathBuf>, fs: &dyn Fs) {
        let scan_id = self.snapshot.scan_id;

        for dot_git_dir in dot_git_dirs_to_reload {
            // If there is already a repository for this .git directory, reload
            // the status for all of its files.
            let repository = self
                .snapshot
                .git_repositories
                .iter()
                .find_map(|(entry_id, repo)| {
                    (repo.git_dir_path.as_ref() == dot_git_dir).then(|| (*entry_id, repo.clone()))
                });
            match repository {
                None => {
                    self.build_git_repository(Arc::from(dot_git_dir.as_path()), fs);
                }
                Some((entry_id, repository)) => {
                    // Already reloaded during this scan; skip.
                    if repository.git_dir_scan_id == scan_id {
                        continue;
                    }
                    let Some(work_dir) = self
                        .snapshot
                        .entry_for_id(entry_id)
                        .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
                    else {
                        continue;
                    };

                    log::info!("reload git repository {dot_git_dir:?}");
                    let repository = repository.repo_ptr.lock();
                    let branch = repository.branch_name();
                    repository.reload_index();

                    // Record the scan id and refreshed branch, then recompute
                    // file statuses for the whole work directory.
                    self.snapshot
                        .git_repositories
                        .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
                    self.snapshot
                        .snapshot
                        .repository_entries
                        .update(&work_dir, |entry| entry.branch = branch.map(Into::into));

                    self.update_git_statuses(&work_dir, &*repository);
                }
            }
        }

        // Remove any git repositories whose .git entry no longer exists.
        let snapshot = &mut self.snapshot;
        let mut ids_to_preserve = HashSet::default();
        for (&work_directory_id, entry) in snapshot.git_repositories.iter() {
            let exists_in_snapshot = snapshot
                .entry_for_id(work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                });
            if exists_in_snapshot {
                ids_to_preserve.insert(work_directory_id);
            } else {
                // An excluded `.git` directory won't appear in the snapshot;
                // keep the repository as long as the directory still exists
                // on disk.
                let git_dir_abs_path = snapshot.abs_path().join(&entry.git_dir_path);
                let git_dir_excluded = snapshot.is_path_excluded(entry.git_dir_path.to_path_buf());
                if git_dir_excluded
                    && !matches!(smol::block_on(fs.metadata(&git_dir_abs_path)), Ok(None))
                {
                    ids_to_preserve.insert(work_directory_id);
                }
            }
        }
        snapshot
            .git_repositories
            .retain(|work_directory_id, _| ids_to_preserve.contains(work_directory_id));
        snapshot
            .repository_entries
            .retain(|_, entry| ids_to_preserve.contains(&entry.work_directory.0));
    }
2703
    /// Opens the git repository whose `.git` directory is at `dot_git_path`
    /// (worktree-relative) and registers it in the snapshot.
    ///
    /// Returns `None` when the `.git` directory is nested inside another
    /// `.git` directory, is the worktree root itself, has no corresponding
    /// work-directory entry, is already registered, or fails to open.
    /// Otherwise returns the work directory, the repository handle, and the
    /// staged statuses computed for it.
    fn build_git_repository(
        &mut self,
        dot_git_path: Arc<Path>,
        fs: &dyn Fs,
    ) -> Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )> {
        let work_dir_path: Arc<Path> = match dot_git_path.parent() {
            Some(parent_dir) => {
                // Guard against repositories inside the repository metadata
                if parent_dir.iter().any(|component| component == *DOT_GIT) {
                    log::info!(
                        "not building git repository for nested `.git` directory, `.git` path in the worktree: {dot_git_path:?}"
                    );
                    return None;
                };
                log::info!(
                    "building git repository, `.git` path in the worktree: {dot_git_path:?}"
                );
                parent_dir.into()
            }
            None => {
                // `dot_git_path.parent().is_none()` means `.git` directory is the opened worktree itself,
                // no files inside that directory are tracked by git, so no need to build the repo around it
                log::info!(
                    "not building git repository for the worktree itself, `.git` path in the worktree: {dot_git_path:?}"
                );
                return None;
            }
        };

        let work_dir_id = self
            .snapshot
            .entry_for_path(work_dir_path.clone())
            .map(|entry| entry.id)?;

        // Already registered for this work directory.
        if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
            return None;
        }

        let abs_path = self.snapshot.abs_path.join(&dot_git_path);
        let repository = fs.open_repo(abs_path.as_path())?;
        let work_directory = RepositoryWorkDirectory(work_dir_path.clone());

        let repo_lock = repository.lock();
        self.snapshot.repository_entries.insert(
            work_directory.clone(),
            RepositoryEntry {
                work_directory: work_dir_id.into(),
                branch: repo_lock.branch_name().map(Into::into),
            },
        );

        // Compute statuses while holding the repo lock, then release it
        // before mutating `git_repositories`.
        let staged_statuses = self.update_git_statuses(&work_directory, &*repo_lock);
        drop(repo_lock);

        self.snapshot.git_repositories.insert(
            work_dir_id,
            LocalRepositoryEntry {
                git_dir_scan_id: 0,
                repo_ptr: repository.clone(),
                git_dir_path: dot_git_path.clone(),
            },
        );

        Some((work_directory, repository, staged_statuses))
    }
2773
    /// Recomputes the git status of every loaded entry beneath
    /// `work_directory`, updating the snapshot and `changed_paths` for any
    /// entry whose status changed. Returns the repository's staged statuses.
    fn update_git_statuses(
        &mut self,
        work_directory: &RepositoryWorkDirectory,
        repo: &dyn GitRepository,
    ) -> TreeMap<RepoPath, GitFileStatus> {
        // Query staged statuses for the whole repository in one call.
        let staged_statuses = repo.staged_statuses(Path::new(""));

        let mut changes = vec![];
        let mut edits = vec![];

        for mut entry in self
            .snapshot
            .descendent_entries(false, false, &work_directory.0)
            .cloned()
        {
            let Ok(repo_path) = entry.path.strip_prefix(&work_directory.0) else {
                continue;
            };
            // Entries without an mtime can't be compared against the index.
            let Some(mtime) = entry.mtime else {
                continue;
            };
            let repo_path = RepoPath(repo_path.to_path_buf());
            let git_file_status = combine_git_statuses(
                staged_statuses.get(&repo_path).copied(),
                repo.unstaged_status(&repo_path, mtime),
            );
            // Only re-insert entries whose status actually changed.
            if entry.git_status != git_file_status {
                entry.git_status = git_file_status;
                changes.push(entry.path.clone());
                edits.push(Edit::Insert(entry));
            }
        }

        self.snapshot.entries_by_path.edit(edits, &());
        util::extend_sorted(&mut self.changed_paths, changes, usize::MAX, Ord::cmp);
        staged_statuses
    }
2811}
2812
2813async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2814    let contents = fs.load(abs_path).await?;
2815    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2816    let mut builder = GitignoreBuilder::new(parent);
2817    for line in contents.lines() {
2818        builder.add_line(Some(abs_path.into()), line)?;
2819    }
2820    Ok(builder.build()?)
2821}
2822
impl WorktreeId {
    /// Creates a worktree id from an entity handle id.
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    /// Creates a worktree id from its wire (protobuf) representation.
    pub fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// Converts the id to its wire (protobuf) representation.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw integer value of the id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
2840
2841impl fmt::Display for WorktreeId {
2842    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2843        self.0.fmt(f)
2844    }
2845}
2846
/// Both worktree variants expose their underlying snapshot directly.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
2857
/// A local worktree dereferences to its local (richer) snapshot.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2865
/// A remote worktree dereferences to its plain snapshot.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2873
/// Debug output for a local worktree delegates to its snapshot.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
2879
2880impl fmt::Debug for Snapshot {
2881    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2882        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2883        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2884
2885        impl<'a> fmt::Debug for EntriesByPath<'a> {
2886            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2887                f.debug_map()
2888                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2889                    .finish()
2890            }
2891        }
2892
2893        impl<'a> fmt::Debug for EntriesById<'a> {
2894            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2895                f.debug_list().entries(self.0.iter()).finish()
2896            }
2897        }
2898
2899        f.debug_struct("Snapshot")
2900            .field("id", &self.id)
2901            .field("root_name", &self.root_name)
2902            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2903            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2904            .finish()
2905    }
2906}
2907
/// A handle to a file within a worktree, implementing [`language::File`].
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: Model<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: Option<SystemTime>,
    /// The worktree entry backing this file, if any.
    pub entry_id: Option<ProjectEntryId>,
    pub is_local: bool,
    pub is_deleted: bool,
    /// Whether the file matches the worktree's private-file patterns.
    pub is_private: bool,
}
2918
impl language::File for File {
    /// Returns `self` as a local file only when it belongs to a local
    /// worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> Option<SystemTime> {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a path suitable for display: visible worktrees are prefixed by
    /// their root name, hidden ones by their absolute path (with `$HOME`
    /// abbreviated to `~` for local worktrees).
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the file is the worktree root itself.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn worktree_id(&self) -> usize {
        self.worktree.entity_id().as_u64() as usize
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file for transmission over RPC.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.entity_id().as_u64(),
            entry_id: self.entry_id.map(|id| id.to_proto()),
            path: self.path.to_string_lossy().into(),
            mtime: self.mtime.map(|time| time.into()),
            is_deleted: self.is_deleted,
        }
    }

    fn is_private(&self) -> bool {
        self.is_private
    }
}
2994
impl language::LocalFile for File {
    /// Returns the absolute path of the file on disk.
    ///
    /// Panics if the worktree is not local (callers are expected to have
    /// checked `as_local` first).
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        let worktree_path = &self.worktree.read(cx).as_local().unwrap().abs_path;
        // An empty relative path refers to the worktree root itself.
        if self.path.as_ref() == Path::new("") {
            worktree_path.to_path_buf()
        } else {
            worktree_path.join(&self.path)
        }
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background_executor()
            .spawn(async move { fs.load(&abs_path?).await })
    }

    /// Notifies collaborators that a buffer was reloaded from disk, if this
    /// worktree is currently shared.
    fn buffer_reloaded(
        &self,
        buffer_id: BufferId,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: Option<SystemTime>,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id: buffer_id.into(),
                    version: serialize_version(version),
                    mtime: mtime.map(|time| time.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
3038
impl File {
    /// Creates a local file handle from a worktree entry.
    pub fn for_entry(entry: Entry, worktree: Model<Worktree>) -> Arc<Self> {
        Arc::new(Self {
            worktree,
            path: entry.path.clone(),
            mtime: entry.mtime,
            entry_id: Some(entry.id),
            is_local: true,
            is_deleted: false,
            is_private: entry.is_private,
        })
    }

    /// Deserializes a file handle received over RPC, validating that it
    /// belongs to the given remote worktree.
    pub fn from_proto(
        proto: rpc::proto::File,
        worktree: Model<Worktree>,
        cx: &AppContext,
    ) -> Result<Self> {
        let worktree_id = worktree
            .read(cx)
            .as_remote()
            .ok_or_else(|| anyhow!("not remote"))?
            .id();

        if worktree_id.to_proto() != proto.worktree_id {
            return Err(anyhow!("worktree id does not match file"));
        }

        Ok(Self {
            worktree,
            path: Path::new(&proto.path).into(),
            mtime: proto.mtime.map(|time| time.into()),
            entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
            is_local: false,
            is_deleted: proto.is_deleted,
            is_private: false,
        })
    }

    /// Downcasts a generic `language::File` back to this concrete type.
    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
        file.and_then(|f| f.as_any().downcast_ref())
    }

    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    /// Returns the backing entry id, or `None` once the file is deleted.
    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
        if self.is_deleted {
            None
        } else {
            self.entry_id
        }
    }
}
3094
/// A single filesystem entry (file or directory) in a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// The entry's path, relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    /// Modification time; `None` when the entry has not been created on disk
    /// yet (see `is_created`).
    pub mtime: Option<SystemTime>,
    pub is_symlink: bool,

    /// Whether this entry is ignored by Git.
    ///
    /// We only scan ignored entries once the directory is expanded and
    /// exclude them from searches.
    pub is_ignored: bool,

    /// Whether this entry's canonical path is outside of the worktree.
    /// This means the entry is only accessible from the worktree root via a
    /// symlink.
    ///
    /// We only scan entries outside of the worktree once the symlinked
    /// directory is expanded. External entries are treated like gitignored
    /// entries in that they are not included in searches.
    pub is_external: bool,
    pub git_status: Option<GitFileStatus>,
    /// Whether this entry is considered to be a `.env` file.
    pub is_private: bool,
}
3122
/// The kind of a worktree entry, including a directory's loading state.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not been loaded.
    UnloadedDir,
    /// A directory whose scan has been queued but not completed.
    PendingDir,
    /// A fully loaded directory.
    Dir,
    /// A file, with a precomputed char bag for fuzzy matching.
    File(CharBag),
}
3130
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PathChange {
    /// A filesystem entry was created.
    Added,
    /// A filesystem entry was removed.
    Removed,
    /// A filesystem entry was updated.
    Updated,
    /// A filesystem entry was either updated or added. We don't know
    /// whether or not it already existed, because the path had not
    /// been loaded before the event.
    AddedOrUpdated,
    /// A filesystem entry was found during the initial scan of the worktree.
    Loaded,
}
3146
/// Describes a change to a git repository tracked by the worktree.
pub struct GitRepositoryChange {
    /// The previous state of the repository, if it already existed.
    pub old_repository: Option<RepositoryEntry>,
}
3151
/// Shared set of entry updates: each element is a path, its entry id, and
/// the kind of change that occurred.
pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
/// Shared set of git repository updates: each element is a work-directory
/// path and its change description.
pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
3154
3155impl Entry {
3156    fn new(
3157        path: Arc<Path>,
3158        metadata: &fs::Metadata,
3159        next_entry_id: &AtomicUsize,
3160        root_char_bag: CharBag,
3161    ) -> Self {
3162        Self {
3163            id: ProjectEntryId::new(next_entry_id),
3164            kind: if metadata.is_dir {
3165                EntryKind::PendingDir
3166            } else {
3167                EntryKind::File(char_bag_for_path(root_char_bag, &path))
3168            },
3169            path,
3170            inode: metadata.inode,
3171            mtime: Some(metadata.mtime),
3172            is_symlink: metadata.is_symlink,
3173            is_ignored: false,
3174            is_external: false,
3175            is_private: false,
3176            git_status: None,
3177        }
3178    }
3179
3180    pub fn is_created(&self) -> bool {
3181        self.mtime.is_some()
3182    }
3183
3184    pub fn is_dir(&self) -> bool {
3185        self.kind.is_dir()
3186    }
3187
3188    pub fn is_file(&self) -> bool {
3189        self.kind.is_file()
3190    }
3191
3192    pub fn git_status(&self) -> Option<GitFileStatus> {
3193        self.git_status
3194    }
3195}
3196
3197impl EntryKind {
3198    pub fn is_dir(&self) -> bool {
3199        matches!(
3200            self,
3201            EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
3202        )
3203    }
3204
3205    pub fn is_unloaded(&self) -> bool {
3206        matches!(self, EntryKind::UnloadedDir)
3207    }
3208
3209    pub fn is_file(&self) -> bool {
3210        matches!(self, EntryKind::File(_))
3211    }
3212}
3213
impl sum_tree::Item for Entry {
    type Summary = EntrySummary;

    /// Builds the single-entry summary that gets aggregated up the sum-tree.
    fn summary(&self) -> Self::Summary {
        // Ignored and external entries are excluded from the "non-ignored"
        // counts.
        let non_ignored_count = if self.is_ignored || self.is_external {
            0
        } else {
            1
        };
        let file_count;
        let non_ignored_file_count;
        if self.is_file() {
            file_count = 1;
            non_ignored_file_count = non_ignored_count;
        } else {
            file_count = 0;
            non_ignored_file_count = 0;
        }

        // Each entry contributes at most one git status to the aggregate.
        let mut statuses = GitStatuses::default();
        match self.git_status {
            Some(status) => match status {
                GitFileStatus::Added => statuses.added = 1,
                GitFileStatus::Modified => statuses.modified = 1,
                GitFileStatus::Conflict => statuses.conflict = 1,
            },
            None => {}
        }

        EntrySummary {
            max_path: self.path.clone(),
            count: 1,
            non_ignored_count,
            file_count,
            non_ignored_file_count,
            statuses,
        }
    }
}
3253
/// Entries are keyed (and thus ordered) by their worktree-relative path.
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
3261
/// Aggregated statistics for a subtree of entries in the by-path sum-tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path in the summarized range.
    max_path: Arc<Path>,
    count: usize,
    non_ignored_count: usize,
    file_count: usize,
    non_ignored_file_count: usize,
    statuses: GitStatuses,
}
3271
3272impl Default for EntrySummary {
3273    fn default() -> Self {
3274        Self {
3275            max_path: Arc::from(Path::new("")),
3276            count: 0,
3277            non_ignored_count: 0,
3278            file_count: 0,
3279            non_ignored_file_count: 0,
3280            statuses: Default::default(),
3281        }
3282    }
3283}
3284
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // The right-hand summary covers later (greater) paths, so its
        // max_path wins; all counts accumulate.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.non_ignored_count += rhs.non_ignored_count;
        self.file_count += rhs.file_count;
        self.non_ignored_file_count += rhs.non_ignored_file_count;
        self.statuses += rhs.statuses;
    }
}
3297
/// An entry in the id-ordered tree, mapping an entry id back to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan during which this entry was last updated.
    scan_id: usize,
}
3305
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
3313
/// Path entries are keyed (and thus ordered) by entry id.
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    fn key(&self) -> Self::Key {
        self.id
    }
}
3321
/// Summary for the id-ordered tree: just the greatest id in the range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
3326
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // The right-hand summary covers greater ids, so its max wins.
        self.max_id = summary.max_id;
    }
}
3334
/// Allows seeking the id-ordered tree by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
3340
/// Ordering key for entries in the by-path sum-tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
3343
impl Default for PathKey {
    /// The empty path, which precedes all other paths.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
3349
/// Allows seeking the by-path tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
3355
/// Scans a worktree's directory tree in the background, maintaining the
/// snapshot in `state` and reporting progress via `status_updates_tx`.
struct BackgroundScanner {
    /// Mutable scanner state, shared with concurrent scan jobs.
    state: Mutex<BackgroundScannerState>,
    fs: Arc<dyn Fs>,
    fs_case_sensitive: bool,
    /// Channel on which scan-state updates are emitted.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: BackgroundExecutor,
    /// Requests to (re)scan specific paths.
    scan_requests_rx: channel::Receiver<ScanRequest>,
    /// Requests to scan everything beneath a path prefix.
    path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    next_entry_id: Arc<AtomicUsize>,
    phase: BackgroundScannerPhase,
}
3367
/// The stage of the background scanner's lifecycle.
#[derive(PartialEq)]
enum BackgroundScannerPhase {
    /// Performing the initial recursive scan of the worktree.
    InitialScan,
    /// Handling filesystem events that arrived while the initial scan was
    /// still in progress.
    EventsReceivedDuringInitialScan,
    /// Initial scan complete; processing filesystem events as they arrive.
    Events,
}
3374
3375impl BackgroundScanner {
    /// Creates a scanner whose state wraps the given snapshot, starting in
    /// the initial-scan phase. `prev_snapshot` is initialized to a clone of
    /// the snapshot's inner snapshot.
    #[allow(clippy::too_many_arguments)]
    fn new(
        snapshot: LocalSnapshot,
        next_entry_id: Arc<AtomicUsize>,
        fs: Arc<dyn Fs>,
        fs_case_sensitive: bool,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: BackgroundExecutor,
        scan_requests_rx: channel::Receiver<ScanRequest>,
        path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
    ) -> Self {
        Self {
            fs,
            fs_case_sensitive,
            status_updates_tx,
            executor,
            scan_requests_rx,
            path_prefixes_to_scan_rx,
            next_entry_id,
            state: Mutex::new(BackgroundScannerState {
                prev_snapshot: snapshot.snapshot.clone(),
                snapshot,
                scanned_dirs: Default::default(),
                path_prefixes_to_scan: Default::default(),
                paths_to_scan: Default::default(),
                removed_entry_ids: Default::default(),
                changed_paths: Default::default(),
            }),
            phase: BackgroundScannerPhase::InitialScan,
        }
    }
3407
    /// Drive the scanner: seed gitignores from directories above the root,
    /// perform the initial recursive scan, then loop processing rescan
    /// requests, path prefixes, and filesystem events until the input
    /// channels close.
    async fn run(&mut self, mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<PathBuf>>>>) {
        use futures::FutureExt as _;

        // Populate ignores above the root.
        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
        for (index, ancestor) in root_abs_path.ancestors().enumerate() {
            if index != 0 {
                if let Ok(ignore) =
                    build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
                {
                    self.state
                        .lock()
                        .snapshot
                        .ignores_by_parent_abs_path
                        .insert(ancestor.into(), (ignore.into(), false));
                }
            }
            if ancestor.join(&*DOT_GIT).is_dir() {
                // Reached root of git repository.
                break;
            }
        }

        // Enqueue a scan job for the root entry, marking it ignored first if
        // an ancestor gitignore says so.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        {
            let mut state = self.state.lock();
            state.snapshot.scan_id += 1;
            if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
                let ignore_stack = state
                    .snapshot
                    .ignore_stack_for_abs_path(&root_abs_path, true);
                if ignore_stack.is_abs_path_ignored(&root_abs_path, true) {
                    root_entry.is_ignored = true;
                    state.insert_entry(root_entry.clone(), self.fs.as_ref());
                }
                state.enqueue_scan_dir(root_abs_path, &root_entry, &scan_job_tx);
            }
        };

        // Perform an initial scan of the directory.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut state = self.state.lock();
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
        }

        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
        if let Poll::Ready(Some(mut paths)) = futures::poll!(fs_events_rx.next()) {
            // Drain everything immediately available so it is handled as one batch.
            while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                paths.extend(more_paths);
            }
            self.process_events(paths).await;
        }

        // Continue processing events until the worktree is dropped.
        self.phase = BackgroundScannerPhase::Events;
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.scan_requests_rx.recv().fuse() => {
                    let Ok(request) = request else { break };
                    if !self.process_scan_request(request, false).await {
                        return;
                    }
                }

                path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
                    let Ok(path_prefix) = path_prefix else { break };
                    log::trace!("adding path prefix {:?}", path_prefix);

                    let did_scan = self.forcibly_load_paths(&[path_prefix.clone()]).await;
                    if did_scan {
                        let abs_path =
                        {
                            let mut state = self.state.lock();
                            state.path_prefixes_to_scan.insert(path_prefix.clone());
                            state.snapshot.abs_path.join(&path_prefix)
                        };

                        if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
                            self.process_events(vec![abs_path]).await;
                        }
                    }
                }

                paths = fs_events_rx.next().fuse() => {
                    let Some(mut paths) = paths else { break };
                    // Batch any additional events that are already pending.
                    while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
                        paths.extend(more_paths);
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
3510
3511    async fn process_scan_request(&self, mut request: ScanRequest, scanning: bool) -> bool {
3512        log::debug!("rescanning paths {:?}", request.relative_paths);
3513
3514        request.relative_paths.sort_unstable();
3515        self.forcibly_load_paths(&request.relative_paths).await;
3516
3517        let root_path = self.state.lock().snapshot.abs_path.clone();
3518        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
3519            Ok(path) => path,
3520            Err(err) => {
3521                log::error!("failed to canonicalize root path: {}", err);
3522                return true;
3523            }
3524        };
3525        let abs_paths = request
3526            .relative_paths
3527            .iter()
3528            .map(|path| {
3529                if path.file_name().is_some() {
3530                    root_canonical_path.join(path)
3531                } else {
3532                    root_canonical_path.clone()
3533                }
3534            })
3535            .collect::<Vec<_>>();
3536
3537        self.reload_entries_for_paths(
3538            root_path,
3539            root_canonical_path,
3540            &request.relative_paths,
3541            abs_paths,
3542            None,
3543        )
3544        .await;
3545        self.send_status_update(scanning, Some(request.done))
3546    }
3547
    /// Translate a batch of absolute paths reported by the filesystem into
    /// relative-path reloads, refresh the affected entries, and reload any
    /// git repositories whose `.git` directories were touched.
    async fn process_events(&mut self, mut abs_paths: Vec<PathBuf>) {
        let root_path = self.state.lock().snapshot.abs_path.clone();
        let root_canonical_path = match self.fs.canonicalize(&root_path).await {
            Ok(path) => path,
            Err(err) => {
                log::error!("failed to canonicalize root path: {}", err);
                return;
            }
        };

        let mut relative_paths = Vec::with_capacity(abs_paths.len());
        let mut dot_git_paths_to_reload = HashSet::default();
        abs_paths.sort_unstable();
        // After sorting, a path inside another event's directory immediately
        // follows it, so `dedup_by` drops events subsumed by an ancestor event.
        abs_paths.dedup_by(|a, b| a.starts_with(&b));
        abs_paths.retain(|abs_path| {
            let snapshot = &self.state.lock().snapshot;
            {
                let mut is_git_related = false;
                // Any event under a `.git` directory triggers a reload of the
                // containing repository.
                if let Some(dot_git_dir) = abs_path
                    .ancestors()
                    .find(|ancestor| ancestor.file_name() == Some(*DOT_GIT))
                {
                    let dot_git_path = dot_git_dir
                        .strip_prefix(&root_canonical_path)
                        .ok()
                        .map(|path| path.to_path_buf())
                        .unwrap_or_else(|| dot_git_dir.to_path_buf());
                    dot_git_paths_to_reload.insert(dot_git_path.to_path_buf());
                    is_git_related = true;
                }

                let relative_path: Arc<Path> =
                    if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                        path.into()
                    } else {
                        log::error!(
                        "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
                    );
                        return false;
                    };

                // Skip events inside directories that were never loaded; their
                // contents are not tracked in the snapshot.
                let parent_dir_is_loaded = relative_path.parent().map_or(true, |parent| {
                    snapshot
                        .entry_for_path(parent)
                        .map_or(false, |entry| entry.kind == EntryKind::Dir)
                });
                if !parent_dir_is_loaded {
                    log::debug!("ignoring event {relative_path:?} within unloaded directory");
                    return false;
                }

                if snapshot.is_path_excluded(relative_path.to_path_buf()) {
                    if !is_git_related {
                        log::debug!("ignoring FS event for excluded path {relative_path:?}");
                    }
                    return false;
                }

                relative_paths.push(relative_path);
                true
            }
        });

        if dot_git_paths_to_reload.is_empty() && relative_paths.is_empty() {
            return;
        }

        if !relative_paths.is_empty() {
            log::debug!("received fs events {:?}", relative_paths);

            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.reload_entries_for_paths(
                root_path,
                root_canonical_path,
                &relative_paths,
                abs_paths,
                Some(scan_job_tx.clone()),
            )
            .await;
            drop(scan_job_tx);
            self.scan_dirs(false, scan_job_rx).await;

            // Reloading entries may have changed ignore files, so update
            // ignore statuses and scan anything that needs rescanning.
            let (scan_job_tx, scan_job_rx) = channel::unbounded();
            self.update_ignore_statuses(scan_job_tx).await;
            self.scan_dirs(false, scan_job_rx).await;
        }

        {
            let mut state = self.state.lock();
            if !dot_git_paths_to_reload.is_empty() {
                if relative_paths.is_empty() {
                    state.snapshot.scan_id += 1;
                }
                log::debug!("reloading repositories: {dot_git_paths_to_reload:?}");
                state.reload_repositories(&dot_git_paths_to_reload, self.fs.as_ref());
            }
            state.snapshot.completed_scan_id = state.snapshot.scan_id;
            for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
                state.scanned_dirs.remove(&entry_id);
            }
        }

        self.send_status_update(false, None);
    }
3652
3653    async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> bool {
3654        let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
3655        {
3656            let mut state = self.state.lock();
3657            let root_path = state.snapshot.abs_path.clone();
3658            for path in paths {
3659                for ancestor in path.ancestors() {
3660                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
3661                        if entry.kind == EntryKind::UnloadedDir {
3662                            let abs_path = root_path.join(ancestor);
3663                            state.enqueue_scan_dir(abs_path.into(), entry, &scan_job_tx);
3664                            state.paths_to_scan.insert(path.clone());
3665                            break;
3666                        }
3667                    }
3668                }
3669            }
3670            drop(scan_job_tx);
3671        }
3672        while let Some(job) = scan_job_rx.next().await {
3673            self.scan_dir(&job).await.log_err();
3674        }
3675
3676        mem::take(&mut self.state.lock().paths_to_scan).len() > 0
3677    }
3678
    /// Run a pool of worker tasks (one per CPU) that drain `scan_jobs_rx`,
    /// while still servicing rescan requests and, when
    /// `enable_progress_updates` is set, emitting periodic progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The receiving side is gone; there is no one to scan for.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race and sends the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent it; catch up to its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
3751
3752    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
3753        let mut state = self.state.lock();
3754        if state.changed_paths.is_empty() && scanning {
3755            return true;
3756        }
3757
3758        let new_snapshot = state.snapshot.clone();
3759        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
3760        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
3761        state.changed_paths.clear();
3762
3763        self.status_updates_tx
3764            .unbounded_send(ScanState::Updated {
3765                snapshot: new_snapshot,
3766                changes,
3767                scanning,
3768                barrier,
3769            })
3770            .is_ok()
3771    }
3772
    /// Read the directory described by `job`, build entries for its children,
    /// and enqueue scan jobs for subdirectories. An error reading the
    /// directory itself is returned; errors on individual children are logged
    /// and the child is skipped.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let root_abs_path;
        let mut ignore_stack;
        let mut new_ignore;
        let root_char_bag;
        let next_entry_id;
        {
            let state = self.state.lock();
            let snapshot = &state.snapshot;
            root_abs_path = snapshot.abs_path().clone();
            if snapshot.is_path_excluded(job.path.to_path_buf()) {
                // NOTE(review): `error!` seems loud for an expected skip — the
                // excluded-child case further down logs at `debug!`; consider aligning.
                log::error!("skipping excluded directory {:?}", job.path);
                return Ok(());
            }
            log::debug!("scanning directory {:?}", job.path);
            ignore_stack = job.ignore_stack.clone();
            new_ignore = None;
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = self.next_entry_id.clone();
            drop(state);
        }

        let mut dotgit_path = None;
        let mut root_canonical_path = None;
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed
                // to reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, such entries should rarely be numerous. Update the
                // ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }
            // If we find a .git, we'll need to load the repository.
            else if child_name == *DOT_GIT {
                dotgit_path = Some(child_path.clone());
            }

            {
                // Drop any previously-tracked entry for an excluded child.
                let relative_path = job.path.join(child_name);
                let mut state = self.state.lock();
                if state.snapshot.is_path_excluded(relative_path.clone()) {
                    log::debug!("skipping excluded child entry {relative_path:?}");
                    state.remove_path(&relative_path);
                    continue;
                }
                drop(state);
            }

            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {child_abs_path:?}: {err:?}");
                    continue;
                }
            };

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if job.is_external {
                // Everything beneath an external directory is itself external.
                child_entry.is_external = true;
            } else if child_metadata.is_symlink {
                let canonical_path = match self.fs.canonicalize(&child_abs_path).await {
                    Ok(path) => path,
                    Err(err) => {
                        log::error!(
                            "error reading target of symlink {:?}: {:?}",
                            child_abs_path,
                            err
                        );
                        continue;
                    }
                };

                // lazily canonicalize the root path in order to determine if
                // symlinks point outside of the worktree.
                let root_canonical_path = match &root_canonical_path {
                    Some(path) => path,
                    None => match self.fs.canonicalize(&root_abs_path).await {
                        Ok(path) => root_canonical_path.insert(path),
                        Err(err) => {
                            log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err);
                            continue;
                        }
                    },
                };

                if !canonical_path.starts_with(root_canonical_path) {
                    child_entry.is_external = true;
                }
            }

            if child_entry.is_dir() {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path.clone(),
                        path: child_path,
                        is_external: child_entry.is_external,
                        ignore_stack: if child_entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                        containing_repository: job.containing_repository.clone(),
                    }));
                } else {
                    // Placeholder keeps `new_jobs` aligned with directory entries.
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
                if !child_entry.is_ignored {
                    // Combine staged and unstaged statuses for files inside
                    // the containing git repository.
                    if let Some((repository_dir, repository, staged_statuses)) =
                        &job.containing_repository
                    {
                        if let Ok(repo_path) = child_entry.path.strip_prefix(&repository_dir.0) {
                            if let Some(mtime) = child_entry.mtime {
                                let repo_path = RepoPath(repo_path.into());
                                child_entry.git_status = combine_git_statuses(
                                    staged_statuses.get(&repo_path).copied(),
                                    repository.lock().unstaged_status(&repo_path, mtime),
                                );
                            }
                        }
                    }
                }
            }

            {
                let relative_path = job.path.join(child_name);
                let state = self.state.lock();
                if state.snapshot.is_path_private(&relative_path) {
                    log::debug!("detected private file: {relative_path:?}");
                    child_entry.is_private = true;
                }
                drop(state)
            }

            new_entries.push(child_entry);
        }

        let mut state = self.state.lock();

        // Identify any subdirectories that should not be scanned.
        let mut job_ix = 0;
        for entry in &mut new_entries {
            state.reuse_entry_id(entry);
            if entry.is_dir() {
                if state.should_scan_directory(entry) {
                    job_ix += 1;
                } else {
                    log::debug!("defer scanning directory {:?}", entry.path);
                    entry.kind = EntryKind::UnloadedDir;
                    new_jobs.remove(job_ix);
                }
            }
        }

        state.populate_dir(&job.path, new_entries, new_ignore);

        let repository =
            dotgit_path.and_then(|path| state.build_git_repository(path, self.fs.as_ref()));

        // Propagate any repository discovered in this directory to the child
        // scan jobs before enqueueing them.
        for mut new_job in new_jobs.into_iter().flatten() {
            if let Some(containing_repository) = &repository {
                new_job.containing_repository = Some(containing_repository.clone());
            }

            job.scan_queue
                .try_send(new_job)
                .expect("channel is unbounded");
        }

        Ok(())
    }
4003
    /// Refresh snapshot entries for `relative_paths` (whose corresponding
    /// absolute paths are given in `abs_paths`). Entries whose files no
    /// longer exist are removed; the rest are rebuilt from fresh metadata.
    /// When `scan_queue_tx` is provided, directories are enqueued for a
    /// recursive rescan.
    async fn reload_entries_for_paths(
        &self,
        root_abs_path: Arc<Path>,
        root_canonical_path: PathBuf,
        relative_paths: &[Arc<Path>],
        abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) {
        // Stat and canonicalize all paths concurrently before taking the lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| async move {
                    let metadata = self.fs.metadata(abs_path).await?;
                    if let Some(metadata) = metadata {
                        let canonical_path = self.fs.canonicalize(abs_path).await?;

                        // If we're on a case-insensitive filesystem (default on macOS), we want
                        // to only ignore metadata for non-symlink files if their absolute-path matches
                        // the canonical-path.
                        // Because if not, this might be a case-only-renaming (`mv test.txt TEST.TXT`)
                        // and we want to ignore the metadata for the old path (`test.txt`) so it's
                        // treated as removed.
                        if !self.fs_case_sensitive && !metadata.is_symlink {
                            let canonical_file_name = canonical_path.file_name();
                            let file_name = abs_path.file_name();
                            if canonical_file_name != file_name {
                                return Ok(None);
                            }
                        }

                        anyhow::Ok(Some((metadata, canonical_path)))
                    } else {
                        Ok(None)
                    }
                })
                .collect::<Vec<_>>(),
        )
        .await;

        let mut state = self.state.lock();
        let snapshot = &mut state.snapshot;
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        let doing_recursive_update = scan_queue_tx.is_some();
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            if matches!(metadata, Ok(None)) || doing_recursive_update {
                log::trace!("remove path {:?}", path);
                state.remove_path(path);
            }
        }

        for (path, metadata) in relative_paths.iter().zip(metadata.iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
            match metadata {
                Ok(Some((metadata, canonical_path))) => {
                    let ignore_stack = state
                        .snapshot
                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);

                    let mut fs_entry = Entry::new(
                        path.clone(),
                        metadata,
                        self.next_entry_id.as_ref(),
                        state.snapshot.root_char_bag,
                    );
                    let is_dir = fs_entry.is_dir();
                    fs_entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, is_dir);
                    fs_entry.is_external = !canonical_path.starts_with(&root_canonical_path);
                    fs_entry.is_private = state.snapshot.is_path_private(path);

                    // Refresh the git status for non-ignored, non-external files.
                    if !is_dir && !fs_entry.is_ignored && !fs_entry.is_external {
                        if let Some((work_dir, repo)) = state.snapshot.local_repo_for_path(path) {
                            if let Ok(repo_path) = path.strip_prefix(work_dir.0) {
                                if let Some(mtime) = fs_entry.mtime {
                                    let repo_path = RepoPath(repo_path.into());
                                    let repo = repo.repo_ptr.lock();
                                    fs_entry.git_status = repo.status(&repo_path, mtime);
                                }
                            }
                        }
                    }

                    if let (Some(scan_queue_tx), true) = (&scan_queue_tx, fs_entry.is_dir()) {
                        if state.should_scan_directory(&fs_entry) {
                            state.enqueue_scan_dir(abs_path, &fs_entry, scan_queue_tx);
                        } else {
                            fs_entry.kind = EntryKind::UnloadedDir;
                        }
                    }

                    state.insert_entry(fs_entry, self.fs.as_ref());
                }
                Ok(None) => {
                    self.remove_repo_path(path, &mut state.snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file {abs_path:?} on event: {err:#}");
                }
            }
        }

        // Record which paths changed so the next status update can report them.
        util::extend_sorted(
            &mut state.changed_paths,
            relative_paths.iter().cloned(),
            usize::MAX,
            Ord::cmp,
        );
    }
4120
    /// Handles removal of `path` from the snapshot's git repository state.
    ///
    /// If `path` is a repository work directory (and is not itself inside a
    /// `.git` directory), the repository is dropped from both
    /// `git_repositories` and `repository_entries`.
    ///
    /// Always returns `Some(())`; the `Option` return type just allows
    /// `?`-style early exits if this grows more logic.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        // Paths inside a `.git` directory are internal repository files, not
        // tracked work-directory entries — skip them.
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            if let Some(repository) = snapshot.repository_for_work_directory(path) {
                let entry = repository.work_directory.0;
                snapshot.git_repositories.remove(&entry);
                snapshot
                    .snapshot
                    .repository_entries
                    .remove(&RepositoryWorkDirectory(path.into()));
                return Some(());
            }
        }

        // TODO statuses
        // Track when a .git is removed and iterate over the file system there

        Some(())
    }
4142
    /// Recomputes ignore status for all directories affected by `.gitignore`
    /// changes, fanning the per-directory work out across the executor's
    /// threads via `update_ignore_status`.
    async fn update_ignore_statuses(&self, scan_job_tx: Sender<ScanJob>) {
        use futures::FutureExt as _;

        // Work on a clone of the snapshot so the state lock is not held while
        // walking the tree below.
        let mut snapshot = self.state.lock().snapshot.clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    // Only refresh parents that still exist in the snapshot.
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // If the `.gitignore` entry itself is gone from the snapshot,
                // the ignore file was deleted; forget its cached rules.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared state.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.state
                .lock()
                .snapshot
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip paths nested under `parent_abs_path`: the recursive job
            // for the ancestor already covers its whole subtree. (Relies on
            // the sort above placing descendants right after ancestors.)
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
                scan_queue: scan_job_tx.clone(),
            }))
            .unwrap();
        }
        // Drop the seeding sender so the channel closes once all in-flight
        // jobs (which hold their own clones) have completed.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.scan_requests_rx.recv().fuse() => {
                                    let Ok(request) = request else { break };
                                    if !self.process_scan_request(request, true).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
4224
4225    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
4226        log::trace!("update ignore status {:?}", job.abs_path);
4227
4228        let mut ignore_stack = job.ignore_stack;
4229        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
4230            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
4231        }
4232
4233        let mut entries_by_id_edits = Vec::new();
4234        let mut entries_by_path_edits = Vec::new();
4235        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
4236        let repo = snapshot
4237            .local_repo_for_path(path)
4238            .map_or(None, |local_repo| Some(local_repo.1));
4239        for mut entry in snapshot.child_entries(path).cloned() {
4240            let was_ignored = entry.is_ignored;
4241            let abs_path: Arc<Path> = snapshot.abs_path().join(&entry.path).into();
4242            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
4243            if entry.is_dir() {
4244                let child_ignore_stack = if entry.is_ignored {
4245                    IgnoreStack::all()
4246                } else {
4247                    ignore_stack.clone()
4248                };
4249
4250                // Scan any directories that were previously ignored and weren't previously scanned.
4251                if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() {
4252                    let state = self.state.lock();
4253                    if state.should_scan_directory(&entry) {
4254                        state.enqueue_scan_dir(abs_path.clone(), &entry, &job.scan_queue);
4255                    }
4256                }
4257
4258                job.ignore_queue
4259                    .send(UpdateIgnoreStatusJob {
4260                        abs_path: abs_path.clone(),
4261                        ignore_stack: child_ignore_stack,
4262                        ignore_queue: job.ignore_queue.clone(),
4263                        scan_queue: job.scan_queue.clone(),
4264                    })
4265                    .await
4266                    .unwrap();
4267            }
4268
4269            if entry.is_ignored != was_ignored {
4270                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
4271                path_entry.scan_id = snapshot.scan_id;
4272                path_entry.is_ignored = entry.is_ignored;
4273                if !entry.is_dir() && !entry.is_ignored && !entry.is_external {
4274                    if let Some(repo) = repo {
4275                        if let Some(mtime) = &entry.mtime {
4276                            let repo_path = RepoPath(entry.path.to_path_buf());
4277                            let repo = repo.repo_ptr.lock();
4278                            entry.git_status = repo.status(&repo_path, *mtime);
4279                        }
4280                    }
4281                }
4282                entries_by_id_edits.push(Edit::Insert(path_entry));
4283                entries_by_path_edits.push(Edit::Insert(entry));
4284            }
4285        }
4286
4287        let state = &mut self.state.lock();
4288        for edit in &entries_by_path_edits {
4289            if let Edit::Insert(entry) = edit {
4290                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
4291                    state.changed_paths.insert(ix, entry.path.clone());
4292                }
4293            }
4294        }
4295
4296        state
4297            .snapshot
4298            .entries_by_path
4299            .edit(entries_by_path_edits, &());
4300        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
4301    }
4302
    /// Computes the set of entry changes between two snapshots, restricted to
    /// the given `event_paths` and their subtrees, by walking a cursor over
    /// each snapshot's entries in parallel.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> UpdatedEntriesSet {
        use BackgroundScannerPhase::*;
        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};

        // Identify which paths have changed. Use the known set of changed
        // parent paths to optimize the search.
        let mut changes = Vec::new();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let mut last_newly_loaded_dir_path = None;
        old_paths.next(&());
        new_paths.next(&());
        for path in event_paths {
            let path = PathKey(path.clone());
            // Advance each cursor to the event path. `seek_forward` only ever
            // moves forward, so positions from earlier iterations are kept.
            if old_paths.item().map_or(false, |e| e.path < path.0) {
                old_paths.seek_forward(&path, Bias::Left, &());
            }
            if new_paths.item().map_or(false, |e| e.path < path.0) {
                new_paths.seek_forward(&path, Bias::Left, &());
            }
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Both cursors have moved beyond this event path's
                        // subtree; continue with the next event path.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                // Path present only in the old snapshot.
                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if self.phase == EventsReceivedDuringInitialScan {
                                    if old_entry.id != new_entry.id {
                                        changes.push((
                                            old_entry.path.clone(),
                                            old_entry.id,
                                            Removed,
                                        ));
                                    }
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.push((
                                        new_entry.path.clone(),
                                        new_entry.id,
                                        AddedOrUpdated,
                                    ));
                                } else if old_entry.id != new_entry.id {
                                    // Same path but a different entry id:
                                    // report a replacement, not an update.
                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
                                } else if old_entry != new_entry {
                                    if old_entry.kind.is_unloaded() {
                                        // A directory's contents were loaded for
                                        // the first time; remember it so that its
                                        // children are reported as `Loaded` too.
                                        last_newly_loaded_dir_path = Some(&new_entry.path);
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Loaded,
                                        ));
                                    } else {
                                        changes.push((
                                            new_entry.path.clone(),
                                            new_entry.id,
                                            Updated,
                                        ));
                                    }
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                // Path present only in the new snapshot.
                                let is_newly_loaded = self.phase == InitialScan
                                    || last_newly_loaded_dir_path
                                        .as_ref()
                                        .map_or(false, |dir| new_entry.path.starts_with(&dir));
                                changes.push((
                                    new_entry.path.clone(),
                                    new_entry.id,
                                    if is_newly_loaded { Loaded } else { Added },
                                ));
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        let is_newly_loaded = self.phase == InitialScan
                            || last_newly_loaded_dir_path
                                .as_ref()
                                .map_or(false, |dir| new_entry.path.starts_with(&dir));
                        changes.push((
                            new_entry.path.clone(),
                            new_entry.id,
                            if is_newly_loaded { Loaded } else { Added },
                        ));
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes.into()
    }
4420
    /// Waits for the FS-watch debounce interval before a progress report is
    /// emitted. When `running` is false this pends forever, so the caller's
    /// select never takes the progress branch. Under a fake filesystem in
    /// tests, a simulated random delay is used instead of a real timer.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(FS_WATCH_LATENCY).await;
    }
4433}
4434
4435fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
4436    let mut result = root_char_bag;
4437    result.extend(
4438        path.to_string_lossy()
4439            .chars()
4440            .map(|c| c.to_ascii_lowercase()),
4441    );
4442    result
4443}
4444
/// A unit of work for the background scanner: scan the directory at
/// `abs_path` and enqueue further jobs for its subdirectories.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Channel on which jobs for child directories are enqueued.
    scan_queue: Sender<ScanJob>,
    // Inodes of this directory's ancestors — presumably used to detect
    // symlink cycles during recursive scans; confirm against the scanner's
    // directory-enqueueing logic.
    ancestor_inodes: TreeSet<u64>,
    /// Whether this directory lies outside the worktree root's canonical
    /// path (mirrors how `Entry::is_external` is computed).
    is_external: bool,
    /// The containing git repository, if any: its work directory, a handle to
    /// the repository, and a map of statuses keyed by repo-relative path.
    containing_repository: Option<(
        RepositoryWorkDirectory,
        Arc<Mutex<dyn GitRepository>>,
        TreeMap<RepoPath, GitFileStatus>,
    )>,
}
4458
/// A unit of work for `update_ignore_status`: re-evaluate ignore flags for
/// the children of one directory after a `.gitignore` change.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children need re-evaluation.
    abs_path: Arc<Path>,
    /// Gitignore rules from this directory's ancestors.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue for recursing into child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
    /// Queue for scanning directories that become unignored.
    scan_queue: Sender<ScanJob>,
}
4465
/// Test-support extension methods for worktree model handles.
pub trait WorktreeModelHandle {
    /// Flushes any pending FS events so that subsequent assertions observe a
    /// settled worktree. See the implementation on `Model<Worktree>`.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
4473
impl WorktreeModelHandle for Model<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a mut gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        let file_name = "fs-event-sentinel";

        let tree = self.clone();
        let (fs, root_path) = self.update(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for it to appear in the
            // worktree, then remove it and wait for it to disappear. By then,
            // any earlier redundant events must have been drained.
            fs.create_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();

            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_some())
                .await;

            fs.remove_file(&root_path.join(file_name), Default::default())
                .await
                .unwrap();
            cx.condition(&tree, |tree, _| tree.entry_for_path(file_name).is_none())
                .await;

            cx.update(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
4514
/// A `sum_tree` dimension tracking, for the entries accumulated so far, the
/// greatest path seen and how many entries/files (ignored or not) have been
/// passed.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// Greatest entry path accumulated so far.
    max_path: &'a Path,
    /// Total entries (files and directories).
    count: usize,
    /// Entries that are not git-ignored.
    non_ignored_count: usize,
    /// File entries only.
    file_count: usize,
    /// File entries that are not git-ignored.
    non_ignored_file_count: usize,
}
4523
4524impl<'a> TraversalProgress<'a> {
4525    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
4526        match (include_ignored, include_dirs) {
4527            (true, true) => self.count,
4528            (true, false) => self.file_count,
4529            (false, true) => self.non_ignored_count,
4530            (false, false) => self.non_ignored_file_count,
4531        }
4532    }
4533}
4534
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Folds in one summary: counts add up, and `max_path` is replaced by the
    /// summary's (later) maximum path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.non_ignored_count += summary.non_ignored_count;
        self.file_count += summary.file_count;
        self.non_ignored_file_count += summary.non_ignored_file_count;
    }
}
4544
impl<'a> Default for TraversalProgress<'a> {
    /// Zero progress: positioned before the first entry, with all counts 0.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            non_ignored_count: 0,
            file_count: 0,
            non_ignored_file_count: 0,
        }
    }
}
4556
/// Counts of git file statuses within a subtree of entries; used as a
/// `sum_tree` dimension (see the `Dimension` impl below).
#[derive(Clone, Debug, Default, Copy)]
struct GitStatuses {
    added: usize,
    modified: usize,
    conflict: usize,
}
4563
4564impl AddAssign for GitStatuses {
4565    fn add_assign(&mut self, rhs: Self) {
4566        self.added += rhs.added;
4567        self.modified += rhs.modified;
4568        self.conflict += rhs.conflict;
4569    }
4570}
4571
4572impl Sub for GitStatuses {
4573    type Output = GitStatuses;
4574
4575    fn sub(self, rhs: Self) -> Self::Output {
4576        GitStatuses {
4577            added: self.added - rhs.added,
4578            modified: self.modified - rhs.modified,
4579            conflict: self.conflict - rhs.conflict,
4580        }
4581    }
4582}
4583
impl<'a> sum_tree::Dimension<'a, EntrySummary> for GitStatuses {
    /// Accumulates the status counts carried by one entry summary.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        *self += summary.statuses
    }
}
4589
/// A filtered, path-ordered cursor over a snapshot's entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether git-ignored entries are yielded.
    include_ignored: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
4595
impl<'a> Traversal<'a> {
    /// Advances to the next entry included by this traversal's filters, by
    /// seeking to the position where the filtered count exceeds the current
    /// one. Returns whether the cursor found another entry.
    pub fn advance(&mut self) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, stopping at the next
    /// entry that passes the filters (looping to skip further subtrees whose
    /// roots are filtered out). Returns false at the end of the tree.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filtered entries strictly before the current position.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Number of filtered entries up to and including the current item.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
4643
4644impl<'a> Iterator for Traversal<'a> {
4645    type Item = &'a Entry;
4646
4647    fn next(&mut self) -> Option<Self::Item> {
4648        if let Some(item) = self.entry() {
4649            self.advance();
4650            Some(item)
4651        } else {
4652            None
4653        }
4654    }
4655}
4656
/// A seek target for positioning a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to this exact path.
    Path(&'a Path),
    /// Seek to the first position after this path's entire subtree.
    PathSuccessor(&'a Path),
    /// Seek to the `count`-th entry under the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
4667
4668impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
4669    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
4670        match self {
4671            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
4672            TraversalTarget::PathSuccessor(path) => {
4673                if !cursor_location.max_path.starts_with(path) {
4674                    Ordering::Equal
4675                } else {
4676                    Ordering::Greater
4677                }
4678            }
4679            TraversalTarget::Count {
4680                count,
4681                include_dirs,
4682                include_ignored,
4683            } => Ord::cmp(
4684                count,
4685                &cursor_location.count(*include_dirs, *include_ignored),
4686            ),
4687        }
4688    }
4689}
4690
impl<'a, 'b> SeekTarget<'a, EntrySummary, (TraversalProgress<'a>, GitStatuses)>
    for TraversalTarget<'b>
{
    /// Same ordering as the plain `TraversalProgress` comparison; the
    /// `GitStatuses` half of the dimension does not affect seeking.
    fn cmp(&self, cursor_location: &(TraversalProgress<'a>, GitStatuses), _: &()) -> Ordering {
        self.cmp(&cursor_location.0, &())
    }
}
4698
/// Iterator over the immediate children of `parent_path` (subtrees of each
/// child are skipped).
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4703
4704impl<'a> Iterator for ChildEntriesIter<'a> {
4705    type Item = &'a Entry;
4706
4707    fn next(&mut self) -> Option<Self::Item> {
4708        if let Some(item) = self.traversal.entry() {
4709            if item.path.starts_with(&self.parent_path) {
4710                self.traversal.advance_to_sibling();
4711                return Some(item);
4712            }
4713        }
4714        None
4715    }
4716}
4717
/// Iterator over every entry beneath `parent_path`, in depth-first path
/// order.
pub struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
4722
4723impl<'a> Iterator for DescendentEntriesIter<'a> {
4724    type Item = &'a Entry;
4725
4726    fn next(&mut self) -> Option<Self::Item> {
4727        if let Some(item) = self.traversal.entry() {
4728            if item.path.starts_with(&self.parent_path) {
4729                self.traversal.advance();
4730                return Some(item);
4731            }
4732        }
4733        None
4734    }
4735}
4736
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes an entry for the wire. The path is sent as lossy UTF-8, and
    /// the fuzzy-match `CharBag` is omitted — the receiving side recomputes
    /// it (see the `TryFrom` impl below).
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: entry.mtime.map(|time| time.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
            is_external: entry.is_external,
            git_status: entry.git_status.map(git_status_to_proto),
        }
    }
}
4752
4753impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
4754    type Error = anyhow::Error;
4755
4756    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
4757        let kind = if entry.is_dir {
4758            EntryKind::Dir
4759        } else {
4760            let mut char_bag = *root_char_bag;
4761            char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
4762            EntryKind::File(char_bag)
4763        };
4764        let path: Arc<Path> = PathBuf::from(entry.path).into();
4765        Ok(Entry {
4766            id: ProjectEntryId::from_proto(entry.id),
4767            kind,
4768            path,
4769            inode: entry.inode,
4770            mtime: entry.mtime.map(|time| time.into()),
4771            is_symlink: entry.is_symlink,
4772            is_ignored: entry.is_ignored,
4773            is_external: entry.is_external,
4774            git_status: git_status_from_proto(entry.git_status),
4775            is_private: false,
4776        })
4777    }
4778}
4779
4780fn combine_git_statuses(
4781    staged: Option<GitFileStatus>,
4782    unstaged: Option<GitFileStatus>,
4783) -> Option<GitFileStatus> {
4784    if let Some(staged) = staged {
4785        if let Some(unstaged) = unstaged {
4786            if unstaged != staged {
4787                Some(GitFileStatus::Modified)
4788            } else {
4789                Some(staged)
4790            }
4791        } else {
4792            Some(staged)
4793        }
4794    } else {
4795        unstaged
4796    }
4797}
4798
/// Decodes an optional protobuf git-status code, yielding `None` both when no
/// status was sent and when the code does not match a known enum value.
fn git_status_from_proto(git_status: Option<i32>) -> Option<GitFileStatus> {
    git_status.and_then(|status| {
        proto::GitStatus::from_i32(status).map(|status| match status {
            proto::GitStatus::Added => GitFileStatus::Added,
            proto::GitStatus::Modified => GitFileStatus::Modified,
            proto::GitStatus::Conflict => GitFileStatus::Conflict,
        })
    })
}
4808
/// Encodes a git file status as its protobuf enum code (inverse of
/// `git_status_from_proto`).
fn git_status_to_proto(status: GitFileStatus) -> i32 {
    match status {
        GitFileStatus::Added => proto::GitStatus::Added as i32,
        GitFileStatus::Modified => proto::GitStatus::Modified as i32,
        GitFileStatus::Conflict => proto::GitStatus::Conflict as i32,
    }
}
4816
/// Identifier for an entry within a project's worktrees, allocated from a
/// shared atomic counter.
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProjectEntryId(usize);

impl ProjectEntryId {
    /// Sentinel id that compares greater than every id handed out by `new`.
    pub const MAX: Self = Self(usize::MAX);

    /// Allocates the next id by bumping the shared counter.
    pub fn new(counter: &AtomicUsize) -> Self {
        ProjectEntryId(counter.fetch_add(1, SeqCst))
    }

    /// Reconstructs an id received over the wire.
    pub fn from_proto(id: u64) -> Self {
        ProjectEntryId(id as usize)
    }

    /// The wire representation of this id.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// The raw counter value backing this id.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
4839
/// Aggregated counts of primary error and warning diagnostics for one path
/// (see `to_proto`, which pairs these counts with a path and language server).
#[derive(Copy, Clone, Debug, Default, PartialEq, Serialize)]
pub struct DiagnosticSummary {
    pub error_count: usize,
    pub warning_count: usize,
}
4845
4846impl DiagnosticSummary {
4847    fn new<'a, T: 'a>(diagnostics: impl IntoIterator<Item = &'a DiagnosticEntry<T>>) -> Self {
4848        let mut this = Self {
4849            error_count: 0,
4850            warning_count: 0,
4851        };
4852
4853        for entry in diagnostics {
4854            if entry.diagnostic.is_primary {
4855                match entry.diagnostic.severity {
4856                    DiagnosticSeverity::ERROR => this.error_count += 1,
4857                    DiagnosticSeverity::WARNING => this.warning_count += 1,
4858                    _ => {}
4859                }
4860            }
4861        }
4862
4863        this
4864    }
4865
4866    pub fn is_empty(&self) -> bool {
4867        self.error_count == 0 && self.warning_count == 0
4868    }
4869
4870    pub fn to_proto(
4871        &self,
4872        language_server_id: LanguageServerId,
4873        path: &Path,
4874    ) -> proto::DiagnosticSummary {
4875        proto::DiagnosticSummary {
4876            path: path.to_string_lossy().to_string(),
4877            language_server_id: language_server_id.0 as u64,
4878            error_count: self.error_count as u32,
4879            warning_count: self.warning_count as u32,
4880        }
4881    }
4882}