worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    FutureExt, Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TakeUntilExt};
  59
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  61pub struct WorktreeId(usize);
  62
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
  68pub struct LocalWorktree {
  69    snapshot: LocalSnapshot,
  70    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
  71    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
  72    _background_scanner_task: Task<()>,
  73    share: Option<ShareState>,
  74    diagnostics: HashMap<
  75        Arc<Path>,
  76        Vec<(
  77            LanguageServerId,
  78            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
  79        )>,
  80    >,
  81    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  82    client: Arc<Client>,
  83    fs: Arc<dyn Fs>,
  84    visible: bool,
  85}
  86
  87pub struct RemoteWorktree {
  88    snapshot: Snapshot,
  89    background_snapshot: Arc<Mutex<Snapshot>>,
  90    project_id: u64,
  91    client: Arc<Client>,
  92    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
  93    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
  94    replica_id: ReplicaId,
  95    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  96    visible: bool,
  97    disconnected: bool,
  98}
  99
 100#[derive(Clone)]
 101pub struct Snapshot {
 102    id: WorktreeId,
 103    abs_path: Arc<Path>,
 104    root_name: String,
 105    root_char_bag: CharBag,
 106    entries_by_path: SumTree<Entry>,
 107    entries_by_id: SumTree<PathEntry>,
 108    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 109
 110    /// A number that increases every time the worktree begins scanning
 111    /// a set of paths from the filesystem. This scanning could be caused
 112    /// by some operation performed on the worktree, such as reading or
 113    /// writing a file, or by an event reported by the filesystem.
 114    scan_id: usize,
 115
 116    /// The latest scan id that has completed, and whose preceding scans
 117    /// have all completed. The current `scan_id` could be more than one
 118    /// greater than the `completed_scan_id` if operations are performed
 119    /// on the worktree while it is processing a file-system event.
 120    completed_scan_id: usize,
 121}
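
// Illustration only (a hypothetical helper, not part of the original API): a caller
// that remembered the `scan_id` of some change can check whether that change is
// guaranteed to be reflected in a snapshot by comparing it against
// `completed_scan_id`. This is the same comparison `RemoteWorktree::observed_snapshot`
// performs further down in this file.
#[allow(dead_code)]
fn scan_is_observed(snapshot: &Snapshot, scan_id: usize) -> bool {
    // `completed_scan_id` only advances once scan `scan_id` and every scan before it
    // have finished, so a single comparison suffices.
    snapshot.completed_scan_id >= scan_id
}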
 122
 123#[derive(Clone, Debug, PartialEq, Eq)]
 124pub struct RepositoryEntry {
 125    pub(crate) work_directory: WorkDirectoryEntry,
 126    pub(crate) branch: Option<Arc<str>>,
 127    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
 128}
 129
 130fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
 131    proto::GitStatus::from_i32(git_status).map(|status| match status {
 132        proto::GitStatus::Added => GitFileStatus::Added,
 133        proto::GitStatus::Modified => GitFileStatus::Modified,
 134        proto::GitStatus::Conflict => GitFileStatus::Conflict,
 135    })
 136}
 137
 138impl RepositoryEntry {
 139    pub fn branch(&self) -> Option<Arc<str>> {
 140        self.branch.clone()
 141    }
 142
 143    pub fn work_directory_id(&self) -> ProjectEntryId {
 144        *self.work_directory
 145    }
 146
 147    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 148        snapshot
 149            .entry_for_id(self.work_directory_id())
 150            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 151    }
 152
 153    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 154        self.work_directory
 155            .relativize(snapshot, path)
 156            .and_then(|repo_path| {
 157                self.statuses
 158                    .iter_from(&repo_path)
 159                    .take_while(|(key, _)| key.starts_with(&repo_path))
  160                    // Short circuit once we've found the highest level
 161                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
 162                    .map(|(_, status)| status)
 163                    .reduce(
 164                        |status_first, status_second| match (status_first, status_second) {
 165                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
 166                                &GitFileStatus::Conflict
 167                            }
 168                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
 169                                &GitFileStatus::Modified
 170                            }
 171                            _ => &GitFileStatus::Added,
 172                        },
 173                    )
 174                    .copied()
 175            })
 176    }
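
    // A worked example of the folding rule used above (sketch only; this hypothetical
    // associated function is not called anywhere): a directory reports Conflict if any
    // entry under it conflicts, otherwise Modified if any entry is modified, otherwise
    // Added.
    #[allow(dead_code)]
    fn combine_statuses_example(statuses: &[GitFileStatus]) -> Option<GitFileStatus> {
        statuses.iter().copied().reduce(|a, b| match (a, b) {
            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => GitFileStatus::Conflict,
            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => GitFileStatus::Modified,
            _ => GitFileStatus::Added,
        })
    }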
 177
 178    #[cfg(any(test, feature = "test-support"))]
 179    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 180        self.work_directory
 181            .relativize(snapshot, path)
  182            .and_then(|repo_path| self.statuses.get(&repo_path))
 183            .cloned()
 184    }
 185
 186    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
 187        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
 188        let mut removed_statuses: Vec<String> = Vec::new();
 189
 190        let mut self_statuses = self.statuses.iter().peekable();
 191        let mut other_statuses = other.statuses.iter().peekable();
 192        loop {
 193            match (self_statuses.peek(), other_statuses.peek()) {
 194                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
 195                    match Ord::cmp(self_repo_path, other_repo_path) {
 196                        Ordering::Less => {
 197                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
 198                            self_statuses.next();
 199                        }
 200                        Ordering::Equal => {
 201                            if self_status != other_status {
 202                                updated_statuses
 203                                    .push(make_status_entry(self_repo_path, self_status));
 204                            }
 205
 206                            self_statuses.next();
 207                            other_statuses.next();
 208                        }
 209                        Ordering::Greater => {
 210                            removed_statuses.push(make_repo_path(other_repo_path));
 211                            other_statuses.next();
 212                        }
 213                    }
 214                }
 215                (Some((self_repo_path, self_status)), None) => {
 216                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
 217                    self_statuses.next();
 218                }
 219                (None, Some((other_repo_path, _))) => {
 220                    removed_statuses.push(make_repo_path(other_repo_path));
 221                    other_statuses.next();
 222                }
 223                (None, None) => break,
 224            }
 225        }
 226
 227        proto::RepositoryEntry {
 228            work_directory_id: self.work_directory_id().to_proto(),
 229            branch: self.branch.as_ref().map(|str| str.to_string()),
 230            removed_repo_paths: removed_statuses,
 231            updated_statuses,
 232        }
 233    }
 234}
 235
 236fn make_repo_path(path: &RepoPath) -> String {
 237    path.as_os_str().to_string_lossy().to_string()
 238}
 239
 240fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
 241    proto::StatusEntry {
 242        repo_path: make_repo_path(path),
 243        status: match status {
 244            GitFileStatus::Added => proto::GitStatus::Added.into(),
 245            GitFileStatus::Modified => proto::GitStatus::Modified.into(),
 246            GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
 247        },
 248    }
 249}
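
// A round-trip sketch relating `make_status_entry` and `read_git_status` (illustration
// only; this hypothetical helper is unused): converting a status to its protobuf
// representation and back yields the original value for every variant.
#[allow(dead_code)]
fn status_proto_round_trip(path: &RepoPath, status: GitFileStatus) -> bool {
    read_git_status(make_status_entry(path, &status).status) == Some(status)
}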
 250
 251impl From<&RepositoryEntry> for proto::RepositoryEntry {
 252    fn from(value: &RepositoryEntry) -> Self {
 253        proto::RepositoryEntry {
 254            work_directory_id: value.work_directory.to_proto(),
 255            branch: value.branch.as_ref().map(|str| str.to_string()),
 256            updated_statuses: value
 257                .statuses
 258                .iter()
 259                .map(|(repo_path, status)| make_status_entry(repo_path, status))
 260                .collect(),
 261            removed_repo_paths: Default::default(),
 262        }
 263    }
 264}
 265
  266/// This path corresponds to the 'content path': the folder that contains the `.git` directory or file.
 267#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 268pub struct RepositoryWorkDirectory(Arc<Path>);
 269
 270impl Default for RepositoryWorkDirectory {
 271    fn default() -> Self {
 272        RepositoryWorkDirectory(Arc::from(Path::new("")))
 273    }
 274}
 275
 276impl AsRef<Path> for RepositoryWorkDirectory {
 277    fn as_ref(&self) -> &Path {
 278        self.0.as_ref()
 279    }
 280}
 281
 282#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 283pub struct WorkDirectoryEntry(ProjectEntryId);
 284
 285impl WorkDirectoryEntry {
 286    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 287        worktree.entry_for_id(self.0).and_then(|entry| {
 288            path.strip_prefix(&entry.path)
 289                .ok()
 290                .map(move |path| path.into())
 291        })
 292    }
 293}
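
// The path arithmetic behind `relativize`, shown with plain `std::path` values (a
// sketch with hypothetical paths; the snapshot lookup is elided): a worktree-relative
// file path becomes a `RepoPath` by stripping the repository's work-directory prefix.
#[allow(dead_code)]
fn relativize_example() -> Option<RepoPath> {
    let work_directory = Path::new("crates/project"); // path of the work directory entry
    let file_path = Path::new("crates/project/src/worktree.rs"); // worktree-relative path
    // Yields the repo-relative path "src/worktree.rs".
    file_path
        .strip_prefix(work_directory)
        .ok()
        .map(|path| path.into())
}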
 294
 295impl Deref for WorkDirectoryEntry {
 296    type Target = ProjectEntryId;
 297
 298    fn deref(&self) -> &Self::Target {
 299        &self.0
 300    }
 301}
 302
  303impl From<ProjectEntryId> for WorkDirectoryEntry {
 304    fn from(value: ProjectEntryId) -> Self {
 305        WorkDirectoryEntry(value)
 306    }
 307}
 308
 309#[derive(Debug, Clone)]
 310pub struct LocalSnapshot {
 311    snapshot: Snapshot,
  312    /// All of the gitignore files in the worktree, indexed by the absolute path of their parent directory.
 313    /// The boolean indicates whether the gitignore needs to be updated.
 314    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
 315    /// All of the git repositories in the worktree, indexed by the project entry
  316    /// id of their work directory.
 317    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 318}
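
// A sketch of the shape of `ignores_by_parent_abs_path` (hypothetical path; this
// helper exists purely for illustration): the key is the absolute path of the
// directory containing a `.gitignore`, and the boolean marks whether that gitignore
// needs to be reloaded on the next scan.
#[allow(dead_code)]
fn ignore_map_example() -> HashMap<Arc<Path>, (Arc<Gitignore>, bool)> {
    let mut ignores = HashMap::default();
    ignores.insert(
        Arc::from(Path::new("/home/user/project")),
        (Arc::new(Gitignore::empty()), false),
    );
    ignores
}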
 319
 320pub struct BackgroundScannerState {
 321    snapshot: LocalSnapshot,
 322    /// The ids of all of the entries that were removed from the snapshot
 323    /// as part of the current update. These entry ids may be re-used
 324    /// if the same inode is discovered at a new path, or if the given
 325    /// path is re-created after being deleted.
 326    removed_entry_ids: HashMap<u64, ProjectEntryId>,
 327    changed_paths: Vec<Arc<Path>>,
 328    prev_snapshot: Snapshot,
 329}
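
// Illustration of how `removed_entry_ids` preserves entry identity (a sketch; this
// hypothetical helper is unused): when a file reappears under a new path during the
// same update, looking up its inode recovers the `ProjectEntryId` it had before, so
// the id stays stable across renames and quick delete/re-create sequences.
#[allow(dead_code)]
fn reused_entry_id_example(state: &BackgroundScannerState, inode: u64) -> Option<ProjectEntryId> {
    state.removed_entry_ids.get(&inode).copied()
}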
 330
 331#[derive(Debug, Clone)]
 332pub struct LocalRepositoryEntry {
 333    pub(crate) work_dir_scan_id: usize,
 334    pub(crate) git_dir_scan_id: usize,
 335    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
 336    /// Path to the actual .git folder.
 337    /// Note: if .git is a file, this points to the folder indicated by the .git file
 338    pub(crate) git_dir_path: Arc<Path>,
 339}
 340
 341impl LocalRepositoryEntry {
  342    // Note that the given `path` is expected to be relative to the worktree root.
 343    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 344        path.starts_with(self.git_dir_path.as_ref())
 345    }
 346}
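
// Quick illustration of `in_dot_git` (a hypothetical, unused helper): for a repository
// rooted at the worktree, where `git_dir_path` is ".git", a metadata path such as
// ".git/index" is recognized as belonging to the repository, whereas a project file
// like "src/main.rs" would not be. Both paths are worktree-relative, per the note above.
#[allow(dead_code)]
fn in_dot_git_example(repo: &LocalRepositoryEntry) -> bool {
    repo.in_dot_git(Path::new(".git/index"))
}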
 347
 348impl Deref for LocalSnapshot {
 349    type Target = Snapshot;
 350
 351    fn deref(&self) -> &Self::Target {
 352        &self.snapshot
 353    }
 354}
 355
 356impl DerefMut for LocalSnapshot {
 357    fn deref_mut(&mut self) -> &mut Self::Target {
 358        &mut self.snapshot
 359    }
 360}
 361
 362enum ScanState {
 363    Started,
 364    Updated {
 365        snapshot: LocalSnapshot,
 366        changes: UpdatedEntriesSet,
 367        barrier: Option<barrier::Sender>,
 368        scanning: bool,
 369    },
 370}
 371
 372struct ShareState {
 373    project_id: u64,
 374    snapshots_tx:
 375        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
 376    resume_updates: watch::Sender<()>,
 377    _maintain_remote_snapshot: Task<Option<()>>,
 378}
 379
 380pub enum Event {
 381    UpdatedEntries(UpdatedEntriesSet),
 382    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
 383}
 384
 385impl Entity for Worktree {
 386    type Event = Event;
 387}
 388
 389impl Worktree {
 390    pub async fn local(
 391        client: Arc<Client>,
 392        path: impl Into<Arc<Path>>,
 393        visible: bool,
 394        fs: Arc<dyn Fs>,
 395        next_entry_id: Arc<AtomicUsize>,
 396        cx: &mut AsyncAppContext,
 397    ) -> Result<ModelHandle<Self>> {
 398        // After determining whether the root entry is a file or a directory, populate the
  399        // snapshot's "root name", which is used for fuzzy matching.
 400        let abs_path = path.into();
 401        let metadata = fs
 402            .metadata(&abs_path)
 403            .await
 404            .context("failed to stat worktree path")?;
 405
 406        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
 407            let root_name = abs_path
 408                .file_name()
 409                .map_or(String::new(), |f| f.to_string_lossy().to_string());
 410
 411            let mut snapshot = LocalSnapshot {
 412                ignores_by_parent_abs_path: Default::default(),
 413                git_repositories: Default::default(),
 414                snapshot: Snapshot {
 415                    id: WorktreeId::from_usize(cx.model_id()),
 416                    abs_path: abs_path.clone(),
 417                    root_name: root_name.clone(),
 418                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
 419                    entries_by_path: Default::default(),
 420                    entries_by_id: Default::default(),
 421                    repository_entries: Default::default(),
 422                    scan_id: 1,
 423                    completed_scan_id: 0,
 424                },
 425            };
 426
 427            if let Some(metadata) = metadata {
 428                snapshot.insert_entry(
 429                    Entry::new(
 430                        Arc::from(Path::new("")),
 431                        &metadata,
 432                        &next_entry_id,
 433                        snapshot.root_char_bag,
 434                    ),
 435                    fs.as_ref(),
 436                );
 437            }
 438
 439            let (path_changes_tx, path_changes_rx) = channel::unbounded();
 440            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 441
 442            cx.spawn_weak(|this, mut cx| async move {
 443                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
 444                    this.update(&mut cx, |this, cx| {
 445                        let this = this.as_local_mut().unwrap();
 446                        match state {
 447                            ScanState::Started => {
 448                                *this.is_scanning.0.borrow_mut() = true;
 449                            }
 450                            ScanState::Updated {
 451                                snapshot,
 452                                changes,
 453                                barrier,
 454                                scanning,
 455                            } => {
 456                                *this.is_scanning.0.borrow_mut() = scanning;
 457                                this.set_snapshot(snapshot, changes, cx);
 458                                drop(barrier);
 459                            }
 460                        }
 461                        cx.notify();
 462                    });
 463                }
 464            })
 465            .detach();
 466
 467            let background_scanner_task = cx.background().spawn({
 468                let fs = fs.clone();
 469                let snapshot = snapshot.clone();
 470                let background = cx.background().clone();
 471                async move {
 472                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 473                    BackgroundScanner::new(
 474                        snapshot,
 475                        next_entry_id,
 476                        fs,
 477                        scan_states_tx,
 478                        background,
 479                        path_changes_rx,
 480                    )
 481                    .run(events)
 482                    .await;
 483                }
 484            });
 485
 486            Worktree::Local(LocalWorktree {
 487                snapshot,
 488                is_scanning: watch::channel_with(true),
 489                share: None,
 490                path_changes_tx,
 491                _background_scanner_task: background_scanner_task,
 492                diagnostics: Default::default(),
 493                diagnostic_summaries: Default::default(),
 494                client,
 495                fs,
 496                visible,
 497            })
 498        }))
 499    }
 500
 501    pub fn remote(
 502        project_remote_id: u64,
 503        replica_id: ReplicaId,
 504        worktree: proto::WorktreeMetadata,
 505        client: Arc<Client>,
 506        cx: &mut AppContext,
 507    ) -> ModelHandle<Self> {
 508        cx.add_model(|cx: &mut ModelContext<Self>| {
 509            let snapshot = Snapshot {
 510                id: WorktreeId(worktree.id as usize),
 511                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
 512                root_name: worktree.root_name.clone(),
 513                root_char_bag: worktree
 514                    .root_name
 515                    .chars()
 516                    .map(|c| c.to_ascii_lowercase())
 517                    .collect(),
 518                entries_by_path: Default::default(),
 519                entries_by_id: Default::default(),
 520                repository_entries: Default::default(),
 521                scan_id: 1,
 522                completed_scan_id: 0,
 523            };
 524
 525            let (updates_tx, mut updates_rx) = mpsc::unbounded();
 526            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
 527            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 528
 529            cx.background()
 530                .spawn({
 531                    let background_snapshot = background_snapshot.clone();
 532                    async move {
 533                        while let Some(update) = updates_rx.next().await {
 534                            if let Err(error) =
 535                                background_snapshot.lock().apply_remote_update(update)
 536                            {
 537                                log::error!("error applying worktree update: {}", error);
 538                            }
 539                            snapshot_updated_tx.send(()).await.ok();
 540                        }
 541                    }
 542                })
 543                .detach();
 544
 545            cx.spawn_weak(|this, mut cx| async move {
 546                while (snapshot_updated_rx.recv().await).is_some() {
 547                    if let Some(this) = this.upgrade(&cx) {
 548                        this.update(&mut cx, |this, cx| {
 549                            let this = this.as_remote_mut().unwrap();
 550                            this.snapshot = this.background_snapshot.lock().clone();
 551                            cx.emit(Event::UpdatedEntries(Arc::from([])));
 552                            cx.notify();
 553                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 554                                if this.observed_snapshot(*scan_id) {
 555                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 556                                    let _ = tx.send(());
 557                                } else {
 558                                    break;
 559                                }
 560                            }
 561                        });
 562                    } else {
 563                        break;
 564                    }
 565                }
 566            })
 567            .detach();
 568
 569            Worktree::Remote(RemoteWorktree {
 570                project_id: project_remote_id,
 571                replica_id,
 572                snapshot: snapshot.clone(),
 573                background_snapshot,
 574                updates_tx: Some(updates_tx),
 575                snapshot_subscriptions: Default::default(),
 576                client: client.clone(),
 577                diagnostic_summaries: Default::default(),
 578                visible: worktree.visible,
 579                disconnected: false,
 580            })
 581        })
 582    }
 583
 584    pub fn as_local(&self) -> Option<&LocalWorktree> {
 585        if let Worktree::Local(worktree) = self {
 586            Some(worktree)
 587        } else {
 588            None
 589        }
 590    }
 591
 592    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 593        if let Worktree::Remote(worktree) = self {
 594            Some(worktree)
 595        } else {
 596            None
 597        }
 598    }
 599
 600    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 601        if let Worktree::Local(worktree) = self {
 602            Some(worktree)
 603        } else {
 604            None
 605        }
 606    }
 607
 608    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 609        if let Worktree::Remote(worktree) = self {
 610            Some(worktree)
 611        } else {
 612            None
 613        }
 614    }
 615
 616    pub fn is_local(&self) -> bool {
 617        matches!(self, Worktree::Local(_))
 618    }
 619
 620    pub fn is_remote(&self) -> bool {
 621        !self.is_local()
 622    }
 623
 624    pub fn snapshot(&self) -> Snapshot {
 625        match self {
 626            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 627            Worktree::Remote(worktree) => worktree.snapshot(),
 628        }
 629    }
 630
 631    pub fn scan_id(&self) -> usize {
 632        match self {
 633            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 634            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 635        }
 636    }
 637
 638    pub fn completed_scan_id(&self) -> usize {
 639        match self {
 640            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 641            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 642        }
 643    }
 644
 645    pub fn is_visible(&self) -> bool {
 646        match self {
 647            Worktree::Local(worktree) => worktree.visible,
 648            Worktree::Remote(worktree) => worktree.visible,
 649        }
 650    }
 651
 652    pub fn replica_id(&self) -> ReplicaId {
 653        match self {
 654            Worktree::Local(_) => 0,
 655            Worktree::Remote(worktree) => worktree.replica_id,
 656        }
 657    }
 658
 659    pub fn diagnostic_summaries(
 660        &self,
 661    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 662        match self {
 663            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 664            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 665        }
 666        .iter()
 667        .flat_map(|(path, summaries)| {
 668            summaries
 669                .iter()
 670                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 671        })
 672    }
 673
 674    pub fn abs_path(&self) -> Arc<Path> {
 675        match self {
 676            Worktree::Local(worktree) => worktree.abs_path.clone(),
 677            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 678        }
 679    }
 680
 681    pub fn root_file(&self, cx: &mut ModelContext<Self>) -> Option<File> {
 682        let entry = self.entry_for_path("")?;
 683        Some(File {
 684            worktree: cx.handle(),
 685            path: entry.path.clone(),
 686            mtime: entry.mtime,
 687            entry_id: entry.id,
 688            is_local: self.is_local(),
 689            is_deleted: false,
 690        })
 691    }
 692}
 693
 694impl LocalWorktree {
 695    pub fn contains_abs_path(&self, path: &Path) -> bool {
 696        path.starts_with(&self.abs_path)
 697    }
 698
 699    pub(crate) fn load_buffer(
 700        &mut self,
 701        id: u64,
 702        path: &Path,
 703        cx: &mut ModelContext<Worktree>,
 704    ) -> Task<Result<ModelHandle<Buffer>>> {
 705        let path = Arc::from(path);
 706        cx.spawn(move |this, mut cx| async move {
 707            let (file, contents, diff_base) = this
 708                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
 709                .await?;
 710            let text_buffer = cx
 711                .background()
 712                .spawn(async move { text::Buffer::new(0, id, contents) })
 713                .await;
 714            Ok(cx.add_model(|_| Buffer::build(text_buffer, diff_base, Some(Arc::new(file)))))
 715        })
 716    }
 717
 718    pub fn diagnostics_for_path(
 719        &self,
 720        path: &Path,
 721    ) -> Vec<(
 722        LanguageServerId,
 723        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 724    )> {
 725        self.diagnostics.get(path).cloned().unwrap_or_default()
 726    }
 727
 728    pub fn clear_diagnostics_for_language_server(
 729        &mut self,
 730        server_id: LanguageServerId,
 731        _: &mut ModelContext<Worktree>,
 732    ) {
 733        let worktree_id = self.id().to_proto();
 734        self.diagnostic_summaries
 735            .retain(|path, summaries_by_server_id| {
 736                if summaries_by_server_id.remove(&server_id).is_some() {
 737                    if let Some(share) = self.share.as_ref() {
 738                        self.client
 739                            .send(proto::UpdateDiagnosticSummary {
 740                                project_id: share.project_id,
 741                                worktree_id,
 742                                summary: Some(proto::DiagnosticSummary {
 743                                    path: path.to_string_lossy().to_string(),
 744                                    language_server_id: server_id.0 as u64,
 745                                    error_count: 0,
 746                                    warning_count: 0,
 747                                }),
 748                            })
 749                            .log_err();
 750                    }
 751                    !summaries_by_server_id.is_empty()
 752                } else {
 753                    true
 754                }
 755            });
 756
 757        self.diagnostics.retain(|_, diagnostics_by_server_id| {
 758            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 759                diagnostics_by_server_id.remove(ix);
 760                !diagnostics_by_server_id.is_empty()
 761            } else {
 762                true
 763            }
 764        });
 765    }
 766
 767    pub fn update_diagnostics(
 768        &mut self,
 769        server_id: LanguageServerId,
 770        worktree_path: Arc<Path>,
 771        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 772        _: &mut ModelContext<Worktree>,
 773    ) -> Result<bool> {
 774        let summaries_by_server_id = self
 775            .diagnostic_summaries
 776            .entry(worktree_path.clone())
 777            .or_default();
 778
 779        let old_summary = summaries_by_server_id
 780            .remove(&server_id)
 781            .unwrap_or_default();
 782
 783        let new_summary = DiagnosticSummary::new(&diagnostics);
 784        if new_summary.is_empty() {
 785            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
 786                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 787                    diagnostics_by_server_id.remove(ix);
 788                }
 789                if diagnostics_by_server_id.is_empty() {
 790                    self.diagnostics.remove(&worktree_path);
 791                }
 792            }
 793        } else {
 794            summaries_by_server_id.insert(server_id, new_summary);
 795            let diagnostics_by_server_id =
 796                self.diagnostics.entry(worktree_path.clone()).or_default();
 797            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 798                Ok(ix) => {
 799                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
 800                }
 801                Err(ix) => {
 802                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
 803                }
 804            }
 805        }
 806
 807        if !old_summary.is_empty() || !new_summary.is_empty() {
 808            if let Some(share) = self.share.as_ref() {
 809                self.client
 810                    .send(proto::UpdateDiagnosticSummary {
 811                        project_id: share.project_id,
 812                        worktree_id: self.id().to_proto(),
 813                        summary: Some(proto::DiagnosticSummary {
 814                            path: worktree_path.to_string_lossy().to_string(),
 815                            language_server_id: server_id.0 as u64,
 816                            error_count: new_summary.error_count as u32,
 817                            warning_count: new_summary.warning_count as u32,
 818                        }),
 819                    })
 820                    .log_err();
 821            }
 822        }
 823
 824        Ok(!old_summary.is_empty() || !new_summary.is_empty())
 825    }
 826
 827    fn set_snapshot(
 828        &mut self,
 829        new_snapshot: LocalSnapshot,
 830        entry_changes: UpdatedEntriesSet,
 831        cx: &mut ModelContext<Worktree>,
 832    ) {
 833        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
 834
 835        self.snapshot = new_snapshot;
 836
 837        if let Some(share) = self.share.as_mut() {
 838            share
 839                .snapshots_tx
 840                .unbounded_send((
 841                    self.snapshot.clone(),
 842                    entry_changes.clone(),
 843                    repo_changes.clone(),
 844                ))
 845                .ok();
 846        }
 847
 848        if !entry_changes.is_empty() {
 849            cx.emit(Event::UpdatedEntries(entry_changes));
 850        }
 851        if !repo_changes.is_empty() {
 852            cx.emit(Event::UpdatedGitRepositories(repo_changes));
 853        }
 854    }
 855
 856    fn changed_repos(
 857        &self,
 858        old_snapshot: &LocalSnapshot,
 859        new_snapshot: &LocalSnapshot,
 860    ) -> UpdatedGitRepositoriesSet {
 861        let mut changes = Vec::new();
 862        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
 863        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
 864        loop {
 865            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
 866                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
 867                    match Ord::cmp(&new_entry_id, &old_entry_id) {
 868                        Ordering::Less => {
 869                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
 870                                changes.push((
 871                                    entry.path.clone(),
 872                                    GitRepositoryChange {
 873                                        old_repository: None,
 874                                        git_dir_changed: true,
 875                                    },
 876                                ));
 877                            }
 878                            new_repos.next();
 879                        }
 880                        Ordering::Equal => {
 881                            let git_dir_changed =
 882                                new_repo.git_dir_scan_id != old_repo.git_dir_scan_id;
 883                            let work_dir_changed =
 884                                new_repo.work_dir_scan_id != old_repo.work_dir_scan_id;
 885                            if git_dir_changed || work_dir_changed {
 886                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
 887                                    let old_repo = old_snapshot
 888                                        .repository_entries
 889                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
 890                                        .cloned();
 891                                    changes.push((
 892                                        entry.path.clone(),
 893                                        GitRepositoryChange {
 894                                            old_repository: old_repo,
 895                                            git_dir_changed,
 896                                        },
 897                                    ));
 898                                }
 899                            }
 900                            new_repos.next();
 901                            old_repos.next();
 902                        }
 903                        Ordering::Greater => {
 904                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
 905                                let old_repo = old_snapshot
 906                                    .repository_entries
 907                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
 908                                    .cloned();
 909                                changes.push((
 910                                    entry.path.clone(),
 911                                    GitRepositoryChange {
 912                                        old_repository: old_repo,
 913                                        git_dir_changed: true,
 914                                    },
 915                                ));
 916                            }
 917                            old_repos.next();
 918                        }
 919                    }
 920                }
 921                (Some((entry_id, _)), None) => {
 922                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
 923                        changes.push((
 924                            entry.path.clone(),
 925                            GitRepositoryChange {
 926                                old_repository: None,
 927                                git_dir_changed: true,
 928                            },
 929                        ));
 930                    }
 931                    new_repos.next();
 932                }
 933                (None, Some((entry_id, _))) => {
 934                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
 935                        let old_repo = old_snapshot
 936                            .repository_entries
 937                            .get(&RepositoryWorkDirectory(entry.path.clone()))
 938                            .cloned();
 939                        changes.push((
 940                            entry.path.clone(),
 941                            GitRepositoryChange {
 942                                old_repository: old_repo,
 943                                git_dir_changed: true,
 944                            },
 945                        ));
 946                    }
 947                    old_repos.next();
 948                }
 949                (None, None) => break,
 950            }
 951        }
 952
 953        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
 954            (value.0.clone(), value.1.clone())
 955        }
 956
 957        changes.into()
 958    }
 959
 960    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 961        let mut is_scanning_rx = self.is_scanning.1.clone();
 962        async move {
 963            let mut is_scanning = is_scanning_rx.borrow().clone();
 964            while is_scanning {
 965                if let Some(value) = is_scanning_rx.recv().await {
 966                    is_scanning = value;
 967                } else {
 968                    break;
 969                }
 970            }
 971        }
 972    }
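
    // Usage sketch (hypothetical caller, e.g. from a test): wait for the background
    // scanner to report that it is idle before taking a snapshot, so that the snapshot
    // reflects the fully scanned tree.
    #[allow(dead_code)]
    async fn scan_complete_snapshot_example(&self) -> LocalSnapshot {
        self.scan_complete().await;
        self.snapshot()
    }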
 973
 974    pub fn snapshot(&self) -> LocalSnapshot {
 975        self.snapshot.clone()
 976    }
 977
 978    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 979        proto::WorktreeMetadata {
 980            id: self.id().to_proto(),
 981            root_name: self.root_name().to_string(),
 982            visible: self.visible,
 983            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 984        }
 985    }
 986
 987    fn load(
 988        &self,
 989        path: &Path,
 990        cx: &mut ModelContext<Worktree>,
 991    ) -> Task<Result<(File, String, Option<String>)>> {
 992        let handle = cx.handle();
 993        let path = Arc::from(path);
 994        let abs_path = self.absolutize(&path);
 995        let fs = self.fs.clone();
 996        let snapshot = self.snapshot();
 997
 998        let mut index_task = None;
 999
1000        if let Some(repo) = snapshot.repository_for_path(&path) {
1001            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
1002            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
1003                let repo = repo.repo_ptr.to_owned();
1004                index_task = Some(
1005                    cx.background()
1006                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
1007                );
1008            }
1009        }
1010
1011        cx.spawn(|this, mut cx| async move {
1012            let text = fs.load(&abs_path).await?;
1013
1014            let diff_base = if let Some(index_task) = index_task {
1015                index_task.await
1016            } else {
1017                None
1018            };
1019
1020            // Eagerly populate the snapshot with an updated entry for the loaded file
1021            let entry = this
1022                .update(&mut cx, |this, cx| {
1023                    this.as_local().unwrap().refresh_entry(path, None, cx)
1024                })
1025                .await?;
1026
1027            Ok((
1028                File {
1029                    entry_id: entry.id,
1030                    worktree: handle,
1031                    path: entry.path,
1032                    mtime: entry.mtime,
1033                    is_local: true,
1034                    is_deleted: false,
1035                },
1036                text,
1037                diff_base,
1038            ))
1039        })
1040    }
1041
1042    pub fn save_buffer(
1043        &self,
1044        buffer_handle: ModelHandle<Buffer>,
1045        path: Arc<Path>,
1046        has_changed_file: bool,
1047        cx: &mut ModelContext<Worktree>,
1048    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1049        let handle = cx.handle();
1050        let buffer = buffer_handle.read(cx);
1051
1052        let rpc = self.client.clone();
1053        let buffer_id = buffer.remote_id();
1054        let project_id = self.share.as_ref().map(|share| share.project_id);
1055
1056        let text = buffer.as_rope().clone();
1057        let fingerprint = text.fingerprint();
1058        let version = buffer.version();
1059        let save = self.write_file(path, text, buffer.line_ending(), cx);
1060
1061        cx.as_mut().spawn(|mut cx| async move {
1062            let entry = save.await?;
1063
1064            if has_changed_file {
1065                let new_file = Arc::new(File {
1066                    entry_id: entry.id,
1067                    worktree: handle,
1068                    path: entry.path,
1069                    mtime: entry.mtime,
1070                    is_local: true,
1071                    is_deleted: false,
1072                });
1073
1074                if let Some(project_id) = project_id {
1075                    rpc.send(proto::UpdateBufferFile {
1076                        project_id,
1077                        buffer_id,
1078                        file: Some(new_file.to_proto()),
1079                    })
1080                    .log_err();
1081                }
1082
1083                buffer_handle.update(&mut cx, |buffer, cx| {
1084                    if has_changed_file {
1085                        buffer.file_updated(new_file, cx).detach();
1086                    }
1087                });
1088            }
1089
1090            if let Some(project_id) = project_id {
1091                rpc.send(proto::BufferSaved {
1092                    project_id,
1093                    buffer_id,
1094                    version: serialize_version(&version),
1095                    mtime: Some(entry.mtime.into()),
1096                    fingerprint: serialize_fingerprint(fingerprint),
1097                })?;
1098            }
1099
1100            buffer_handle.update(&mut cx, |buffer, cx| {
1101                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1102            });
1103
1104            Ok((version, fingerprint, entry.mtime))
1105        })
1106    }
1107
1108    pub fn create_entry(
1109        &self,
1110        path: impl Into<Arc<Path>>,
1111        is_dir: bool,
1112        cx: &mut ModelContext<Worktree>,
1113    ) -> Task<Result<Entry>> {
1114        let path = path.into();
1115        let abs_path = self.absolutize(&path);
1116        let fs = self.fs.clone();
1117        let write = cx.background().spawn(async move {
1118            if is_dir {
1119                fs.create_dir(&abs_path).await
1120            } else {
1121                fs.save(&abs_path, &Default::default(), Default::default())
1122                    .await
1123            }
1124        });
1125
1126        cx.spawn(|this, mut cx| async move {
1127            write.await?;
1128            this.update(&mut cx, |this, cx| {
1129                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1130            })
1131            .await
1132        })
1133    }
1134
1135    pub fn write_file(
1136        &self,
1137        path: impl Into<Arc<Path>>,
1138        text: Rope,
1139        line_ending: LineEnding,
1140        cx: &mut ModelContext<Worktree>,
1141    ) -> Task<Result<Entry>> {
1142        let path = path.into();
1143        let abs_path = self.absolutize(&path);
1144        let fs = self.fs.clone();
1145        let write = cx
1146            .background()
1147            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1148
1149        cx.spawn(|this, mut cx| async move {
1150            write.await?;
1151            this.update(&mut cx, |this, cx| {
1152                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1153            })
1154            .await
1155        })
1156    }
1157
1158    pub fn delete_entry(
1159        &self,
1160        entry_id: ProjectEntryId,
1161        cx: &mut ModelContext<Worktree>,
1162    ) -> Option<Task<Result<()>>> {
1163        let entry = self.entry_for_id(entry_id)?.clone();
1164        let abs_path = self.abs_path.clone();
1165        let fs = self.fs.clone();
1166
1167        let delete = cx.background().spawn(async move {
1168            let mut abs_path = fs.canonicalize(&abs_path).await?;
1169            if entry.path.file_name().is_some() {
1170                abs_path = abs_path.join(&entry.path);
1171            }
1172            if entry.is_file() {
1173                fs.remove_file(&abs_path, Default::default()).await?;
1174            } else {
1175                fs.remove_dir(
1176                    &abs_path,
1177                    RemoveOptions {
1178                        recursive: true,
1179                        ignore_if_not_exists: false,
1180                    },
1181                )
1182                .await?;
1183            }
1184            anyhow::Ok(abs_path)
1185        });
1186
1187        Some(cx.spawn(|this, mut cx| async move {
1188            let abs_path = delete.await?;
1189            let (tx, mut rx) = barrier::channel();
1190            this.update(&mut cx, |this, _| {
1191                this.as_local_mut()
1192                    .unwrap()
1193                    .path_changes_tx
1194                    .try_send((vec![abs_path], tx))
1195            })?;
1196            rx.recv().await;
1197            Ok(())
1198        }))
1199    }
1200
1201    pub fn rename_entry(
1202        &self,
1203        entry_id: ProjectEntryId,
1204        new_path: impl Into<Arc<Path>>,
1205        cx: &mut ModelContext<Worktree>,
1206    ) -> Option<Task<Result<Entry>>> {
1207        let old_path = self.entry_for_id(entry_id)?.path.clone();
1208        let new_path = new_path.into();
1209        let abs_old_path = self.absolutize(&old_path);
1210        let abs_new_path = self.absolutize(&new_path);
1211        let fs = self.fs.clone();
1212        let rename = cx.background().spawn(async move {
1213            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1214                .await
1215        });
1216
1217        Some(cx.spawn(|this, mut cx| async move {
1218            rename.await?;
1219            this.update(&mut cx, |this, cx| {
1220                this.as_local_mut()
1221                    .unwrap()
1222                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1223            })
1224            .await
1225        }))
1226    }
1227
1228    pub fn copy_entry(
1229        &self,
1230        entry_id: ProjectEntryId,
1231        new_path: impl Into<Arc<Path>>,
1232        cx: &mut ModelContext<Worktree>,
1233    ) -> Option<Task<Result<Entry>>> {
1234        let old_path = self.entry_for_id(entry_id)?.path.clone();
1235        let new_path = new_path.into();
1236        let abs_old_path = self.absolutize(&old_path);
1237        let abs_new_path = self.absolutize(&new_path);
1238        let fs = self.fs.clone();
1239        let copy = cx.background().spawn(async move {
1240            copy_recursive(
1241                fs.as_ref(),
1242                &abs_old_path,
1243                &abs_new_path,
1244                Default::default(),
1245            )
1246            .await
1247        });
1248
1249        Some(cx.spawn(|this, mut cx| async move {
1250            copy.await?;
1251            this.update(&mut cx, |this, cx| {
1252                this.as_local_mut()
1253                    .unwrap()
1254                    .refresh_entry(new_path.clone(), None, cx)
1255            })
1256            .await
1257        }))
1258    }
1259
1260    fn refresh_entry(
1261        &self,
1262        path: Arc<Path>,
1263        old_path: Option<Arc<Path>>,
1264        cx: &mut ModelContext<Worktree>,
1265    ) -> Task<Result<Entry>> {
1266        let fs = self.fs.clone();
1267        let abs_root_path = self.abs_path.clone();
1268        let path_changes_tx = self.path_changes_tx.clone();
1269        cx.spawn_weak(move |this, mut cx| async move {
1270            let abs_path = fs.canonicalize(&abs_root_path).await?;
1271            let mut paths = Vec::with_capacity(2);
1272            paths.push(if path.file_name().is_some() {
1273                abs_path.join(&path)
1274            } else {
1275                abs_path.clone()
1276            });
1277            if let Some(old_path) = old_path {
1278                paths.push(if old_path.file_name().is_some() {
1279                    abs_path.join(&old_path)
1280                } else {
1281                    abs_path.clone()
1282                });
1283            }
1284
1285            let (tx, mut rx) = barrier::channel();
1286            path_changes_tx.try_send((paths, tx))?;
1287            rx.recv().await;
1288            this.upgrade(&cx)
1289                .ok_or_else(|| anyhow!("worktree was dropped"))?
1290                .update(&mut cx, |this, _| {
1291                    this.entry_for_path(path)
1292                        .cloned()
1293                        .ok_or_else(|| anyhow!("failed to read path after update"))
1294                })
1295        })
1296    }
1297
1298    pub fn observe_updates<F, Fut>(
1299        &mut self,
1300        project_id: u64,
1301        cx: &mut ModelContext<Worktree>,
1302        callback: F,
1303    ) -> oneshot::Receiver<()>
1304    where
1305        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1306        Fut: Send + Future<Output = bool>,
1307    {
1308        #[cfg(any(test, feature = "test-support"))]
1309        const MAX_CHUNK_SIZE: usize = 2;
1310        #[cfg(not(any(test, feature = "test-support")))]
1311        const MAX_CHUNK_SIZE: usize = 256;
1312
1313        let (share_tx, share_rx) = oneshot::channel();
1314
1315        if let Some(share) = self.share.as_mut() {
1316            share_tx.send(()).ok();
1317            *share.resume_updates.borrow_mut() = ();
1318            return share_rx;
1319        }
1320
1321        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
1322        let (snapshots_tx, mut snapshots_rx) =
1323            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
1324        snapshots_tx
1325            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
1326            .ok();
1327
1328        let worktree_id = cx.model_id() as u64;
1329        let _maintain_remote_snapshot = cx.background().spawn(async move {
1330            let mut is_first = true;
1331            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
1332                let update;
1333                if is_first {
1334                    update = snapshot.build_initial_update(project_id, worktree_id);
1335                    is_first = false;
1336                } else {
1337                    update =
1338                        snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
1339                }
1340
1341                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1342                    let _ = resume_updates_rx.try_recv();
1343                    loop {
1344                        let result = callback(update.clone());
1345                        if result.await {
1346                            break;
1347                        } else {
1348                            log::info!("waiting to resume updates");
1349                            if resume_updates_rx.next().await.is_none() {
1350                                return Some(());
1351                            }
1352                        }
1353                    }
1354                }
1355            }
1356            share_tx.send(()).ok();
1357            Some(())
1358        });
1359
1360        self.share = Some(ShareState {
1361            project_id,
1362            snapshots_tx,
1363            resume_updates: resume_updates_tx,
1364            _maintain_remote_snapshot,
1365        });
1366        share_rx
1367    }
1368
1369    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1370        let client = self.client.clone();
1371
1372        for (path, summaries) in &self.diagnostic_summaries {
1373            for (&server_id, summary) in summaries {
1374                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1375                    project_id,
1376                    worktree_id: cx.model_id() as u64,
1377                    summary: Some(summary.to_proto(server_id, &path)),
1378                }) {
1379                    return Task::ready(Err(e));
1380                }
1381            }
1382        }
1383
1384        let rx = self.observe_updates(project_id, cx, move |update| {
1385            client.request(update).map(|result| result.is_ok())
1386        });
1387        cx.foreground()
1388            .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
1389    }
1390
1391    pub fn unshare(&mut self) {
1392        self.share.take();
1393    }
1394
1395    pub fn is_shared(&self) -> bool {
1396        self.share.is_some()
1397    }
1398}
1399
1400impl RemoteWorktree {
1401    fn snapshot(&self) -> Snapshot {
1402        self.snapshot.clone()
1403    }
1404
1405    pub fn disconnected_from_host(&mut self) {
1406        self.updates_tx.take();
1407        self.snapshot_subscriptions.clear();
1408        self.disconnected = true;
1409    }
1410
1411    pub fn save_buffer(
1412        &self,
1413        buffer_handle: ModelHandle<Buffer>,
1414        cx: &mut ModelContext<Worktree>,
1415    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1416        let buffer = buffer_handle.read(cx);
1417        let buffer_id = buffer.remote_id();
1418        let version = buffer.version();
1419        let rpc = self.client.clone();
1420        let project_id = self.project_id;
1421        cx.as_mut().spawn(|mut cx| async move {
1422            let response = rpc
1423                .request(proto::SaveBuffer {
1424                    project_id,
1425                    buffer_id,
1426                    version: serialize_version(&version),
1427                })
1428                .await?;
1429            let version = deserialize_version(&response.version);
1430            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1431            let mtime = response
1432                .mtime
1433                .ok_or_else(|| anyhow!("missing mtime"))?
1434                .into();
1435
1436            buffer_handle.update(&mut cx, |buffer, cx| {
1437                buffer.did_save(version.clone(), fingerprint, mtime, cx);
1438            });
1439
1440            Ok((version, fingerprint, mtime))
1441        })
1442    }
1443
1444    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1445        if let Some(updates_tx) = &self.updates_tx {
1446            updates_tx
1447                .unbounded_send(update)
1448                .expect("consumer runs to completion");
1449        }
1450    }
1451
1452    fn observed_snapshot(&self, scan_id: usize) -> bool {
1453        self.completed_scan_id >= scan_id
1454    }
1455
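        /// Returns a future that resolves once this worktree has observed a snapshot
        /// with at least the given `scan_id`. If the worktree has disconnected from
        /// the host, the future resolves to an error instead.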
1456    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1457        let (tx, rx) = oneshot::channel();
1458        if self.observed_snapshot(scan_id) {
1459            let _ = tx.send(());
1460        } else if self.disconnected {
1461            drop(tx);
1462        } else {
1463            match self
1464                .snapshot_subscriptions
1465                .binary_search_by_key(&scan_id, |probe| probe.0)
1466            {
1467                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1468            }
1469        }
1470
1471        async move {
1472            rx.await?;
1473            Ok(())
1474        }
1475    }
1476
1477    pub fn update_diagnostic_summary(
1478        &mut self,
1479        path: Arc<Path>,
1480        summary: &proto::DiagnosticSummary,
1481    ) {
1482        let server_id = LanguageServerId(summary.language_server_id as usize);
1483        let summary = DiagnosticSummary {
1484            error_count: summary.error_count as usize,
1485            warning_count: summary.warning_count as usize,
1486        };
1487
1488        if summary.is_empty() {
1489            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1490                summaries.remove(&server_id);
1491                if summaries.is_empty() {
1492                    self.diagnostic_summaries.remove(&path);
1493                }
1494            }
1495        } else {
1496            self.diagnostic_summaries
1497                .entry(path)
1498                .or_default()
1499                .insert(server_id, summary);
1500        }
1501    }
1502
1503    pub fn insert_entry(
1504        &mut self,
1505        entry: proto::Entry,
1506        scan_id: usize,
1507        cx: &mut ModelContext<Worktree>,
1508    ) -> Task<Result<Entry>> {
1509        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1510        cx.spawn(|this, mut cx| async move {
1511            wait_for_snapshot.await?;
1512            this.update(&mut cx, |worktree, _| {
1513                let worktree = worktree.as_remote_mut().unwrap();
1514                let mut snapshot = worktree.background_snapshot.lock();
1515                let entry = snapshot.insert_entry(entry);
1516                worktree.snapshot = snapshot.clone();
1517                entry
1518            })
1519        })
1520    }
1521
1522    pub(crate) fn delete_entry(
1523        &mut self,
1524        id: ProjectEntryId,
1525        scan_id: usize,
1526        cx: &mut ModelContext<Worktree>,
1527    ) -> Task<Result<()>> {
1528        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1529        cx.spawn(|this, mut cx| async move {
1530            wait_for_snapshot.await?;
1531            this.update(&mut cx, |worktree, _| {
1532                let worktree = worktree.as_remote_mut().unwrap();
1533                let mut snapshot = worktree.background_snapshot.lock();
1534                snapshot.delete_entry(id);
1535                worktree.snapshot = snapshot.clone();
1536            });
1537            Ok(())
1538        })
1539    }
1540}
1541
1542impl Snapshot {
1543    pub fn id(&self) -> WorktreeId {
1544        self.id
1545    }
1546
1547    pub fn abs_path(&self) -> &Arc<Path> {
1548        &self.abs_path
1549    }
1550
1551    pub fn absolutize(&self, path: &Path) -> PathBuf {
1552        if path.file_name().is_some() {
1553            self.abs_path.join(path)
1554        } else {
1555            self.abs_path.to_path_buf()
1556        }
1557    }
1558
1559    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1560        self.entries_by_id.get(&entry_id, &()).is_some()
1561    }
1562
1563    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1564        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1565        let old_entry = self.entries_by_id.insert_or_replace(
1566            PathEntry {
1567                id: entry.id,
1568                path: entry.path.clone(),
1569                is_ignored: entry.is_ignored,
1570                scan_id: 0,
1571            },
1572            &(),
1573        );
1574        if let Some(old_entry) = old_entry {
1575            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1576        }
1577        self.entries_by_path.insert_or_replace(entry.clone(), &());
1578        Ok(entry)
1579    }
1580
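        /// Remove the entry with the given id, along with every entry beneath its
        /// path, from both the path-ordered and id-ordered trees. Returns the path
        /// of the removed entry, if it existed.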
1581    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1582        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1583        self.entries_by_path = {
1584            let mut cursor = self.entries_by_path.cursor();
1585            let mut new_entries_by_path =
1586                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1587            while let Some(entry) = cursor.item() {
1588                if entry.path.starts_with(&removed_entry.path) {
1589                    self.entries_by_id.remove(&entry.id, &());
1590                    cursor.next(&());
1591                } else {
1592                    break;
1593                }
1594            }
1595            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1596            new_entries_by_path
1597        };
1598
1599        Some(removed_entry.path)
1600    }
1601
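        /// Apply an incremental update received from the host: remove deleted
        /// entries, upsert changed entries, reconcile repository metadata, and
        /// advance the scan ids.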
1602    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1603        let mut entries_by_path_edits = Vec::new();
1604        let mut entries_by_id_edits = Vec::new();
1605
1606        for entry_id in update.removed_entries {
1607            let entry_id = ProjectEntryId::from_proto(entry_id);
1608            entries_by_id_edits.push(Edit::Remove(entry_id));
1609            if let Some(entry) = self.entry_for_id(entry_id) {
1610                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1611            }
1612        }
1613
1614        for entry in update.updated_entries {
1615            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1616            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1617                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1618            }
1619            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1620                if old_entry.id != entry.id {
1621                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
1622                }
1623            }
1624            entries_by_id_edits.push(Edit::Insert(PathEntry {
1625                id: entry.id,
1626                path: entry.path.clone(),
1627                is_ignored: entry.is_ignored,
1628                scan_id: 0,
1629            }));
1630            entries_by_path_edits.push(Edit::Insert(entry));
1631        }
1632
1633        self.entries_by_path.edit(entries_by_path_edits, &());
1634        self.entries_by_id.edit(entries_by_id_edits, &());
1635
1636        update.removed_repositories.sort_unstable();
1637        self.repository_entries.retain(|_, entry| {
1638            update
1639                .removed_repositories
1640                .binary_search(&entry.work_directory.to_proto())
1641                .is_err()
1646        });
1647
1648        for repository in update.updated_repositories {
1649            let work_directory_entry: WorkDirectoryEntry =
1650                ProjectEntryId::from_proto(repository.work_directory_id).into();
1651
1652            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1653                let mut statuses = TreeMap::default();
1654                for status_entry in repository.updated_statuses {
1655                    let Some(git_file_status) = read_git_status(status_entry.status) else {
1656                        continue;
1657                    };
1658
1659                    let repo_path = RepoPath::new(status_entry.repo_path.into());
1660                    statuses.insert(repo_path, git_file_status);
1661                }
1662
1663                let work_directory = RepositoryWorkDirectory(entry.path.clone());
1664                if self.repository_entries.get(&work_directory).is_some() {
1665                    self.repository_entries.update(&work_directory, |repo| {
1666                        repo.branch = repository.branch.map(Into::into);
1667                        repo.statuses.insert_tree(statuses);
1668
1669                        for repo_path in repository.removed_repo_paths {
1670                            let repo_path = RepoPath::new(repo_path.into());
1671                            repo.statuses.remove(&repo_path);
1672                        }
1673                    });
1674                } else {
1675                    self.repository_entries.insert(
1676                        work_directory,
1677                        RepositoryEntry {
1678                            work_directory: work_directory_entry,
1679                            branch: repository.branch.map(Into::into),
1680                            statuses,
1681                        },
1682                    )
1683                }
1684            } else {
1685                log::error!("no work directory entry for repository {:?}", repository)
1686            }
1687        }
1688
1689        self.scan_id = update.scan_id as usize;
1690        if update.is_last_update {
1691            self.completed_scan_id = update.scan_id as usize;
1692        }
1693
1694        Ok(())
1695    }
1696
1697    pub fn file_count(&self) -> usize {
1698        self.entries_by_path.summary().file_count
1699    }
1700
1701    pub fn visible_file_count(&self) -> usize {
1702        self.entries_by_path.summary().visible_file_count
1703    }
1704
1705    fn traverse_from_offset(
1706        &self,
1707        include_dirs: bool,
1708        include_ignored: bool,
1709        start_offset: usize,
1710    ) -> Traversal {
1711        let mut cursor = self.entries_by_path.cursor();
1712        cursor.seek(
1713            &TraversalTarget::Count {
1714                count: start_offset,
1715                include_dirs,
1716                include_ignored,
1717            },
1718            Bias::Right,
1719            &(),
1720        );
1721        Traversal {
1722            cursor,
1723            include_dirs,
1724            include_ignored,
1725        }
1726    }
1727
1728    fn traverse_from_path(
1729        &self,
1730        include_dirs: bool,
1731        include_ignored: bool,
1732        path: &Path,
1733    ) -> Traversal {
1734        let mut cursor = self.entries_by_path.cursor();
1735        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1736        Traversal {
1737            cursor,
1738            include_dirs,
1739            include_ignored,
1740        }
1741    }
1742
1743    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1744        self.traverse_from_offset(false, include_ignored, start)
1745    }
1746
1747    pub fn entries(&self, include_ignored: bool) -> Traversal {
1748        self.traverse_from_offset(true, include_ignored, 0)
1749    }
1750
1751    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1752        self.repository_entries
1753            .iter()
1754            .map(|(path, entry)| (&path.0, entry))
1755    }
1756
1757    /// Get the repository whose work directory is the given path.
1758    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1759        self.repository_entries
1760            .get(&RepositoryWorkDirectory(path.into()))
1761            .cloned()
1762    }
1763
1764    /// Get the repository whose work directory contains the given path.
1765    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1766        self.repository_and_work_directory_for_path(path)
1767            .map(|e| e.1)
1768    }
1769
1770    pub fn repository_and_work_directory_for_path(
1771        &self,
1772        path: &Path,
1773    ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
1774        self.repository_entries
1775            .iter()
1776            .filter(|(workdir_path, _)| path.starts_with(workdir_path))
1777            .last()
1778            .map(|(path, repo)| (path.clone(), repo.clone()))
1779    }
1780
1781    /// Given an ordered iterator of entries, returns an iterator of those entries,
1782    /// along with their containing git repository.
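        ///
        /// A minimal usage sketch (assuming a populated `snapshot: &Snapshot`;
        /// marked `ignore` since it depends on worktree state):
        ///
        /// ```ignore
        /// for (entry, repo) in snapshot.entries_with_repositories(snapshot.entries(false)) {
        ///     println!("{:?} is in a repository: {}", entry.path, repo.is_some());
        /// }
        /// ```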
1783    pub fn entries_with_repositories<'a>(
1784        &'a self,
1785        entries: impl 'a + Iterator<Item = &'a Entry>,
1786    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1787        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1788        let mut repositories = self.repositories().peekable();
1789        entries.map(move |entry| {
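                // Drop any repositories that no longer contain this entry's path.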
1790            while let Some((repo_path, _)) = containing_repos.last() {
1791                if !entry.path.starts_with(repo_path) {
1792                    containing_repos.pop();
1793                } else {
1794                    break;
1795                }
1796            }
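                // Pull in any upcoming repositories whose work directory contains this entry.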
1797            while let Some((repo_path, _)) = repositories.peek() {
1798                if entry.path.starts_with(repo_path) {
1799                    containing_repos.push(repositories.next().unwrap());
1800                } else {
1801                    break;
1802                }
1803            }
1804            let repo = containing_repos.last().map(|(_, repo)| *repo);
1805            (entry, repo)
1806        })
1807    }
1808
1809    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1810        let empty_path = Path::new("");
1811        self.entries_by_path
1812            .cursor::<()>()
1813            .filter(move |entry| entry.path.as_ref() != empty_path)
1814            .map(|entry| &entry.path)
1815    }
1816
1817    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1818        let mut cursor = self.entries_by_path.cursor();
1819        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1820        let traversal = Traversal {
1821            cursor,
1822            include_dirs: true,
1823            include_ignored: true,
1824        };
1825        ChildEntriesIter {
1826            traversal,
1827            parent_path,
1828        }
1829    }
1830
1831    fn descendent_entries<'a>(
1832        &'a self,
1833        include_dirs: bool,
1834        include_ignored: bool,
1835        parent_path: &'a Path,
1836    ) -> DescendentEntriesIter<'a> {
1837        let mut cursor = self.entries_by_path.cursor();
1838        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1839        let mut traversal = Traversal {
1840            cursor,
1841            include_dirs,
1842            include_ignored,
1843        };
1844
1845        if traversal.end_offset() == traversal.start_offset() {
1846            traversal.advance();
1847        }
1848
1849        DescendentEntriesIter {
1850            traversal,
1851            parent_path,
1852        }
1853    }
1854
1855    pub fn root_entry(&self) -> Option<&Entry> {
1856        self.entry_for_path("")
1857    }
1858
1859    pub fn root_name(&self) -> &str {
1860        &self.root_name
1861    }
1862
1863    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1864        self.repository_entries
1865            .get(&RepositoryWorkDirectory(Path::new("").into()))
1866            .map(|entry| entry.to_owned())
1867    }
1868
1869    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1870        self.repository_entries.values()
1871    }
1872
1873    pub fn scan_id(&self) -> usize {
1874        self.scan_id
1875    }
1876
1877    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1878        let path = path.as_ref();
1879        self.traverse_from_path(true, true, path)
1880            .entry()
1881            .and_then(|entry| {
1882                if entry.path.as_ref() == path {
1883                    Some(entry)
1884                } else {
1885                    None
1886                }
1887            })
1888    }
1889
1890    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1891        let entry = self.entries_by_id.get(&id, &())?;
1892        self.entry_for_path(&entry.path)
1893    }
1894
1895    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1896        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1897    }
1898}
1899
1900impl LocalSnapshot {
1901    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1902        self.git_repositories.get(&repo.work_directory.0)
1903    }
1904
1905    pub(crate) fn repo_for_metadata(
1906        &self,
1907        path: &Path,
1908    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1909        self.git_repositories
1910            .iter()
1911            .find(|(_, repo)| repo.in_dot_git(path))
1912    }
1913
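        /// Build a protobuf `UpdateWorktree` message describing the given sets of
        /// entry and repository changes.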
1914    fn build_update(
1915        &self,
1916        project_id: u64,
1917        worktree_id: u64,
1918        entry_changes: UpdatedEntriesSet,
1919        repo_changes: UpdatedGitRepositoriesSet,
1920    ) -> proto::UpdateWorktree {
1921        let mut updated_entries = Vec::new();
1922        let mut removed_entries = Vec::new();
1923        let mut updated_repositories = Vec::new();
1924        let mut removed_repositories = Vec::new();
1925
1926        for (_, entry_id, path_change) in entry_changes.iter() {
1927            if let PathChange::Removed = path_change {
1928                removed_entries.push(entry_id.0 as u64);
1929            } else if let Some(entry) = self.entry_for_id(*entry_id) {
1930                updated_entries.push(proto::Entry::from(entry));
1931            }
1932        }
1933
1934        for (work_dir_path, change) in repo_changes.iter() {
1935            let new_repo = self
1936                .repository_entries
1937                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
1938            match (&change.old_repository, new_repo) {
1939                (Some(old_repo), Some(new_repo)) => {
1940                    updated_repositories.push(new_repo.build_update(old_repo));
1941                }
1942                (None, Some(new_repo)) => {
1943                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
1944                }
1945                (Some(old_repo), None) => {
1946                    removed_repositories.push(old_repo.work_directory.0.to_proto());
1947                }
1948                _ => {}
1949            }
1950        }
1951
1952        removed_entries.sort_unstable();
1953        updated_entries.sort_unstable_by_key(|e| e.id);
1954        removed_repositories.sort_unstable();
1955        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1956
1957        // TODO - optimize, knowing that removed_entries are sorted.
1958        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());
1959
1960        proto::UpdateWorktree {
1961            project_id,
1962            worktree_id,
1963            abs_path: self.abs_path().to_string_lossy().into(),
1964            root_name: self.root_name().to_string(),
1965            updated_entries,
1966            removed_entries,
1967            scan_id: self.scan_id as u64,
1968            is_last_update: self.completed_scan_id == self.scan_id,
1969            updated_repositories,
1970            removed_repositories,
1971        }
1972    }
1973
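        /// Build an `UpdateWorktree` message containing the entire snapshot: every
        /// entry and every repository, with no removals.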
1974    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
1975        let mut updated_entries = self
1976            .entries_by_path
1977            .iter()
1978            .map(proto::Entry::from)
1979            .collect::<Vec<_>>();
1980        updated_entries.sort_unstable_by_key(|e| e.id);
1981
1982        let mut updated_repositories = self
1983            .repository_entries
1984            .values()
1985            .map(proto::RepositoryEntry::from)
1986            .collect::<Vec<_>>();
1987        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1988
1989        proto::UpdateWorktree {
1990            project_id,
1991            worktree_id,
1992            abs_path: self.abs_path().to_string_lossy().into(),
1993            root_name: self.root_name().to_string(),
1994            updated_entries,
1995            removed_entries: Vec::new(),
1996            scan_id: self.scan_id as u64,
1997            is_last_update: self.completed_scan_id == self.scan_id,
1998            updated_repositories,
1999            removed_repositories: Vec::new(),
2000        }
2001    }
2002
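        /// Insert or replace an entry in this snapshot. A `.gitignore` entry also has
        /// its rules loaded into the ignore map, and a pending directory entry
        /// inherits the kind of any existing entry at the same path.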
2003    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2004        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
2005            let abs_path = self.abs_path.join(&entry.path);
2006            match smol::block_on(build_gitignore(&abs_path, fs)) {
2007                Ok(ignore) => {
2008                    self.ignores_by_parent_abs_path
2009                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
2010                }
2011                Err(error) => {
2012                    log::error!(
2013                        "error loading .gitignore file {:?} - {:?}",
2014                        &entry.path,
2015                        error
2016                    );
2017                }
2018            }
2019        }
2020
2021        if entry.kind == EntryKind::PendingDir {
2022            if let Some(existing_entry) =
2023                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
2024            {
2025                entry.kind = existing_entry.kind;
2026            }
2027        }
2028
2029        let scan_id = self.scan_id;
2030        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
2031        if let Some(removed) = removed {
2032            if removed.id != entry.id {
2033                self.entries_by_id.remove(&removed.id, &());
2034            }
2035        }
2036        self.entries_by_id.insert_or_replace(
2037            PathEntry {
2038                id: entry.id,
2039                path: entry.path.clone(),
2040                is_ignored: entry.is_ignored,
2041                scan_id,
2042            },
2043            &(),
2044        );
2045
2046        entry
2047    }
2048
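        /// Given the worktree-relative path of a `.git` directory, register its parent
        /// as a git work directory, unless it lies inside another repository's
        /// metadata or is already known.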
2049    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
2050        let abs_path = self.abs_path.join(&parent_path);
2051        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
2052
2053        // Guard against repositories inside the repository metadata
2054        if work_dir
2055            .components()
2056            .any(|component| component.as_os_str() == *DOT_GIT)
2057        {
2058            return None;
2059        }
2061
2062        let work_dir_id = self
2063            .entry_for_path(work_dir.clone())
2064            .map(|entry| entry.id)?;
2065
2066        if self.git_repositories.get(&work_dir_id).is_none() {
2067            let repo = fs.open_repo(abs_path.as_path())?;
2068            let work_directory = RepositoryWorkDirectory(work_dir.clone());
2069            let scan_id = self.scan_id;
2070
2071            let repo_lock = repo.lock();
2072
2073            self.repository_entries.insert(
2074                work_directory,
2075                RepositoryEntry {
2076                    work_directory: work_dir_id.into(),
2077                    branch: repo_lock.branch_name().map(Into::into),
2078                    statuses: repo_lock.statuses().unwrap_or_default(),
2079                },
2080            );
2081            drop(repo_lock);
2082
2083            self.git_repositories.insert(
2084                work_dir_id,
2085                LocalRepositoryEntry {
2086                    work_dir_scan_id: scan_id,
2087                    git_dir_scan_id: scan_id,
2088                    repo_ptr: repo,
2089                    git_dir_path: parent_path.clone(),
2090                },
2091            )
2092        }
2093
2094        Some(())
2095    }
2096
2097    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2098        let mut inodes = TreeSet::default();
2099        for ancestor in path.ancestors().skip(1) {
2100            if let Some(entry) = self.entry_for_path(ancestor) {
2101                inodes.insert(entry.inode);
2102            }
2103        }
2104        inodes
2105    }
2106
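        /// Build the stack of `.gitignore` rules that apply to the given absolute path
        /// by walking its ancestors from the outermost directory inward. If any
        /// ancestor is itself ignored, the entire path is treated as ignored.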
2107    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2108        let mut new_ignores = Vec::new();
2109        for ancestor in abs_path.ancestors().skip(1) {
2110            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2111                new_ignores.push((ancestor, Some(ignore.clone())));
2112            } else {
2113                new_ignores.push((ancestor, None));
2114            }
2115        }
2116
2117        let mut ignore_stack = IgnoreStack::none();
2118        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2119            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2120                ignore_stack = IgnoreStack::all();
2121                break;
2122            } else if let Some(ignore) = ignore {
2123                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2124            }
2125        }
2126
2127        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2128            ignore_stack = IgnoreStack::all();
2129        }
2130
2131        ignore_stack
2132    }
2133}
2134
2135impl BackgroundScannerState {
2136    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2137        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2138            entry.id = removed_entry_id;
2139        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
2140            entry.id = existing_entry.id;
2141        }
2142    }
2143
2144    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2145        self.reuse_entry_id(&mut entry);
2146        self.snapshot.insert_entry(entry, fs)
2147    }
2148
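        /// Record the children of a directory that has just been scanned: promote the
        /// parent entry from `PendingDir` to `Dir`, register any `.gitignore` or `.git`
        /// metadata found in it, and insert the child entries into both trees.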
2149    fn populate_dir(
2150        &mut self,
2151        parent_path: Arc<Path>,
2152        entries: impl IntoIterator<Item = Entry>,
2153        ignore: Option<Arc<Gitignore>>,
2154        fs: &dyn Fs,
2155    ) {
2156        let mut parent_entry = if let Some(parent_entry) = self
2157            .snapshot
2158            .entries_by_path
2159            .get(&PathKey(parent_path.clone()), &())
2160        {
2161            parent_entry.clone()
2162        } else {
2163            log::warn!(
2164                "populating a directory {:?} that has been removed",
2165                parent_path
2166            );
2167            return;
2168        };
2169
2170        match parent_entry.kind {
2171            EntryKind::PendingDir => {
2172                parent_entry.kind = EntryKind::Dir;
2173            }
2174            EntryKind::Dir => {}
2175            _ => return,
2176        }
2177
2178        if let Some(ignore) = ignore {
2179            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
2180            self.snapshot
2181                .ignores_by_parent_abs_path
2182                .insert(abs_parent_path, (ignore, false));
2183        }
2184
2185        if parent_path.file_name() == Some(&DOT_GIT) {
2186            self.snapshot.build_repo(parent_path, fs);
2187        }
2188
2189        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2190        let mut entries_by_id_edits = Vec::new();
2191
2192        for mut entry in entries {
2193            self.reuse_entry_id(&mut entry);
2194            entries_by_id_edits.push(Edit::Insert(PathEntry {
2195                id: entry.id,
2196                path: entry.path.clone(),
2197                is_ignored: entry.is_ignored,
2198                scan_id: self.snapshot.scan_id,
2199            }));
2200            entries_by_path_edits.push(Edit::Insert(entry));
2201        }
2202
2203        self.snapshot
2204            .entries_by_path
2205            .edit(entries_by_path_edits, &());
2206        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2207    }
2208
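        /// Remove the entry at `path` and everything beneath it, remembering the
        /// removed entry ids (keyed by inode) so they can be reused if the same files
        /// reappear. If the removed path is a `.gitignore`, the ignore rules of its
        /// parent directory are flagged for reloading.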
2209    fn remove_path(&mut self, path: &Path) {
2210        let mut new_entries;
2211        let removed_entries;
2212        {
2213            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
2214            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2215            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2216            new_entries.push_tree(cursor.suffix(&()), &());
2217        }
2218        self.snapshot.entries_by_path = new_entries;
2219
2220        let mut entries_by_id_edits = Vec::new();
2221        for entry in removed_entries.cursor::<()>() {
2222            let removed_entry_id = self
2223                .removed_entry_ids
2224                .entry(entry.inode)
2225                .or_insert(entry.id);
2226            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2227            entries_by_id_edits.push(Edit::Remove(entry.id));
2228        }
2229        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2230
2231        if path.file_name() == Some(&GITIGNORE) {
2232            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
2233            if let Some((_, needs_update)) = self
2234                .snapshot
2235                .ignores_by_parent_abs_path
2236                .get_mut(abs_parent_path.as_path())
2237            {
2238                *needs_update = true;
2239            }
2240        }
2241    }
2242}
2243
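    /// Load a `.gitignore` file from the filesystem and parse it, rooted at the
    /// file's parent directory.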
2244async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2245    let contents = fs.load(abs_path).await?;
2246    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2247    let mut builder = GitignoreBuilder::new(parent);
2248    for line in contents.lines() {
2249        builder.add_line(Some(abs_path.into()), line)?;
2250    }
2251    Ok(builder.build()?)
2252}
2253
2254impl WorktreeId {
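        /// A tiny round-trip sketch (illustrative only; the value is arbitrary):
        ///
        /// ```ignore
        /// let id = WorktreeId::from_usize(7);
        /// assert_eq!(WorktreeId::from_proto(id.to_proto()), id);
        /// ```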
2255    pub fn from_usize(handle_id: usize) -> Self {
2256        Self(handle_id)
2257    }
2258
2259    pub(crate) fn from_proto(id: u64) -> Self {
2260        Self(id as usize)
2261    }
2262
2263    pub fn to_proto(&self) -> u64 {
2264        self.0 as u64
2265    }
2266
2267    pub fn to_usize(&self) -> usize {
2268        self.0
2269    }
2270}
2271
2272impl fmt::Display for WorktreeId {
2273    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2274        self.0.fmt(f)
2275    }
2276}
2277
2278impl Deref for Worktree {
2279    type Target = Snapshot;
2280
2281    fn deref(&self) -> &Self::Target {
2282        match self {
2283            Worktree::Local(worktree) => &worktree.snapshot,
2284            Worktree::Remote(worktree) => &worktree.snapshot,
2285        }
2286    }
2287}
2288
2289impl Deref for LocalWorktree {
2290    type Target = LocalSnapshot;
2291
2292    fn deref(&self) -> &Self::Target {
2293        &self.snapshot
2294    }
2295}
2296
2297impl Deref for RemoteWorktree {
2298    type Target = Snapshot;
2299
2300    fn deref(&self) -> &Self::Target {
2301        &self.snapshot
2302    }
2303}
2304
2305impl fmt::Debug for LocalWorktree {
2306    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2307        self.snapshot.fmt(f)
2308    }
2309}
2310
2311impl fmt::Debug for Snapshot {
2312    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2313        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2314        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2315
2316        impl<'a> fmt::Debug for EntriesByPath<'a> {
2317            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2318                f.debug_map()
2319                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2320                    .finish()
2321            }
2322        }
2323
2324        impl<'a> fmt::Debug for EntriesById<'a> {
2325            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2326                f.debug_list().entries(self.0.iter()).finish()
2327            }
2328        }
2329
2330        f.debug_struct("Snapshot")
2331            .field("id", &self.id)
2332            .field("root_name", &self.root_name)
2333            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2334            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2335            .finish()
2336    }
2337}
2338
2339#[derive(Clone, PartialEq)]
2340pub struct File {
2341    pub worktree: ModelHandle<Worktree>,
2342    pub path: Arc<Path>,
2343    pub mtime: SystemTime,
2344    pub(crate) entry_id: ProjectEntryId,
2345    pub(crate) is_local: bool,
2346    pub(crate) is_deleted: bool,
2347}
2348
2349impl language::File for File {
2350    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2351        if self.is_local {
2352            Some(self)
2353        } else {
2354            None
2355        }
2356    }
2357
2358    fn mtime(&self) -> SystemTime {
2359        self.mtime
2360    }
2361
2362    fn path(&self) -> &Arc<Path> {
2363        &self.path
2364    }
2365
2366    fn full_path(&self, cx: &AppContext) -> PathBuf {
2367        let mut full_path = PathBuf::new();
2368        let worktree = self.worktree.read(cx);
2369
2370        if worktree.is_visible() {
2371            full_path.push(worktree.root_name());
2372        } else {
2373            let path = worktree.abs_path();
2374
2375            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2376                full_path.push("~");
2377                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2378            } else {
2379                full_path.push(path)
2380            }
2381        }
2382
2383        if self.path.components().next().is_some() {
2384            full_path.push(&self.path);
2385        }
2386
2387        full_path
2388    }
2389
2390    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2391    /// of its worktree, then this method will return the name of the worktree itself.
2392    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2393        self.path
2394            .file_name()
2395            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2396    }
2397
2398    fn worktree_id(&self) -> usize {
2399        self.worktree.id()
2400    }
2401
2402    fn is_deleted(&self) -> bool {
2403        self.is_deleted
2404    }
2405
2406    fn as_any(&self) -> &dyn Any {
2407        self
2408    }
2409
2410    fn to_proto(&self) -> rpc::proto::File {
2411        rpc::proto::File {
2412            worktree_id: self.worktree.id() as u64,
2413            entry_id: self.entry_id.to_proto(),
2414            path: self.path.to_string_lossy().into(),
2415            mtime: Some(self.mtime.into()),
2416            is_deleted: self.is_deleted,
2417        }
2418    }
2419}
2420
2421impl language::LocalFile for File {
2422    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2423        self.worktree
2424            .read(cx)
2425            .as_local()
2426            .unwrap()
2427            .abs_path
2428            .join(&self.path)
2429    }
2430
2431    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2432        let worktree = self.worktree.read(cx).as_local().unwrap();
2433        let abs_path = worktree.absolutize(&self.path);
2434        let fs = worktree.fs.clone();
2435        cx.background()
2436            .spawn(async move { fs.load(&abs_path).await })
2437    }
2438
2439    fn buffer_reloaded(
2440        &self,
2441        buffer_id: u64,
2442        version: &clock::Global,
2443        fingerprint: RopeFingerprint,
2444        line_ending: LineEnding,
2445        mtime: SystemTime,
2446        cx: &mut AppContext,
2447    ) {
2448        let worktree = self.worktree.read(cx).as_local().unwrap();
2449        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2450            worktree
2451                .client
2452                .send(proto::BufferReloaded {
2453                    project_id,
2454                    buffer_id,
2455                    version: serialize_version(version),
2456                    mtime: Some(mtime.into()),
2457                    fingerprint: serialize_fingerprint(fingerprint),
2458                    line_ending: serialize_line_ending(line_ending) as i32,
2459                })
2460                .log_err();
2461        }
2462    }
2463}
2464
2465impl File {
2466    pub fn for_entry(entry: Entry, worktree: ModelHandle<Worktree>) -> Self {
2467        Self {
2468            worktree,
2469            path: entry.path.clone(),
2470            mtime: entry.mtime,
2471            entry_id: entry.id,
2472            is_local: true,
2473            is_deleted: false,
2474        }
2475    }
2476
2477    pub fn from_proto(
2478        proto: rpc::proto::File,
2479        worktree: ModelHandle<Worktree>,
2480        cx: &AppContext,
2481    ) -> Result<Self> {
2482        let worktree_id = worktree
2483            .read(cx)
2484            .as_remote()
2485            .ok_or_else(|| anyhow!("not remote"))?
2486            .id();
2487
2488        if worktree_id.to_proto() != proto.worktree_id {
2489            return Err(anyhow!("worktree id does not match file"));
2490        }
2491
2492        Ok(Self {
2493            worktree,
2494            path: Path::new(&proto.path).into(),
2495            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2496            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2497            is_local: false,
2498            is_deleted: proto.is_deleted,
2499        })
2500    }
2501
2502    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2503        file.and_then(|f| f.as_any().downcast_ref())
2504    }
2505
2506    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2507        self.worktree.read(cx).id()
2508    }
2509
2510    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2511        if self.is_deleted {
2512            None
2513        } else {
2514            Some(self.entry_id)
2515        }
2516    }
2517}
2518
2519#[derive(Clone, Debug, PartialEq, Eq)]
2520pub struct Entry {
2521    pub id: ProjectEntryId,
2522    pub kind: EntryKind,
2523    pub path: Arc<Path>,
2524    pub inode: u64,
2525    pub mtime: SystemTime,
2526    pub is_symlink: bool,
2527    pub is_ignored: bool,
2528}
2529
2530#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2531pub enum EntryKind {
2532    PendingDir,
2533    Dir,
2534    File(CharBag),
2535}
2536
2537#[derive(Clone, Copy, Debug, PartialEq)]
2538pub enum PathChange {
2539    /// A filesystem entry was created.
2540    Added,
2541    /// A filesystem entry was removed.
2542    Removed,
2543    /// A filesystem entry was updated.
2544    Updated,
2545    /// A filesystem entry was either updated or added. We don't know
2546    /// whether or not it already existed, because the path had not
2547    /// been loaded before the event.
2548    AddedOrUpdated,
2549    /// A filesystem entry was found during the initial scan of the worktree.
2550    Loaded,
2551}
2552
2553pub struct GitRepositoryChange {
2554    /// The previous state of the repository, if it already existed.
2555    pub old_repository: Option<RepositoryEntry>,
2556    /// Whether the content of the .git directory changed. This will be false
2557    /// if only the repository's work directory changed.
2558    pub git_dir_changed: bool,
2559}
2560
2561pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
2562pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
2563
2564impl Entry {
2565    fn new(
2566        path: Arc<Path>,
2567        metadata: &fs::Metadata,
2568        next_entry_id: &AtomicUsize,
2569        root_char_bag: CharBag,
2570    ) -> Self {
2571        Self {
2572            id: ProjectEntryId::new(next_entry_id),
2573            kind: if metadata.is_dir {
2574                EntryKind::PendingDir
2575            } else {
2576                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2577            },
2578            path,
2579            inode: metadata.inode,
2580            mtime: metadata.mtime,
2581            is_symlink: metadata.is_symlink,
2582            is_ignored: false,
2583        }
2584    }
2585
2586    pub fn is_dir(&self) -> bool {
2587        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2588    }
2589
2590    pub fn is_file(&self) -> bool {
2591        matches!(self.kind, EntryKind::File(_))
2592    }
2593}
2594
2595impl sum_tree::Item for Entry {
2596    type Summary = EntrySummary;
2597
2598    fn summary(&self) -> Self::Summary {
2599        let visible_count = if self.is_ignored { 0 } else { 1 };
2600        let file_count;
2601        let visible_file_count;
2602        if self.is_file() {
2603            file_count = 1;
2604            visible_file_count = visible_count;
2605        } else {
2606            file_count = 0;
2607            visible_file_count = 0;
2608        }
2609
2610        EntrySummary {
2611            max_path: self.path.clone(),
2612            count: 1,
2613            visible_count,
2614            file_count,
2615            visible_file_count,
2616        }
2617    }
2618}
2619
2620impl sum_tree::KeyedItem for Entry {
2621    type Key = PathKey;
2622
2623    fn key(&self) -> Self::Key {
2624        PathKey(self.path.clone())
2625    }
2626}
2627
2628#[derive(Clone, Debug)]
2629pub struct EntrySummary {
2630    max_path: Arc<Path>,
2631    count: usize,
2632    visible_count: usize,
2633    file_count: usize,
2634    visible_file_count: usize,
2635}
2636
2637impl Default for EntrySummary {
2638    fn default() -> Self {
2639        Self {
2640            max_path: Arc::from(Path::new("")),
2641            count: 0,
2642            visible_count: 0,
2643            file_count: 0,
2644            visible_file_count: 0,
2645        }
2646    }
2647}
2648
2649impl sum_tree::Summary for EntrySummary {
2650    type Context = ();
2651
2652    fn add_summary(&mut self, rhs: &Self, _: &()) {
2653        self.max_path = rhs.max_path.clone();
2654        self.count += rhs.count;
2655        self.visible_count += rhs.visible_count;
2656        self.file_count += rhs.file_count;
2657        self.visible_file_count += rhs.visible_file_count;
2658    }
2659}
2660
2661#[derive(Clone, Debug)]
2662struct PathEntry {
2663    id: ProjectEntryId,
2664    path: Arc<Path>,
2665    is_ignored: bool,
2666    scan_id: usize,
2667}
2668
2669impl sum_tree::Item for PathEntry {
2670    type Summary = PathEntrySummary;
2671
2672    fn summary(&self) -> Self::Summary {
2673        PathEntrySummary { max_id: self.id }
2674    }
2675}
2676
2677impl sum_tree::KeyedItem for PathEntry {
2678    type Key = ProjectEntryId;
2679
2680    fn key(&self) -> Self::Key {
2681        self.id
2682    }
2683}
2684
2685#[derive(Clone, Debug, Default)]
2686struct PathEntrySummary {
2687    max_id: ProjectEntryId,
2688}
2689
2690impl sum_tree::Summary for PathEntrySummary {
2691    type Context = ();
2692
2693    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2694        self.max_id = summary.max_id;
2695    }
2696}
2697
2698impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2699    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2700        *self = summary.max_id;
2701    }
2702}
2703
2704#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2705pub struct PathKey(Arc<Path>);
2706
2707impl Default for PathKey {
2708    fn default() -> Self {
2709        Self(Path::new("").into())
2710    }
2711}
2712
2713impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2714    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2715        self.0 = summary.max_path.clone();
2716    }
2717}
2718
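    /// Scans a local worktree in the background, mirroring the state of the
    /// filesystem into a `LocalSnapshot` and publishing `ScanState` updates.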
2719struct BackgroundScanner {
2720    state: Mutex<BackgroundScannerState>,
2721    fs: Arc<dyn Fs>,
2722    status_updates_tx: UnboundedSender<ScanState>,
2723    executor: Arc<executor::Background>,
2724    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2725    next_entry_id: Arc<AtomicUsize>,
2726    phase: BackgroundScannerPhase,
2727}
2728
2729#[derive(PartialEq)]
2730enum BackgroundScannerPhase {
2731    InitialScan,
2732    EventsReceivedDuringInitialScan,
2733    Events,
2734}
2735
2736impl BackgroundScanner {
2737    fn new(
2738        snapshot: LocalSnapshot,
2739        next_entry_id: Arc<AtomicUsize>,
2740        fs: Arc<dyn Fs>,
2741        status_updates_tx: UnboundedSender<ScanState>,
2742        executor: Arc<executor::Background>,
2743        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2744    ) -> Self {
2745        Self {
2746            fs,
2747            status_updates_tx,
2748            executor,
2749            refresh_requests_rx,
2750            next_entry_id,
2751            state: Mutex::new(BackgroundScannerState {
2752                prev_snapshot: snapshot.snapshot.clone(),
2753                snapshot,
2754                removed_entry_ids: Default::default(),
2755                changed_paths: Default::default(),
2756            }),
2757            phase: BackgroundScannerPhase::InitialScan,
2758        }
2759    }
2760
2761    async fn run(
2762        &mut self,
2763        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2764    ) {
2765        use futures::FutureExt as _;
2766
2767        let (root_abs_path, root_inode) = {
2768            let snapshot = &self.state.lock().snapshot;
2769            (
2770                snapshot.abs_path.clone(),
2771                snapshot.root_entry().map(|e| e.inode),
2772            )
2773        };
2774
2775        // Populate ignores above the root.
2776        let ignore_stack;
2777        for ancestor in root_abs_path.ancestors().skip(1) {
2778            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2779            {
2780                self.state
2781                    .lock()
2782                    .snapshot
2783                    .ignores_by_parent_abs_path
2784                    .insert(ancestor.into(), (ignore.into(), false));
2785            }
2786        }
2787        {
2788            let mut state = self.state.lock();
2789            state.snapshot.scan_id += 1;
2790            ignore_stack = state
2791                .snapshot
2792                .ignore_stack_for_abs_path(&root_abs_path, true);
2793            if ignore_stack.is_all() {
2794                if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
2795                    root_entry.is_ignored = true;
2796                    state.insert_entry(root_entry, self.fs.as_ref());
2797                }
2798            }
2799        };
2800
2801        // Perform an initial scan of the directory.
2802        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2803        smol::block_on(scan_job_tx.send(ScanJob {
2804            abs_path: root_abs_path,
2805            path: Arc::from(Path::new("")),
2806            ignore_stack,
2807            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2808            scan_queue: scan_job_tx.clone(),
2809        }))
2810        .unwrap();
2811        drop(scan_job_tx);
2812        self.scan_dirs(true, scan_job_rx).await;
2813        {
2814            let mut state = self.state.lock();
2815            state.snapshot.completed_scan_id = state.snapshot.scan_id;
2816        }
2817        self.send_status_update(false, None);
2818
2819        // Process any FS events that occurred while performing the initial scan.
2820        // For these events, the reported changes cannot be as precise, because we
2821        // didn't have the previous state loaded yet.
2822        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
2823        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2824            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2825            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2826                paths.extend(more_events.into_iter().map(|e| e.path));
2827            }
2828            self.process_events(paths).await;
2829        }
2830
2831        // Continue processing events until the worktree is dropped.
2832        self.phase = BackgroundScannerPhase::Events;
2833        loop {
2834            select_biased! {
2835                // Process any path refresh requests from the worktree. Prioritize
2836                // these before handling changes reported by the filesystem.
2837                request = self.refresh_requests_rx.recv().fuse() => {
2838                    let Ok((paths, barrier)) = request else { break };
2839                    if !self.process_refresh_request(paths, barrier).await {
2840                        return;
2841                    }
2842                }
2843
2844                events = events_rx.next().fuse() => {
2845                    let Some(events) = events else { break };
2846                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2847                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2848                        paths.extend(more_events.into_iter().map(|e| e.path));
2849                    }
2850                    self.process_events(paths).await;
2851                }
2852            }
2853        }
2854    }
2855
2856    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2857        self.reload_entries_for_paths(paths, None).await;
2858        self.send_status_update(false, Some(barrier))
2859    }
2860
2861    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2862        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2863        let paths = self
2864            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2865            .await;
2866        drop(scan_job_tx);
2867        self.scan_dirs(false, scan_job_rx).await;
2868
2869        self.update_ignore_statuses().await;
2870
2871        {
2872            let mut snapshot = &mut self.state.lock().snapshot;
2873
2874            if let Some(paths) = paths {
2875                for path in paths {
2876                    self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
2877                }
2878            }
2879
2880            let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2881            git_repositories.retain(|work_directory_id, _| {
2882                snapshot
2883                    .entry_for_id(*work_directory_id)
2884                    .map_or(false, |entry| {
2885                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2886                    })
2887            });
2888            snapshot.git_repositories = git_repositories;
2889
2890            let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2891            git_repository_entries.retain(|_, entry| {
2892                snapshot
2893                    .git_repositories
2894                    .get(&entry.work_directory.0)
2895                    .is_some()
2896            });
2897            snapshot.snapshot.repository_entries = git_repository_entries;
2898            snapshot.completed_scan_id = snapshot.scan_id;
2899        }
2900
2901        self.send_status_update(false, None);
2902    }
2903
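        /// Drain the queue of directory scan jobs using one task per available CPU,
        /// giving priority to path refresh requests and sending periodic progress
        /// updates while scanning.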
2904    async fn scan_dirs(
2905        &self,
2906        enable_progress_updates: bool,
2907        scan_jobs_rx: channel::Receiver<ScanJob>,
2908    ) {
2909        use futures::FutureExt as _;
2910
2911        if self
2912            .status_updates_tx
2913            .unbounded_send(ScanState::Started)
2914            .is_err()
2915        {
2916            return;
2917        }
2918
2919        let progress_update_count = AtomicUsize::new(0);
2920        self.executor
2921            .scoped(|scope| {
2922                for _ in 0..self.executor.num_cpus() {
2923                    scope.spawn(async {
2924                        let mut last_progress_update_count = 0;
2925                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2926                        futures::pin_mut!(progress_update_timer);
2927
2928                        loop {
2929                            select_biased! {
2930                                // Process any path refresh requests before moving on to process
2931                                // the scan queue, so that user operations are prioritized.
2932                                request = self.refresh_requests_rx.recv().fuse() => {
2933                                    let Ok((paths, barrier)) = request else { break };
2934                                    if !self.process_refresh_request(paths, barrier).await {
2935                                        return;
2936                                    }
2937                                }
2938
2939                                // Send periodic progress updates to the worktree. Use an atomic counter
2940                                // to ensure that only one of the workers sends a progress update after
2941                                // the update interval elapses.
2942                                _ = progress_update_timer => {
2943                                    match progress_update_count.compare_exchange(
2944                                        last_progress_update_count,
2945                                        last_progress_update_count + 1,
2946                                        SeqCst,
2947                                        SeqCst
2948                                    ) {
2949                                        Ok(_) => {
2950                                            last_progress_update_count += 1;
2951                                            self.send_status_update(true, None);
2952                                        }
2953                                        Err(count) => {
2954                                            last_progress_update_count = count;
2955                                        }
2956                                    }
2957                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2958                                }
2959
2960                                // Recursively load directories from the file system.
2961                                job = scan_jobs_rx.recv().fuse() => {
2962                                    let Ok(job) = job else { break };
2963                                    if let Err(err) = self.scan_dir(&job).await {
2964                                        if job.path.as_ref() != Path::new("") {
2965                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2966                                        }
2967                                    }
2968                                }
2969                            }
2970                        }
2971                    })
2972                }
2973            })
2974            .await;
2975    }
2976
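        /// Publish the current snapshot, along with the set of paths that changed
        /// since the previously published snapshot. Returns `false` if the receiving
        /// end of the status channel has been dropped.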
2977    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2978        let mut state = self.state.lock();
2979        if state.changed_paths.is_empty() && scanning {
2980            return true;
2981        }
2982
2983        let new_snapshot = state.snapshot.clone();
2984        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
2985        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
2986        state.changed_paths.clear();
2987
2988        self.status_updates_tx
2989            .unbounded_send(ScanState::Updated {
2990                snapshot: new_snapshot,
2991                changes,
2992                scanning,
2993                barrier,
2994            })
2995            .is_ok()
2996    }
2997
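        // Scan a single directory: read its children from the file system, build an
        // `Entry` for each of them, and push a `ScanJob` for every subdirectory onto the
        // scan queue (skipping directories that would form a symlink cycle) so the workers
        // above can descend into it. A `.gitignore` discovered in the directory is applied
        // retroactively to siblings that were read before it.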
2998    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2999        let mut new_entries: Vec<Entry> = Vec::new();
3000        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
3001        let mut ignore_stack = job.ignore_stack.clone();
3002        let mut new_ignore = None;
3003        let (root_abs_path, root_char_bag, next_entry_id) = {
3004            let snapshot = &self.state.lock().snapshot;
3005            (
3006                snapshot.abs_path().clone(),
3007                snapshot.root_char_bag,
3008                self.next_entry_id.clone(),
3009            )
3010        };
3011        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
3012        while let Some(child_abs_path) = child_paths.next().await {
3013            let child_abs_path: Arc<Path> = match child_abs_path {
3014                Ok(child_abs_path) => child_abs_path.into(),
3015                Err(error) => {
3016                    log::error!("error processing entry {:?}", error);
3017                    continue;
3018                }
3019            };
3020
3021            let child_name = child_abs_path.file_name().unwrap();
3022            let child_path: Arc<Path> = job.path.join(child_name).into();
3023            let child_metadata = match self.fs.metadata(&child_abs_path).await {
3024                Ok(Some(metadata)) => metadata,
3025                Ok(None) => continue,
3026                Err(err) => {
3027                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
3028                    continue;
3029                }
3030            };
3031
3032            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
3033            if child_name == *GITIGNORE {
3034                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
3035                    Ok(ignore) => {
3036                        let ignore = Arc::new(ignore);
3037                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3038                        new_ignore = Some(ignore);
3039                    }
3040                    Err(error) => {
3041                        log::error!(
3042                            "error loading .gitignore file {:?} - {:?}",
3043                            child_name,
3044                            error
3045                        );
3046                    }
3047                }
3048
3049                // Update the ignore status of any child entries we've already processed, to
3050                // reflect the ignore file in the current directory. Because `.gitignore` starts
3051                // with a `.`, it usually appears early in the listing, so there should rarely be
3052                // many such entries. Update the ignore stack associated with any new jobs as well.
3053                let mut new_jobs = new_jobs.iter_mut();
3054                for entry in &mut new_entries {
3055                    let entry_abs_path = root_abs_path.join(&entry.path);
3056                    entry.is_ignored =
3057                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
3058
3059                    if entry.is_dir() {
3060                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
3061                            job.ignore_stack = if entry.is_ignored {
3062                                IgnoreStack::all()
3063                            } else {
3064                                ignore_stack.clone()
3065                            };
3066                        }
3067                    }
3068                }
3069            }
3070
3071            let mut child_entry = Entry::new(
3072                child_path.clone(),
3073                &child_metadata,
3074                &next_entry_id,
3075                root_char_bag,
3076            );
3077
3078            if child_entry.is_dir() {
3079                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
3080                child_entry.is_ignored = is_ignored;
3081
3082                // Avoid recursing indefinitely when a recursive symlink creates a cycle
3083                if !job.ancestor_inodes.contains(&child_entry.inode) {
3084                    let mut ancestor_inodes = job.ancestor_inodes.clone();
3085                    ancestor_inodes.insert(child_entry.inode);
3086
3087                    new_jobs.push(Some(ScanJob {
3088                        abs_path: child_abs_path,
3089                        path: child_path,
3090                        ignore_stack: if is_ignored {
3091                            IgnoreStack::all()
3092                        } else {
3093                            ignore_stack.clone()
3094                        },
3095                        ancestor_inodes,
3096                        scan_queue: job.scan_queue.clone(),
3097                    }));
3098                } else {
3099                    new_jobs.push(None);
3100                }
3101            } else {
3102                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
3103            }
3104
3105            new_entries.push(child_entry);
3106        }
3107
3108        {
3109            let mut state = self.state.lock();
3110            state.populate_dir(job.path.clone(), new_entries, new_ignore, self.fs.as_ref());
3111            if let Err(ix) = state.changed_paths.binary_search(&job.path) {
3112                state.changed_paths.insert(ix, job.path.clone());
3113            }
3114        }
3115
3116        for new_job in new_jobs {
3117            if let Some(new_job) = new_job {
3118                job.scan_queue.send(new_job).await.unwrap();
3119            }
3120        }
3121
3122        Ok(())
3123    }
3124
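        // Re-stat a batch of absolute paths (typically reported by FS events or refresh
        // requests) and update their entries in the snapshot. Paths that are descendants
        // of another path in the batch are dropped, and directories are queued for a
        // recursive rescan when `scan_queue_tx` is provided. Returns the affected paths
        // relative to the worktree root, or `None` if the root can't be canonicalized.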
3125    async fn reload_entries_for_paths(
3126        &self,
3127        mut abs_paths: Vec<PathBuf>,
3128        scan_queue_tx: Option<Sender<ScanJob>>,
3129    ) -> Option<Vec<Arc<Path>>> {
3130        let doing_recursive_update = scan_queue_tx.is_some();
3131
3132        abs_paths.sort_unstable();
3133        abs_paths.dedup_by(|a, b| a.starts_with(&b));
3134
3135        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
3136        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
3137        let metadata = futures::future::join_all(
3138            abs_paths
3139                .iter()
3140                .map(|abs_path| self.fs.metadata(&abs_path))
3141                .collect::<Vec<_>>(),
3142        )
3143        .await;
3144
3145        let mut state = self.state.lock();
3146        let snapshot = &mut state.snapshot;
3147        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
3148        snapshot.scan_id += 1;
3149        if is_idle && !doing_recursive_update {
3150            snapshot.completed_scan_id = snapshot.scan_id;
3151        }
3152
3153        // Remove any entries for paths that no longer exist or are being recursively
3154        // refreshed. Do this before adding any new entries, so that renames can be
3155        // detected regardless of the order of the paths.
3156        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
3157        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
3158            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3159                if matches!(metadata, Ok(None)) || doing_recursive_update {
3160                    state.remove_path(path);
3161                }
3162                event_paths.push(path.into());
3163            } else {
3164                log::error!(
3165                    "unexpected event {:?} for root path {:?}",
3166                    abs_path,
3167                    root_canonical_path
3168                );
3169            }
3170        }
3171
3172        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
3173            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
3174
3175            match metadata {
3176                Ok(Some(metadata)) => {
3177                    let ignore_stack = state
3178                        .snapshot
3179                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
3180                    let mut fs_entry = Entry::new(
3181                        path.clone(),
3182                        &metadata,
3183                        self.next_entry_id.as_ref(),
3184                        state.snapshot.root_char_bag,
3185                    );
3186                    fs_entry.is_ignored = ignore_stack.is_all();
3187                    state.insert_entry(fs_entry, self.fs.as_ref());
3188
3189                    if let Some(scan_queue_tx) = &scan_queue_tx {
3190                        let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);
3191                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
3192                            ancestor_inodes.insert(metadata.inode);
3193                            smol::block_on(scan_queue_tx.send(ScanJob {
3194                                abs_path,
3195                                path,
3196                                ignore_stack,
3197                                ancestor_inodes,
3198                                scan_queue: scan_queue_tx.clone(),
3199                            }))
3200                            .unwrap();
3201                        }
3202                    }
3203                }
3204                Ok(None) => {
3205                    self.remove_repo_path(&path, &mut state.snapshot);
3206                }
3207                Err(err) => {
3208                    // TODO - create a special 'error' entry in the entries tree to mark this
3209                    log::error!("error reading file on event {:?}", err);
3210                }
3211            }
3212        }
3213
3214        util::extend_sorted(
3215            &mut state.changed_paths,
3216            event_paths.iter().cloned(),
3217            usize::MAX,
3218            Ord::cmp,
3219        );
3220
3221        Some(event_paths)
3222    }
3223
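        // Called for a removed path. If the path was the work directory of a tracked git
        // repository, drop that repository from the snapshot; otherwise clear the git
        // statuses recorded for the path and its descendants. Paths containing a `.git`
        // component are left alone. The `Option` return value only exists to allow early
        // exits via `?`.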
3224    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3225        if !path
3226            .components()
3227            .any(|component| component.as_os_str() == *DOT_GIT)
3228        {
3229            let scan_id = snapshot.scan_id;
3230
3231            if let Some(repository) = snapshot.repository_for_work_directory(path) {
3232                let entry = repository.work_directory.0;
3233                snapshot.git_repositories.remove(&entry);
3234                snapshot
3235                    .snapshot
3236                    .repository_entries
3237                    .remove(&RepositoryWorkDirectory(path.into()));
3238                return Some(());
3239            }
3240
3241            let repo = snapshot.repository_for_path(&path)?;
3242            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
3243            let work_dir = repo.work_directory(snapshot)?;
3244            let work_dir_id = repo.work_directory;
3245
3246            snapshot
3247                .git_repositories
3248                .update(&work_dir_id, |entry| entry.work_dir_scan_id = scan_id);
3249
3250            snapshot.repository_entries.update(&work_dir, |entry| {
3251                entry
3252                    .statuses
3253                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
3254            });
3255        }
3256
3257        Some(())
3258    }
3259
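        // Reconcile git state for a changed path. A change inside a `.git` directory
        // reloads that repository's index, branch, and statuses (or registers the
        // repository if it isn't tracked yet); a change to an ordinary path only
        // refreshes the statuses of the affected entries. Both branches short-circuit
        // when the repository was already reloaded during the current scan.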
3260    fn reload_repo_for_file_path(
3261        &self,
3262        path: &Path,
3263        snapshot: &mut LocalSnapshot,
3264        fs: &dyn Fs,
3265    ) -> Option<()> {
3266        let scan_id = snapshot.scan_id;
3267
3268        if path
3269            .components()
3270            .any(|component| component.as_os_str() == *DOT_GIT)
3271        {
3272            let (entry_id, repo_ptr) = {
3273                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
3274                    let dot_git_dir = path.ancestors()
3275                    .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
3276                    .next()?;
3277
3278                    snapshot.build_repo(dot_git_dir.into(), fs);
3279                    return None;
3280                };
3281                if repo.git_dir_scan_id == scan_id {
3282                    return None;
3283                }
3284                (*entry_id, repo.repo_ptr.to_owned())
3285            };
3286
3287            let work_dir = snapshot
3288                .entry_for_id(entry_id)
3289                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
3290
3291            let repo = repo_ptr.lock();
3292            repo.reload_index();
3293            let branch = repo.branch_name();
3294            let statuses = repo.statuses().unwrap_or_default();
3295
3296            snapshot.git_repositories.update(&entry_id, |entry| {
3297                entry.work_dir_scan_id = scan_id;
3298                entry.git_dir_scan_id = scan_id;
3299            });
3300
3301            snapshot.repository_entries.update(&work_dir, |entry| {
3302                entry.branch = branch.map(Into::into);
3303                entry.statuses = statuses;
3304            });
3305        } else {
3306            if snapshot
3307                .entry_for_path(&path)
3308                .map(|entry| entry.is_ignored)
3309                .unwrap_or(false)
3310            {
3311                self.remove_repo_path(&path, snapshot);
3312                return None;
3313            }
3314
3315            let repo = snapshot.repository_for_path(&path)?;
3316
3317            let work_dir = repo.work_directory(snapshot)?;
3318            let work_dir_id = repo.work_directory.clone();
3319
3320            let (local_repo, git_dir_scan_id) =
3321                snapshot.git_repositories.update(&work_dir_id, |entry| {
3322                    entry.work_dir_scan_id = scan_id;
3323                    (entry.repo_ptr.clone(), entry.git_dir_scan_id)
3324                })?;
3325
3326            // Short circuit if we've already scanned everything
3327            if git_dir_scan_id == scan_id {
3328                return None;
3329            }
3330
3331            let mut repository = snapshot.repository_entries.remove(&work_dir)?;
3332
3333            for entry in snapshot.descendent_entries(false, false, path) {
3334                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
3335                    continue;
3336                };
3337
3338                let status = local_repo.lock().status(&repo_path);
3339                if let Some(status) = status {
3340                    repository.statuses.insert(repo_path.clone(), status);
3341                } else {
3342                    repository.statuses.remove(&repo_path);
3343                }
3344            }
3345
3346            snapshot.repository_entries.insert(work_dir, repository)
3347        }
3348
3349        Some(())
3350    }
3351
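        // Re-evaluate `is_ignored` for entries whose governing `.gitignore` files have
        // changed since the last scan. Ignore files that no longer exist are dropped from
        // the snapshot, dirty parent directories are deduplicated (a parent subsumes its
        // descendants), and the remaining directories are processed by a pool of workers.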
3352    async fn update_ignore_statuses(&self) {
3353        use futures::FutureExt as _;
3354
3355        let mut snapshot = self.state.lock().snapshot.clone();
3356        let mut ignores_to_update = Vec::new();
3357        let mut ignores_to_delete = Vec::new();
3358        let abs_path = snapshot.abs_path.clone();
3359        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
3360            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
3361                if *needs_update {
3362                    *needs_update = false;
3363                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
3364                        ignores_to_update.push(parent_abs_path.clone());
3365                    }
3366                }
3367
3368                let ignore_path = parent_path.join(&*GITIGNORE);
3369                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
3370                    ignores_to_delete.push(parent_abs_path.clone());
3371                }
3372            }
3373        }
3374
3375        for parent_abs_path in ignores_to_delete {
3376            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
3377            self.state
3378                .lock()
3379                .snapshot
3380                .ignores_by_parent_abs_path
3381                .remove(&parent_abs_path);
3382        }
3383
3384        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
3385        ignores_to_update.sort_unstable();
3386        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
3387        while let Some(parent_abs_path) = ignores_to_update.next() {
3388            while ignores_to_update
3389                .peek()
3390                .map_or(false, |p| p.starts_with(&parent_abs_path))
3391            {
3392                ignores_to_update.next().unwrap();
3393            }
3394
3395            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
3396            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
3397                abs_path: parent_abs_path,
3398                ignore_stack,
3399                ignore_queue: ignore_queue_tx.clone(),
3400            }))
3401            .unwrap();
3402        }
3403        drop(ignore_queue_tx);
3404
3405        self.executor
3406            .scoped(|scope| {
3407                for _ in 0..self.executor.num_cpus() {
3408                    scope.spawn(async {
3409                        loop {
3410                            select_biased! {
3411                                // Process any path refresh requests before moving on to process
3412                                // the queue of ignore statuses.
3413                                request = self.refresh_requests_rx.recv().fuse() => {
3414                                    let Ok((paths, barrier)) = request else { break };
3415                                    if !self.process_refresh_request(paths, barrier).await {
3416                                        return;
3417                                    }
3418                                }
3419
3420                                // Recursively process directories whose ignores have changed.
3421                                job = ignore_queue_rx.recv().fuse() => {
3422                                    let Ok(job) = job else { break };
3423                                    self.update_ignore_status(job, &snapshot).await;
3424                                }
3425                            }
3426                        }
3427                    });
3428                }
3429            })
3430            .await;
3431    }
3432
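        // Recompute the ignore status of a single directory's children, enqueueing a
        // follow-up job for every child directory. Entries whose status changed are
        // edited back into the snapshot and recorded in `changed_paths` so they appear
        // in the next status update.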
3433    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
3434        let mut ignore_stack = job.ignore_stack;
3435        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
3436            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3437        }
3438
3439        let mut entries_by_id_edits = Vec::new();
3440        let mut entries_by_path_edits = Vec::new();
3441        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
3442        for mut entry in snapshot.child_entries(path).cloned() {
3443            let was_ignored = entry.is_ignored;
3444            let abs_path = snapshot.abs_path().join(&entry.path);
3445            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
3446            if entry.is_dir() {
3447                let child_ignore_stack = if entry.is_ignored {
3448                    IgnoreStack::all()
3449                } else {
3450                    ignore_stack.clone()
3451                };
3452                job.ignore_queue
3453                    .send(UpdateIgnoreStatusJob {
3454                        abs_path: abs_path.into(),
3455                        ignore_stack: child_ignore_stack,
3456                        ignore_queue: job.ignore_queue.clone(),
3457                    })
3458                    .await
3459                    .unwrap();
3460            }
3461
3462            if entry.is_ignored != was_ignored {
3463                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
3464                path_entry.scan_id = snapshot.scan_id;
3465                path_entry.is_ignored = entry.is_ignored;
3466                entries_by_id_edits.push(Edit::Insert(path_entry));
3467                entries_by_path_edits.push(Edit::Insert(entry));
3468            }
3469        }
3470
3471        let state = &mut self.state.lock();
3472        for edit in &entries_by_path_edits {
3473            if let Edit::Insert(entry) = edit {
3474                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
3475                    state.changed_paths.insert(ix, entry.path.clone());
3476                }
3477            }
3478        }
3479
3480        state
3481            .snapshot
3482            .entries_by_path
3483            .edit(entries_by_path_edits, &());
3484        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
3485    }
3486
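        // Compute the set of `PathChange`s between two snapshots by walking a cursor over
        // each snapshot's entries in lock-step, scoped to the paths known to have changed.
        // Entries present only in the old snapshot are reported as `Removed`; entries
        // present only in the new one as `Added` (or `Loaded` during the initial scan);
        // entries present in both are compared by id and contents, as detailed below. For
        // example, outside of the initial scan, if a changed directory "a" contained "a/b"
        // before and contains "a/c" now, the result includes ("a/b", Removed) and
        // ("a/c", Added).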
3487    fn build_change_set(
3488        &self,
3489        old_snapshot: &Snapshot,
3490        new_snapshot: &Snapshot,
3491        event_paths: &[Arc<Path>],
3492    ) -> UpdatedEntriesSet {
3493        use BackgroundScannerPhase::*;
3494        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};
3495
3496        // Identify which paths have changed. Use the known set of changed
3497        // parent paths to optimize the search.
3498        let mut changes = Vec::new();
3499        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3500        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3501        old_paths.next(&());
3502        new_paths.next(&());
3503        for path in event_paths {
3504            let path = PathKey(path.clone());
3505            if old_paths.item().map_or(false, |e| e.path < path.0) {
3506                old_paths.seek_forward(&path, Bias::Left, &());
3507            }
3508            if new_paths.item().map_or(false, |e| e.path < path.0) {
3509                new_paths.seek_forward(&path, Bias::Left, &());
3510            }
3511
3512            loop {
3513                match (old_paths.item(), new_paths.item()) {
3514                    (Some(old_entry), Some(new_entry)) => {
3515                        if old_entry.path > path.0
3516                            && new_entry.path > path.0
3517                            && !old_entry.path.starts_with(&path.0)
3518                            && !new_entry.path.starts_with(&path.0)
3519                        {
3520                            break;
3521                        }
3522
3523                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3524                            Ordering::Less => {
3525                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
3526                                old_paths.next(&());
3527                            }
3528                            Ordering::Equal => {
3529                                if self.phase == EventsReceivedDuringInitialScan {
3530                                    // If the worktree was not fully initialized when this event was generated,
3531                                    // we can't know whether this entry was added during the scan or whether
3532                                    // it was merely updated.
3533                                    changes.push((
3534                                        new_entry.path.clone(),
3535                                        new_entry.id,
3536                                        AddedOrUpdated,
3537                                    ));
3538                                } else if old_entry.id != new_entry.id {
3539                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
3540                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
3541                                } else if old_entry != new_entry {
3542                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
3543                                }
3544                                old_paths.next(&());
3545                                new_paths.next(&());
3546                            }
3547                            Ordering::Greater => {
3548                                changes.push((
3549                                    new_entry.path.clone(),
3550                                    new_entry.id,
3551                                    if self.phase == InitialScan {
3552                                        Loaded
3553                                    } else {
3554                                        Added
3555                                    },
3556                                ));
3557                                new_paths.next(&());
3558                            }
3559                        }
3560                    }
3561                    (Some(old_entry), None) => {
3562                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
3563                        old_paths.next(&());
3564                    }
3565                    (None, Some(new_entry)) => {
3566                        changes.push((
3567                            new_entry.path.clone(),
3568                            new_entry.id,
3569                            if self.phase == InitialScan {
3570                                Loaded
3571                            } else {
3572                                Added
3573                            },
3574                        ));
3575                        new_paths.next(&());
3576                    }
3577                    (None, None) => break,
3578                }
3579            }
3580        }
3581
3582        changes.into()
3583    }
3584
3585    async fn progress_timer(&self, running: bool) {
3586        if !running {
3587            return futures::future::pending().await;
3588        }
3589
3590        #[cfg(any(test, feature = "test-support"))]
3591        if self.fs.is_fake() {
3592            return self.executor.simulate_random_delay().await;
3593        }
3594
3595        smol::Timer::after(Duration::from_millis(100)).await;
3596    }
3597}
3598
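    // Build the `CharBag` for an entry by extending the worktree root's bag with the
    // lowercased characters of the entry's path, for use in fuzzy matching.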
3599fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3600    let mut result = root_char_bag;
3601    result.extend(
3602        path.to_string_lossy()
3603            .chars()
3604            .map(|c| c.to_ascii_lowercase()),
3605    );
3606    result
3607}
3608
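    // A unit of work for the directory-scanning workers: the directory to read (as both an
    // absolute path and a path relative to the worktree root), the ignore stack in effect at
    // that depth, the inodes of its ancestors (used to break symlink cycles), and a handle to
    // the queue so that discovered subdirectories can be scheduled.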
3609struct ScanJob {
3610    abs_path: Arc<Path>,
3611    path: Arc<Path>,
3612    ignore_stack: Arc<IgnoreStack>,
3613    scan_queue: Sender<ScanJob>,
3614    ancestor_inodes: TreeSet<u64>,
3615}
3616
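    // A unit of work for the ignore-status workers: a directory whose children need their
    // `is_ignored` flags recomputed against the given ignore stack.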
3617struct UpdateIgnoreStatusJob {
3618    abs_path: Arc<Path>,
3619    ignore_stack: Arc<IgnoreStack>,
3620    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3621}
3622
3623pub trait WorktreeHandle {
3624    #[cfg(any(test, feature = "test-support"))]
3625    fn flush_fs_events<'a>(
3626        &self,
3627        cx: &'a gpui::TestAppContext,
3628    ) -> futures::future::LocalBoxFuture<'a, ()>;
3629}
3630
3631impl WorktreeHandle for ModelHandle<Worktree> {
3632    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
3633    // occurred before the worktree was constructed. These events can cause the worktree to perform
3634    // extra directory scans, and emit extra scan-state notifications.
3635    //
3636    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3637    // to ensure that all redundant FS events have already been processed.
3638    #[cfg(any(test, feature = "test-support"))]
3639    fn flush_fs_events<'a>(
3640        &self,
3641        cx: &'a gpui::TestAppContext,
3642    ) -> futures::future::LocalBoxFuture<'a, ()> {
3643        let filename = "fs-event-sentinel";
3644        let tree = self.clone();
3645        let (fs, root_path) = self.read_with(cx, |tree, _| {
3646            let tree = tree.as_local().unwrap();
3647            (tree.fs.clone(), tree.abs_path().clone())
3648        });
3649
3650        async move {
3651            fs.create_file(&root_path.join(filename), Default::default())
3652                .await
3653                .unwrap();
3654            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3655                .await;
3656
3657            fs.remove_file(&root_path.join(filename), Default::default())
3658                .await
3659                .unwrap();
3660            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3661                .await;
3662
3663            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3664                .await;
3665        }
3666        .boxed_local()
3667    }
3668}
3669
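    // A `sum_tree` dimension accumulated while seeking through `entries_by_path`, tracking
    // how many entries (total, visible-only, file-only, visible-file-only) precede the
    // current position, along with the greatest path seen so far.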
3670#[derive(Clone, Debug)]
3671struct TraversalProgress<'a> {
3672    max_path: &'a Path,
3673    count: usize,
3674    visible_count: usize,
3675    file_count: usize,
3676    visible_file_count: usize,
3677}
3678
3679impl<'a> TraversalProgress<'a> {
3680    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3681        match (include_ignored, include_dirs) {
3682            (true, true) => self.count,
3683            (true, false) => self.file_count,
3684            (false, true) => self.visible_count,
3685            (false, false) => self.visible_file_count,
3686        }
3687    }
3688}
3689
3690impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3691    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3692        self.max_path = summary.max_path.as_ref();
3693        self.count += summary.count;
3694        self.visible_count += summary.visible_count;
3695        self.file_count += summary.file_count;
3696        self.visible_file_count += summary.visible_file_count;
3697    }
3698}
3699
3700impl<'a> Default for TraversalProgress<'a> {
3701    fn default() -> Self {
3702        Self {
3703            max_path: Path::new(""),
3704            count: 0,
3705            visible_count: 0,
3706            file_count: 0,
3707            visible_file_count: 0,
3708        }
3709    }
3710}
3711
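    // A cursor over a snapshot's `entries_by_path` tree that can skip directories and/or
    // ignored entries. The `TraversalProgress` dimension keeps running counts, which allows
    // seeking by an entry index as well as by path (see the `tree.entries(false)` calls in
    // the tests below, which iterate entries while excluding ignored ones).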
3712pub struct Traversal<'a> {
3713    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3714    include_ignored: bool,
3715    include_dirs: bool,
3716}
3717
3718impl<'a> Traversal<'a> {
3719    pub fn advance(&mut self) -> bool {
3720        self.cursor.seek_forward(
3721            &TraversalTarget::Count {
3722                count: self.end_offset() + 1,
3723                include_dirs: self.include_dirs,
3724                include_ignored: self.include_ignored,
3725            },
3726            Bias::Left,
3727            &(),
3728        )
3729    }
3730
3731    pub fn advance_to_sibling(&mut self) -> bool {
3732        while let Some(entry) = self.cursor.item() {
3733            self.cursor.seek_forward(
3734                &TraversalTarget::PathSuccessor(&entry.path),
3735                Bias::Left,
3736                &(),
3737            );
3738            if let Some(entry) = self.cursor.item() {
3739                if (self.include_dirs || !entry.is_dir())
3740                    && (self.include_ignored || !entry.is_ignored)
3741                {
3742                    return true;
3743                }
3744            }
3745        }
3746        false
3747    }
3748
3749    pub fn entry(&self) -> Option<&'a Entry> {
3750        self.cursor.item()
3751    }
3752
3753    pub fn start_offset(&self) -> usize {
3754        self.cursor
3755            .start()
3756            .count(self.include_dirs, self.include_ignored)
3757    }
3758
3759    pub fn end_offset(&self) -> usize {
3760        self.cursor
3761            .end(&())
3762            .count(self.include_dirs, self.include_ignored)
3763    }
3764}
3765
3766impl<'a> Iterator for Traversal<'a> {
3767    type Item = &'a Entry;
3768
3769    fn next(&mut self) -> Option<Self::Item> {
3770        if let Some(item) = self.entry() {
3771            self.advance();
3772            Some(item)
3773        } else {
3774            None
3775        }
3776    }
3777}
3778
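    // Seek targets for `Traversal`: an exact path, the first position past all descendants
    // of a path (`PathSuccessor`), or an absolute count of entries under the current
    // include_dirs / include_ignored filters.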
3779#[derive(Debug)]
3780enum TraversalTarget<'a> {
3781    Path(&'a Path),
3782    PathSuccessor(&'a Path),
3783    Count {
3784        count: usize,
3785        include_ignored: bool,
3786        include_dirs: bool,
3787    },
3788}
3789
3790impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3791    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3792        match self {
3793            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3794            TraversalTarget::PathSuccessor(path) => {
3795                if !cursor_location.max_path.starts_with(path) {
3796                    Ordering::Equal
3797                } else {
3798                    Ordering::Greater
3799                }
3800            }
3801            TraversalTarget::Count {
3802                count,
3803                include_dirs,
3804                include_ignored,
3805            } => Ord::cmp(
3806                count,
3807                &cursor_location.count(*include_dirs, *include_ignored),
3808            ),
3809        }
3810    }
3811}
3812
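    // Yields the immediate children of `parent_path`, advancing the traversal to the next
    // sibling after each item; iteration stops at the first entry outside `parent_path`.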
3813struct ChildEntriesIter<'a> {
3814    parent_path: &'a Path,
3815    traversal: Traversal<'a>,
3816}
3817
3818impl<'a> Iterator for ChildEntriesIter<'a> {
3819    type Item = &'a Entry;
3820
3821    fn next(&mut self) -> Option<Self::Item> {
3822        if let Some(item) = self.traversal.entry() {
3823            if item.path.starts_with(&self.parent_path) {
3824                self.traversal.advance_to_sibling();
3825                return Some(item);
3826            }
3827        }
3828        None
3829    }
3830}
3831
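    // Yields every entry beneath `parent_path` in path order, advancing one entry at a time.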
3832struct DescendentEntriesIter<'a> {
3833    parent_path: &'a Path,
3834    traversal: Traversal<'a>,
3835}
3836
3837impl<'a> Iterator for DescendentEntriesIter<'a> {
3838    type Item = &'a Entry;
3839
3840    fn next(&mut self) -> Option<Self::Item> {
3841        if let Some(item) = self.traversal.entry() {
3842            if item.path.starts_with(&self.parent_path) {
3843                self.traversal.advance();
3844                return Some(item);
3845            }
3846        }
3847        None
3848    }
3849}
3850
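    // Conversions between in-memory worktree entries and their protobuf representation, used
    // when a worktree's contents are synchronized with remote peers.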
3851impl<'a> From<&'a Entry> for proto::Entry {
3852    fn from(entry: &'a Entry) -> Self {
3853        Self {
3854            id: entry.id.to_proto(),
3855            is_dir: entry.is_dir(),
3856            path: entry.path.to_string_lossy().into(),
3857            inode: entry.inode,
3858            mtime: Some(entry.mtime.into()),
3859            is_symlink: entry.is_symlink,
3860            is_ignored: entry.is_ignored,
3861        }
3862    }
3863}
3864
3865impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3866    type Error = anyhow::Error;
3867
3868    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3869        if let Some(mtime) = entry.mtime {
3870            let kind = if entry.is_dir {
3871                EntryKind::Dir
3872            } else {
3873                let mut char_bag = *root_char_bag;
3874                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3875                EntryKind::File(char_bag)
3876            };
3877            let path: Arc<Path> = PathBuf::from(entry.path).into();
3878            Ok(Entry {
3879                id: ProjectEntryId::from_proto(entry.id),
3880                kind,
3881                path,
3882                inode: entry.inode,
3883                mtime: mtime.into(),
3884                is_symlink: entry.is_symlink,
3885                is_ignored: entry.is_ignored,
3886            })
3887        } else {
3888            Err(anyhow!(
3889                "missing mtime in remote worktree entry {:?}",
3890                entry.path
3891            ))
3892        }
3893    }
3894}
3895
3896#[cfg(test)]
3897mod tests {
3898    use super::*;
3899    use fs::{FakeFs, RealFs};
3900    use gpui::{executor::Deterministic, TestAppContext};
3901    use pretty_assertions::assert_eq;
3902    use rand::prelude::*;
3903    use serde_json::json;
3904    use std::{env, fmt::Write};
3905    use util::{http::FakeHttpClient, test::temp_tree};
3906
3907    #[gpui::test]
3908    async fn test_traversal(cx: &mut TestAppContext) {
3909        let fs = FakeFs::new(cx.background());
3910        fs.insert_tree(
3911            "/root",
3912            json!({
3913               ".gitignore": "a/b\n",
3914               "a": {
3915                   "b": "",
3916                   "c": "",
3917               }
3918            }),
3919        )
3920        .await;
3921
3922        let http_client = FakeHttpClient::with_404_response();
3923        let client = cx.read(|cx| Client::new(http_client, cx));
3924
3925        let tree = Worktree::local(
3926            client,
3927            Path::new("/root"),
3928            true,
3929            fs,
3930            Default::default(),
3931            &mut cx.to_async(),
3932        )
3933        .await
3934        .unwrap();
3935        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3936            .await;
3937
3938        tree.read_with(cx, |tree, _| {
3939            assert_eq!(
3940                tree.entries(false)
3941                    .map(|entry| entry.path.as_ref())
3942                    .collect::<Vec<_>>(),
3943                vec![
3944                    Path::new(""),
3945                    Path::new(".gitignore"),
3946                    Path::new("a"),
3947                    Path::new("a/c"),
3948                ]
3949            );
3950            assert_eq!(
3951                tree.entries(true)
3952                    .map(|entry| entry.path.as_ref())
3953                    .collect::<Vec<_>>(),
3954                vec![
3955                    Path::new(""),
3956                    Path::new(".gitignore"),
3957                    Path::new("a"),
3958                    Path::new("a/b"),
3959                    Path::new("a/c"),
3960                ]
3961            );
3962        })
3963    }
3964
3965    #[gpui::test]
3966    async fn test_descendent_entries(cx: &mut TestAppContext) {
3967        let fs = FakeFs::new(cx.background());
3968        fs.insert_tree(
3969            "/root",
3970            json!({
3971                "a": "",
3972                "b": {
3973                   "c": {
3974                       "d": ""
3975                   },
3976                   "e": {}
3977                },
3978                "f": "",
3979                "g": {
3980                    "h": {}
3981                },
3982                "i": {
3983                    "j": {
3984                        "k": ""
3985                    },
3986                    "l": {
3987
3988                    }
3989                },
3990                ".gitignore": "i/j\n",
3991            }),
3992        )
3993        .await;
3994
3995        let http_client = FakeHttpClient::with_404_response();
3996        let client = cx.read(|cx| Client::new(http_client, cx));
3997
3998        let tree = Worktree::local(
3999            client,
4000            Path::new("/root"),
4001            true,
4002            fs,
4003            Default::default(),
4004            &mut cx.to_async(),
4005        )
4006        .await
4007        .unwrap();
4008        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4009            .await;
4010
4011        tree.read_with(cx, |tree, _| {
4012            assert_eq!(
4013                tree.descendent_entries(false, false, Path::new("b"))
4014                    .map(|entry| entry.path.as_ref())
4015                    .collect::<Vec<_>>(),
4016                vec![Path::new("b/c/d"),]
4017            );
4018            assert_eq!(
4019                tree.descendent_entries(true, false, Path::new("b"))
4020                    .map(|entry| entry.path.as_ref())
4021                    .collect::<Vec<_>>(),
4022                vec![
4023                    Path::new("b"),
4024                    Path::new("b/c"),
4025                    Path::new("b/c/d"),
4026                    Path::new("b/e"),
4027                ]
4028            );
4029
4030            assert_eq!(
4031                tree.descendent_entries(false, false, Path::new("g"))
4032                    .map(|entry| entry.path.as_ref())
4033                    .collect::<Vec<_>>(),
4034                Vec::<PathBuf>::new()
4035            );
4036            assert_eq!(
4037                tree.descendent_entries(true, false, Path::new("g"))
4038                    .map(|entry| entry.path.as_ref())
4039                    .collect::<Vec<_>>(),
4040                vec![Path::new("g"), Path::new("g/h"),]
4041            );
4042
4043            assert_eq!(
4044                tree.descendent_entries(false, false, Path::new("i"))
4045                    .map(|entry| entry.path.as_ref())
4046                    .collect::<Vec<_>>(),
4047                Vec::<PathBuf>::new()
4048            );
4049            assert_eq!(
4050                tree.descendent_entries(false, true, Path::new("i"))
4051                    .map(|entry| entry.path.as_ref())
4052                    .collect::<Vec<_>>(),
4053                vec![Path::new("i/j/k")]
4054            );
4055            assert_eq!(
4056                tree.descendent_entries(true, false, Path::new("i"))
4057                    .map(|entry| entry.path.as_ref())
4058                    .collect::<Vec<_>>(),
4059                vec![Path::new("i"), Path::new("i/l"),]
4060            );
4061        })
4062    }
4063
4064    #[gpui::test(iterations = 10)]
4065    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
4066        let fs = FakeFs::new(cx.background());
4067        fs.insert_tree(
4068            "/root",
4069            json!({
4070                "lib": {
4071                    "a": {
4072                        "a.txt": ""
4073                    },
4074                    "b": {
4075                        "b.txt": ""
4076                    }
4077                }
4078            }),
4079        )
4080        .await;
4081        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
4082        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
4083
4084        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4085        let tree = Worktree::local(
4086            client,
4087            Path::new("/root"),
4088            true,
4089            fs.clone(),
4090            Default::default(),
4091            &mut cx.to_async(),
4092        )
4093        .await
4094        .unwrap();
4095
4096        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4097            .await;
4098
4099        tree.read_with(cx, |tree, _| {
4100            assert_eq!(
4101                tree.entries(false)
4102                    .map(|entry| entry.path.as_ref())
4103                    .collect::<Vec<_>>(),
4104                vec![
4105                    Path::new(""),
4106                    Path::new("lib"),
4107                    Path::new("lib/a"),
4108                    Path::new("lib/a/a.txt"),
4109                    Path::new("lib/a/lib"),
4110                    Path::new("lib/b"),
4111                    Path::new("lib/b/b.txt"),
4112                    Path::new("lib/b/lib"),
4113                ]
4114            );
4115        });
4116
4117        fs.rename(
4118            Path::new("/root/lib/a/lib"),
4119            Path::new("/root/lib/a/lib-2"),
4120            Default::default(),
4121        )
4122        .await
4123        .unwrap();
4124        executor.run_until_parked();
4125        tree.read_with(cx, |tree, _| {
4126            assert_eq!(
4127                tree.entries(false)
4128                    .map(|entry| entry.path.as_ref())
4129                    .collect::<Vec<_>>(),
4130                vec![
4131                    Path::new(""),
4132                    Path::new("lib"),
4133                    Path::new("lib/a"),
4134                    Path::new("lib/a/a.txt"),
4135                    Path::new("lib/a/lib-2"),
4136                    Path::new("lib/b"),
4137                    Path::new("lib/b/b.txt"),
4138                    Path::new("lib/b/lib"),
4139                ]
4140            );
4141        });
4142    }
4143
4144    #[gpui::test]
4145    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
4146        // .gitignores are handled explicitly by Zed and do not use the git
4147        // machinery that the git_tests module checks
4148        let parent_dir = temp_tree(json!({
4149            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
4150            "tree": {
4151                ".git": {},
4152                ".gitignore": "ignored-dir\n",
4153                "tracked-dir": {
4154                    "tracked-file1": "",
4155                    "ancestor-ignored-file1": "",
4156                },
4157                "ignored-dir": {
4158                    "ignored-file1": ""
4159                }
4160            }
4161        }));
4162        let dir = parent_dir.path().join("tree");
4163
4164        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4165
4166        let tree = Worktree::local(
4167            client,
4168            dir.as_path(),
4169            true,
4170            Arc::new(RealFs),
4171            Default::default(),
4172            &mut cx.to_async(),
4173        )
4174        .await
4175        .unwrap();
4176        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4177            .await;
4178        tree.flush_fs_events(cx).await;
4179        cx.read(|cx| {
4180            let tree = tree.read(cx);
4181            assert!(
4182                !tree
4183                    .entry_for_path("tracked-dir/tracked-file1")
4184                    .unwrap()
4185                    .is_ignored
4186            );
4187            assert!(
4188                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
4189                    .unwrap()
4190                    .is_ignored
4191            );
4192            assert!(
4193                tree.entry_for_path("ignored-dir/ignored-file1")
4194                    .unwrap()
4195                    .is_ignored
4196            );
4197        });
4198
4199        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
4200        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
4201        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
4202        tree.flush_fs_events(cx).await;
4203        cx.read(|cx| {
4204            let tree = tree.read(cx);
4205            assert!(
4206                !tree
4207                    .entry_for_path("tracked-dir/tracked-file2")
4208                    .unwrap()
4209                    .is_ignored
4210            );
4211            assert!(
4212                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
4213                    .unwrap()
4214                    .is_ignored
4215            );
4216            assert!(
4217                tree.entry_for_path("ignored-dir/ignored-file2")
4218                    .unwrap()
4219                    .is_ignored
4220            );
4221            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
4222        });
4223    }
4224
4225    #[gpui::test]
4226    async fn test_write_file(cx: &mut TestAppContext) {
4227        let dir = temp_tree(json!({
4228            ".git": {},
4229            ".gitignore": "ignored-dir\n",
4230            "tracked-dir": {},
4231            "ignored-dir": {}
4232        }));
4233
4234        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4235
4236        let tree = Worktree::local(
4237            client,
4238            dir.path(),
4239            true,
4240            Arc::new(RealFs),
4241            Default::default(),
4242            &mut cx.to_async(),
4243        )
4244        .await
4245        .unwrap();
4246        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4247            .await;
4248        tree.flush_fs_events(cx).await;
4249
4250        tree.update(cx, |tree, cx| {
4251            tree.as_local().unwrap().write_file(
4252                Path::new("tracked-dir/file.txt"),
4253                "hello".into(),
4254                Default::default(),
4255                cx,
4256            )
4257        })
4258        .await
4259        .unwrap();
4260        tree.update(cx, |tree, cx| {
4261            tree.as_local().unwrap().write_file(
4262                Path::new("ignored-dir/file.txt"),
4263                "world".into(),
4264                Default::default(),
4265                cx,
4266            )
4267        })
4268        .await
4269        .unwrap();
4270
4271        tree.read_with(cx, |tree, _| {
4272            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
4273            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
4274            assert!(!tracked.is_ignored);
4275            assert!(ignored.is_ignored);
4276        });
4277    }
4278
4279    #[gpui::test(iterations = 30)]
4280    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
4281        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4282
4283        let fs = FakeFs::new(cx.background());
4284        fs.insert_tree(
4285            "/root",
4286            json!({
4287                "b": {},
4288                "c": {},
4289                "d": {},
4290            }),
4291        )
4292        .await;
4293
4294        let tree = Worktree::local(
4295            client,
4296            "/root".as_ref(),
4297            true,
4298            fs,
4299            Default::default(),
4300            &mut cx.to_async(),
4301        )
4302        .await
4303        .unwrap();
4304
4305        let snapshot1 = tree.update(cx, |tree, cx| {
4306            let tree = tree.as_local_mut().unwrap();
4307            let snapshot = Arc::new(Mutex::new(tree.snapshot()));
4308            let _ = tree.observe_updates(0, cx, {
4309                let snapshot = snapshot.clone();
4310                move |update| {
4311                    snapshot.lock().apply_remote_update(update).unwrap();
4312                    async { true }
4313                }
4314            });
4315            snapshot
4316        });
4317
4318        let entry = tree
4319            .update(cx, |tree, cx| {
4320                tree.as_local_mut()
4321                    .unwrap()
4322                    .create_entry("a/e".as_ref(), true, cx)
4323            })
4324            .await
4325            .unwrap();
4326        assert!(entry.is_dir());
4327
4328        cx.foreground().run_until_parked();
4329        tree.read_with(cx, |tree, _| {
4330            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
4331        });
4332
4333        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4334        assert_eq!(
4335            snapshot1.lock().entries(true).collect::<Vec<_>>(),
4336            snapshot2.entries(true).collect::<Vec<_>>()
4337        );
4338    }
4339
4340    #[gpui::test(iterations = 100)]
4341    async fn test_random_worktree_operations_during_initial_scan(
4342        cx: &mut TestAppContext,
4343        mut rng: StdRng,
4344    ) {
4345        let operations = env::var("OPERATIONS")
4346            .map(|o| o.parse().unwrap())
4347            .unwrap_or(5);
4348        let initial_entries = env::var("INITIAL_ENTRIES")
4349            .map(|o| o.parse().unwrap())
4350            .unwrap_or(20);
4351
4352        let root_dir = Path::new("/test");
4353        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4354        fs.as_fake().insert_tree(root_dir, json!({})).await;
4355        for _ in 0..initial_entries {
4356            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4357        }
4358        log::info!("generated initial tree");
4359
4360        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4361        let worktree = Worktree::local(
4362            client.clone(),
4363            root_dir,
4364            true,
4365            fs.clone(),
4366            Default::default(),
4367            &mut cx.to_async(),
4368        )
4369        .await
4370        .unwrap();
4371
4372        let mut snapshots =
4373            vec![worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot())];
4374        let updates = Arc::new(Mutex::new(Vec::new()));
4375        worktree.update(cx, |tree, cx| {
4376            check_worktree_change_events(tree, cx);
4377
4378            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
4379                let updates = updates.clone();
4380                move |update| {
4381                    updates.lock().push(update);
4382                    async { true }
4383                }
4384            });
4385        });
4386
4387        for _ in 0..operations {
4388            worktree
4389                .update(cx, |worktree, cx| {
4390                    randomly_mutate_worktree(worktree, &mut rng, cx)
4391                })
4392                .await
4393                .log_err();
4394            worktree.read_with(cx, |tree, _| {
4395                tree.as_local().unwrap().snapshot.check_invariants()
4396            });
4397
4398            if rng.gen_bool(0.6) {
4399                snapshots
4400                    .push(worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot()));
4401            }
4402        }
4403
4404        worktree
4405            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4406            .await;
4407
4408        cx.foreground().run_until_parked();
4409
4410        let final_snapshot = worktree.read_with(cx, |tree, _| {
4411            let tree = tree.as_local().unwrap();
4412            tree.snapshot.check_invariants();
4413            tree.snapshot()
4414        });
4415
4416        for (i, snapshot) in snapshots.into_iter().enumerate().rev() {
4417            let mut updated_snapshot = snapshot.clone();
4418            for update in updates.lock().iter() {
4419                if update.scan_id >= updated_snapshot.scan_id() as u64 {
4420                    updated_snapshot
4421                        .apply_remote_update(update.clone())
4422                        .unwrap();
4423                }
4424            }
4425
4426            assert_eq!(
4427                updated_snapshot.entries(true).collect::<Vec<_>>(),
4428                final_snapshot.entries(true).collect::<Vec<_>>(),
4429                "wrong updates after snapshot {i}: {snapshot:#?} {updates:#?}",
4430            );
4431        }
4432    }
4433
4434    #[gpui::test(iterations = 100)]
4435    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4436        let operations = env::var("OPERATIONS")
4437            .map(|o| o.parse().unwrap())
4438            .unwrap_or(40);
4439        let initial_entries = env::var("INITIAL_ENTRIES")
4440            .map(|o| o.parse().unwrap())
4441            .unwrap_or(20);
4442
4443        let root_dir = Path::new("/test");
4444        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4445        fs.as_fake().insert_tree(root_dir, json!({})).await;
4446        for _ in 0..initial_entries {
4447            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4448        }
4449        log::info!("generated initial tree");
4450
4451        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4452        let worktree = Worktree::local(
4453            client.clone(),
4454            root_dir,
4455            true,
4456            fs.clone(),
4457            Default::default(),
4458            &mut cx.to_async(),
4459        )
4460        .await
4461        .unwrap();
4462
4463        let updates = Arc::new(Mutex::new(Vec::new()));
4464        worktree.update(cx, |tree, cx| {
4465            check_worktree_change_events(tree, cx);
4466
4467            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
4468                let updates = updates.clone();
4469                move |update| {
4470                    updates.lock().push(update);
4471                    async { true }
4472                }
4473            });
4474        });
4475
4476        worktree
4477            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4478            .await;
4479
4480        fs.as_fake().pause_events();
4481        let mut snapshots = Vec::new();
4482        let mut mutations_len = operations;
4483        while mutations_len > 1 {
4484            if rng.gen_bool(0.2) {
4485                worktree
4486                    .update(cx, |worktree, cx| {
4487                        randomly_mutate_worktree(worktree, &mut rng, cx)
4488                    })
4489                    .await
4490                    .log_err();
4491            } else {
4492                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4493            }
4494
4495            let buffered_event_count = fs.as_fake().buffered_event_count();
4496            if buffered_event_count > 0 && rng.gen_bool(0.3) {
4497                let len = rng.gen_range(0..=buffered_event_count);
4498                log::info!("flushing {} events", len);
4499                fs.as_fake().flush_events(len);
4500            } else {
4501                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4502                mutations_len -= 1;
4503            }
4504
4505            cx.foreground().run_until_parked();
4506            if rng.gen_bool(0.2) {
4507                log::info!("storing snapshot {}", snapshots.len());
4508                let snapshot =
4509                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4510                snapshots.push(snapshot);
4511            }
4512        }
4513
4514        log::info!("quiescing");
4515        fs.as_fake().flush_events(usize::MAX);
4516        cx.foreground().run_until_parked();
4517        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4518        snapshot.check_invariants();
4519
4520        {
4521            let new_worktree = Worktree::local(
4522                client.clone(),
4523                root_dir,
4524                true,
4525                fs.clone(),
4526                Default::default(),
4527                &mut cx.to_async(),
4528            )
4529            .await
4530            .unwrap();
4531            new_worktree
4532                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4533                .await;
4534            let new_snapshot =
4535                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4536            assert_eq!(
4537                snapshot.entries_without_ids(true),
4538                new_snapshot.entries_without_ids(true)
4539            );
4540        }
4541
4542        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate().rev() {
4543            for update in updates.lock().iter() {
4544                if update.scan_id >= prev_snapshot.scan_id() as u64 {
4545                    prev_snapshot.apply_remote_update(update.clone()).unwrap();
4546                }
4547            }
4548
4549            assert_eq!(
4550                prev_snapshot.entries(true).collect::<Vec<_>>(),
4551                snapshot.entries(true).collect::<Vec<_>>(),
4552                "wrong updates after snapshot {i}: {updates:#?}",
4553            );
4554        }
4555    }
4556
4557    // The worktree's `UpdatedEntries` event can be used to follow along with
4558    // all changes to the worktree's snapshot.
4559    fn check_worktree_change_events(tree: &mut Worktree, cx: &mut ModelContext<Worktree>) {
4560        let mut entries = tree.entries(true).cloned().collect::<Vec<_>>();
4561        cx.subscribe(&cx.handle(), move |tree, _, event, _| {
4562            if let Event::UpdatedEntries(changes) = event {
4563                for (path, _, change_type) in changes.iter() {
4564                    let entry = tree.entry_for_path(&path).cloned();
4565                    let ix = match entries.binary_search_by_key(&path, |e| &e.path) {
4566                        Ok(ix) | Err(ix) => ix,
4567                    };
4568                    match change_type {
4569                        PathChange::Loaded => entries.insert(ix, entry.unwrap()),
4570                        PathChange::Added => entries.insert(ix, entry.unwrap()),
4571                        PathChange::Removed => drop(entries.remove(ix)),
4572                        PathChange::Updated => {
4573                            let entry = entry.unwrap();
4574                            let existing_entry = entries.get_mut(ix).unwrap();
4575                            assert_eq!(existing_entry.path, entry.path);
4576                            *existing_entry = entry;
4577                        }
4578                        PathChange::AddedOrUpdated => {
4579                            let entry = entry.unwrap();
4580                            if entries.get(ix).map(|e| &e.path) == Some(&entry.path) {
4581                                *entries.get_mut(ix).unwrap() = entry;
4582                            } else {
4583                                entries.insert(ix, entry);
4584                            }
4585                        }
4586                    }
4587                }
4588
4589                let new_entries = tree.entries(true).cloned().collect::<Vec<_>>();
4590                assert_eq!(entries, new_entries, "incorrect changes: {:?}", changes);
4591            }
4592        })
4593        .detach();
4594    }
4595
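        // Applies a random operation to the worktree itself: deleting an entry,
        // renaming an entry to a new location, or creating a new file or
        // directory (or overwriting an existing file).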
4596    fn randomly_mutate_worktree(
4597        worktree: &mut Worktree,
4598        rng: &mut impl Rng,
4599        cx: &mut ModelContext<Worktree>,
4600    ) -> Task<Result<()>> {
4601        log::info!("mutating worktree");
4602        let worktree = worktree.as_local_mut().unwrap();
4603        let snapshot = worktree.snapshot();
4604        let entry = snapshot.entries(false).choose(rng).unwrap();
4605
4606        match rng.gen_range(0_u32..100) {
4607            0..=33 if entry.path.as_ref() != Path::new("") => {
4608                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4609                worktree.delete_entry(entry.id, cx).unwrap()
4610            }
4611            ..=66 if entry.path.as_ref() != Path::new("") => {
4612                let other_entry = snapshot.entries(false).choose(rng).unwrap();
4613                let new_parent_path = if other_entry.is_dir() {
4614                    other_entry.path.clone()
4615                } else {
4616                    other_entry.path.parent().unwrap().into()
4617                };
4618                let mut new_path = new_parent_path.join(gen_name(rng));
4619                if new_path.starts_with(&entry.path) {
4620                    new_path = gen_name(rng).into();
4621                }
4622
4623                log::info!(
4624                    "renaming entry {:?} ({}) to {:?}",
4625                    entry.path,
4626                    entry.id.0,
4627                    new_path
4628                );
4629                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4630                cx.foreground().spawn(async move {
4631                    task.await?;
4632                    Ok(())
4633                })
4634            }
4635            _ => {
4636                let task = if entry.is_dir() {
4637                    let child_path = entry.path.join(gen_name(rng));
4638                    let is_dir = rng.gen_bool(0.3);
4639                    log::info!(
4640                        "creating {} at {:?}",
4641                        if is_dir { "dir" } else { "file" },
4642                        child_path,
4643                    );
4644                    worktree.create_entry(child_path, is_dir, cx)
4645                } else {
4646                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4647                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4648                };
4649                cx.foreground().spawn(async move {
4650                    task.await?;
4651                    Ok(())
4652                })
4653            }
4654        }
4655    }
4656
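        // Applies a random operation directly to the fake filesystem: creating a
        // file or directory, writing a `.gitignore`, renaming a path (possibly
        // over an existing directory), or deleting a file or directory.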
4657    async fn randomly_mutate_fs(
4658        fs: &Arc<dyn Fs>,
4659        root_path: &Path,
4660        insertion_probability: f64,
4661        rng: &mut impl Rng,
4662    ) {
4663        log::info!("mutating fs");
4664        let mut files = Vec::new();
4665        let mut dirs = Vec::new();
4666        for path in fs.as_fake().paths() {
4667            if path.starts_with(root_path) {
4668                if fs.is_file(&path).await {
4669                    files.push(path);
4670                } else {
4671                    dirs.push(path);
4672                }
4673            }
4674        }
4675
4676        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4677            let path = dirs.choose(rng).unwrap();
4678            let new_path = path.join(gen_name(rng));
4679
4680            if rng.gen() {
4681                log::info!(
4682                    "creating dir {:?}",
4683                    new_path.strip_prefix(root_path).unwrap()
4684                );
4685                fs.create_dir(&new_path).await.unwrap();
4686            } else {
4687                log::info!(
4688                    "creating file {:?}",
4689                    new_path.strip_prefix(root_path).unwrap()
4690                );
4691                fs.create_file(&new_path, Default::default()).await.unwrap();
4692            }
4693        } else if rng.gen_bool(0.05) {
4694            let ignore_dir_path = dirs.choose(rng).unwrap();
4695            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4696
4697            let subdirs = dirs
4698                .iter()
4699                .filter(|d| d.starts_with(&ignore_dir_path))
4700                .cloned()
4701                .collect::<Vec<_>>();
4702            let subfiles = files
4703                .iter()
4704                .filter(|d| d.starts_with(&ignore_dir_path))
4705                .cloned()
4706                .collect::<Vec<_>>();
4707            let files_to_ignore = {
4708                let len = rng.gen_range(0..=subfiles.len());
4709                subfiles.choose_multiple(rng, len)
4710            };
4711            let dirs_to_ignore = {
4712                let len = rng.gen_range(0..subdirs.len());
4713                subdirs.choose_multiple(rng, len)
4714            };
4715
4716            let mut ignore_contents = String::new();
4717            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4718                writeln!(
4719                    ignore_contents,
4720                    "{}",
4721                    path_to_ignore
4722                        .strip_prefix(&ignore_dir_path)
4723                        .unwrap()
4724                        .to_str()
4725                        .unwrap()
4726                )
4727                .unwrap();
4728            }
4729            log::info!(
4730                "creating gitignore {:?} with contents:\n{}",
4731                ignore_path.strip_prefix(&root_path).unwrap(),
4732                ignore_contents
4733            );
4734            fs.save(
4735                &ignore_path,
4736                &ignore_contents.as_str().into(),
4737                Default::default(),
4738            )
4739            .await
4740            .unwrap();
4741        } else {
4742            let old_path = {
4743                let file_path = files.choose(rng);
4744                let dir_path = dirs[1..].choose(rng);
4745                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4746            };
4747
4748            let is_rename = rng.gen();
4749            if is_rename {
4750                let new_path_parent = dirs
4751                    .iter()
4752                    .filter(|d| !d.starts_with(old_path))
4753                    .choose(rng)
4754                    .unwrap();
4755
4756                let overwrite_existing_dir =
4757                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4758                let new_path = if overwrite_existing_dir {
4759                    fs.remove_dir(
4760                        &new_path_parent,
4761                        RemoveOptions {
4762                            recursive: true,
4763                            ignore_if_not_exists: true,
4764                        },
4765                    )
4766                    .await
4767                    .unwrap();
4768                    new_path_parent.to_path_buf()
4769                } else {
4770                    new_path_parent.join(gen_name(rng))
4771                };
4772
4773                log::info!(
4774                    "renaming {:?} to {}{:?}",
4775                    old_path.strip_prefix(&root_path).unwrap(),
4776                    if overwrite_existing_dir {
4777                        "overwrite "
4778                    } else {
4779                        ""
4780                    },
4781                    new_path.strip_prefix(&root_path).unwrap()
4782                );
4783                fs.rename(
4784                    &old_path,
4785                    &new_path,
4786                    fs::RenameOptions {
4787                        overwrite: true,
4788                        ignore_if_exists: true,
4789                    },
4790                )
4791                .await
4792                .unwrap();
4793            } else if fs.is_file(&old_path).await {
4794                log::info!(
4795                    "deleting file {:?}",
4796                    old_path.strip_prefix(&root_path).unwrap()
4797                );
4798                fs.remove_file(old_path, Default::default()).await.unwrap();
4799            } else {
4800                log::info!(
4801                    "deleting dir {:?}",
4802                    old_path.strip_prefix(&root_path).unwrap()
4803                );
4804                fs.remove_dir(
4805                    &old_path,
4806                    RemoveOptions {
4807                        recursive: true,
4808                        ignore_if_not_exists: true,
4809                    },
4810                )
4811                .await
4812                .unwrap();
4813            }
4814        }
4815    }
4816
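        // Generates a random six-character alphanumeric name.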
4817    fn gen_name(rng: &mut impl Rng) -> String {
4818        (0..6)
4819            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4820            .map(char::from)
4821            .collect()
4822    }
4823
4824    impl LocalSnapshot {
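            // Verifies that `entries_by_path` and `entries_by_id` agree, that the
            // file iterators yield exactly the file entries, that all traversal
            // orders are consistent, and that every tracked `.gitignore` has a
            // corresponding entry in the snapshot.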
4825        fn check_invariants(&self) {
4826            assert_eq!(
4827                self.entries_by_path
4828                    .cursor::<()>()
4829                    .map(|e| (&e.path, e.id))
4830                    .collect::<Vec<_>>(),
4831                self.entries_by_id
4832                    .cursor::<()>()
4833                    .map(|e| (&e.path, e.id))
4834                    .collect::<collections::BTreeSet<_>>()
4835                    .into_iter()
4836                    .collect::<Vec<_>>(),
4837                "entries_by_path and entries_by_id are inconsistent"
4838            );
4839
4840            let mut files = self.files(true, 0);
4841            let mut visible_files = self.files(false, 0);
4842            for entry in self.entries_by_path.cursor::<()>() {
4843                if entry.is_file() {
4844                    assert_eq!(files.next().unwrap().inode, entry.inode);
4845                    if !entry.is_ignored {
4846                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4847                    }
4848                }
4849            }
4850
4851            assert!(files.next().is_none());
4852            assert!(visible_files.next().is_none());
4853
4854            let mut dfs_paths_via_stack = Vec::new();
4855            let mut stack = vec![Path::new("")];
4856            while let Some(path) = stack.pop() {
4857                dfs_paths_via_stack.push(path);
4858                let ix = stack.len();
4859                for child_entry in self.child_entries(path) {
4860                    stack.insert(ix, &child_entry.path);
4861                }
4862            }
4863
4864            let dfs_paths_via_iter = self
4865                .entries_by_path
4866                .cursor::<()>()
4867                .map(|e| e.path.as_ref())
4868                .collect::<Vec<_>>();
4869            assert_eq!(dfs_paths_via_stack, dfs_paths_via_iter);
4870
4871            let dfs_paths_via_traversal = self
4872                .entries(true)
4873                .map(|e| e.path.as_ref())
4874                .collect::<Vec<_>>();
4875            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4876
4877            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4878                let ignore_parent_path =
4879                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4880                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4881                assert!(self
4882                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4883                    .is_some());
4884            }
4885        }
4886
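            // Returns the snapshot's entries as (path, inode, is_ignored) tuples,
            // sorted by path, so that independently scanned snapshots can be
            // compared without regard to entry ids.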
4887        fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4888            let mut paths = Vec::new();
4889            for entry in self.entries_by_path.cursor::<()>() {
4890                if include_ignored || !entry.is_ignored {
4891                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4892                }
4893            }
4894            paths.sort_by(|a, b| a.0.cmp(b.0));
4895            paths
4896        }
4897    }
4898
4899    mod git_tests {
4900        use super::*;
4901        use pretty_assertions::assert_eq;
4902
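            // Verifies that a repository's work directory and the git statuses of
            // its files are preserved when the work directory is renamed.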
4903        #[gpui::test]
4904        async fn test_rename_work_directory(cx: &mut TestAppContext) {
4905            let root = temp_tree(json!({
4906                "projects": {
4907                    "project1": {
4908                        "a": "",
4909                        "b": "",
4910                    }
4911                },
4912
4913            }));
4914            let root_path = root.path();
4915
4916            let http_client = FakeHttpClient::with_404_response();
4917            let client = cx.read(|cx| Client::new(http_client, cx));
4918            let tree = Worktree::local(
4919                client,
4920                root_path,
4921                true,
4922                Arc::new(RealFs),
4923                Default::default(),
4924                &mut cx.to_async(),
4925            )
4926            .await
4927            .unwrap();
4928
4929            let repo = git_init(&root_path.join("projects/project1"));
4930            git_add("a", &repo);
4931            git_commit("init", &repo);
4932            std::fs::write(root_path.join("projects/project1/a"), "aa").ok();
4933
4934            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4935                .await;
4936
4937            tree.flush_fs_events(cx).await;
4938
4939            cx.read(|cx| {
4940                let tree = tree.read(cx);
4941                let (work_dir, repo) = tree.repositories().next().unwrap();
4942                assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
4943                assert_eq!(
4944                    repo.status_for_file(tree, Path::new("projects/project1/a")),
4945                    Some(GitFileStatus::Modified)
4946                );
4947                assert_eq!(
4948                    repo.status_for_file(tree, Path::new("projects/project1/b")),
4949                    Some(GitFileStatus::Added)
4950                );
4951            });
4952
4953            std::fs::rename(
4954                root_path.join("projects/project1"),
4955                root_path.join("projects/project2"),
4956            )
4957            .ok();
4958            tree.flush_fs_events(cx).await;
4959
4960            cx.read(|cx| {
4961                let tree = tree.read(cx);
4962                let (work_dir, repo) = tree.repositories().next().unwrap();
4963                assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
4964                assert_eq!(
4965                    repo.status_for_file(tree, Path::new("projects/project2/a")),
4966                    Some(GitFileStatus::Modified)
4967                );
4968                assert_eq!(
4969                    repo.status_for_file(tree, Path::new("projects/project2/b")),
4970                    Some(GitFileStatus::Added)
4971                );
4972            });
4973        }
4974
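            // Verifies that paths resolve to their innermost enclosing repository,
            // that entries are paired with the correct repositories, and that
            // repository state is updated or removed as `.git` changes on disk.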
4975        #[gpui::test]
4976        async fn test_git_repository_for_path(cx: &mut TestAppContext) {
4977            let root = temp_tree(json!({
4978                "c.txt": "",
4979                "dir1": {
4980                    ".git": {},
4981                    "deps": {
4982                        "dep1": {
4983                            ".git": {},
4984                            "src": {
4985                                "a.txt": ""
4986                            }
4987                        }
4988                    },
4989                    "src": {
4990                        "b.txt": ""
4991                    }
4992                },
4993            }));
4994
4995            let http_client = FakeHttpClient::with_404_response();
4996            let client = cx.read(|cx| Client::new(http_client, cx));
4997            let tree = Worktree::local(
4998                client,
4999                root.path(),
5000                true,
5001                Arc::new(RealFs),
5002                Default::default(),
5003                &mut cx.to_async(),
5004            )
5005            .await
5006            .unwrap();
5007
5008            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
5009                .await;
5010            tree.flush_fs_events(cx).await;
5011
5012            tree.read_with(cx, |tree, _cx| {
5013                let tree = tree.as_local().unwrap();
5014
5015                assert!(tree.repository_for_path("c.txt".as_ref()).is_none());
5016
5017                let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
5018                assert_eq!(
5019                    entry
5020                        .work_directory(tree)
5021                        .map(|directory| directory.as_ref().to_owned()),
5022                    Some(Path::new("dir1").to_owned())
5023                );
5024
5025                let entry = tree
5026                    .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
5027                    .unwrap();
5028                assert_eq!(
5029                    entry
5030                        .work_directory(tree)
5031                        .map(|directory| directory.as_ref().to_owned()),
5032                    Some(Path::new("dir1/deps/dep1").to_owned())
5033                );
5034
5035                let entries = tree.files(false, 0);
5036
5037                let paths_with_repos = tree
5038                    .entries_with_repositories(entries)
5039                    .map(|(entry, repo)| {
5040                        (
5041                            entry.path.as_ref(),
5042                            repo.and_then(|repo| {
5043                                repo.work_directory(&tree)
5044                                    .map(|work_directory| work_directory.0.to_path_buf())
5045                            }),
5046                        )
5047                    })
5048                    .collect::<Vec<_>>();
5049
5050                assert_eq!(
5051                    paths_with_repos,
5052                    &[
5053                        (Path::new("c.txt"), None),
5054                        (
5055                            Path::new("dir1/deps/dep1/src/a.txt"),
5056                            Some(Path::new("dir1/deps/dep1").into())
5057                        ),
5058                        (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
5059                    ]
5060                );
5061            });
5062
5063            let repo_update_events = Arc::new(Mutex::new(vec![]));
5064            tree.update(cx, |_, cx| {
5065                let repo_update_events = repo_update_events.clone();
5066                cx.subscribe(&tree, move |_, _, event, _| {
5067                    if let Event::UpdatedGitRepositories(update) = event {
5068                        repo_update_events.lock().push(update.clone());
5069                    }
5070                })
5071                .detach();
5072            });
5073
5074            std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
5075            tree.flush_fs_events(cx).await;
5076
5077            assert_eq!(
5078                repo_update_events.lock()[0]
5079                    .iter()
5080                    .map(|e| e.0.clone())
5081                    .collect::<Vec<Arc<Path>>>(),
5082                vec![Path::new("dir1").into()]
5083            );
5084
5085            std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
5086            tree.flush_fs_events(cx).await;
5087
5088            tree.read_with(cx, |tree, _cx| {
5089                let tree = tree.as_local().unwrap();
5090
5091                assert!(tree
5092                    .repository_for_path("dir1/src/b.txt".as_ref())
5093                    .is_none());
5094            });
5095        }
5096
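            // Verifies that git statuses in the worktree snapshot stay in sync
            // with the repository across index changes, commits, resets,
            // ignore-rule updates, and file and directory renames.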
5097        #[gpui::test]
5098        async fn test_git_status(cx: &mut TestAppContext) {
5099            const IGNORE_RULE: &str = "**/target";
5100
5101            let root = temp_tree(json!({
5102                "project": {
5103                    "a.txt": "a",
5104                    "b.txt": "bb",
5105                    "c": {
5106                        "d": {
5107                            "e.txt": "eee"
5108                        }
5109                    },
5110                    "f.txt": "ffff",
5111                    "target": {
5112                        "build_file": "???"
5113                    },
5114                    ".gitignore": IGNORE_RULE
5115                },
5116
5117            }));
5118
5119            let http_client = FakeHttpClient::with_404_response();
5120            let client = cx.read(|cx| Client::new(http_client, cx));
5121            let tree = Worktree::local(
5122                client,
5123                root.path(),
5124                true,
5125                Arc::new(RealFs),
5126                Default::default(),
5127                &mut cx.to_async(),
5128            )
5129            .await
5130            .unwrap();
5131
5132            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
5133                .await;
5134
5135            const A_TXT: &str = "a.txt";
5136            const B_TXT: &str = "b.txt";
5137            const E_TXT: &str = "c/d/e.txt";
5138            const F_TXT: &str = "f.txt";
5139            const DOTGITIGNORE: &str = ".gitignore";
5140            const BUILD_FILE: &str = "target/build_file";
5141
5142            let work_dir = root.path().join("project");
5143            let mut repo = git_init(work_dir.as_path());
5144            repo.add_ignore_rule(IGNORE_RULE).unwrap();
5145            git_add(Path::new(A_TXT), &repo);
5146            git_add(Path::new(E_TXT), &repo);
5147            git_add(Path::new(DOTGITIGNORE), &repo);
5148            git_commit("Initial commit", &repo);
5149
5150            std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
5151
5152            tree.flush_fs_events(cx).await;
5153
5154            // Check that the right git state is observed on startup
5155            tree.read_with(cx, |tree, _cx| {
5156                let snapshot = tree.snapshot();
5157                assert_eq!(snapshot.repository_entries.iter().count(), 1);
5158                let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
5159                assert_eq!(dir.0.as_ref(), Path::new("project"));
5160
5161                assert_eq!(repo.statuses.iter().count(), 3);
5162                assert_eq!(
5163                    repo.statuses.get(&Path::new(A_TXT).into()),
5164                    Some(&GitFileStatus::Modified)
5165                );
5166                assert_eq!(
5167                    repo.statuses.get(&Path::new(B_TXT).into()),
5168                    Some(&GitFileStatus::Added)
5169                );
5170                assert_eq!(
5171                    repo.statuses.get(&Path::new(F_TXT).into()),
5172                    Some(&GitFileStatus::Added)
5173                );
5174            });
5175
5176            git_add(Path::new(A_TXT), &repo);
5177            git_add(Path::new(B_TXT), &repo);
5178            git_commit("Committing modified and added", &repo);
5179            tree.flush_fs_events(cx).await;
5180
5181            // Check that repo-only changes are tracked
5182            tree.read_with(cx, |tree, _cx| {
5183                let snapshot = tree.snapshot();
5184                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5185
5186                assert_eq!(repo.statuses.iter().count(), 1);
5187                assert_eq!(
5188                    repo.statuses.get(&Path::new(F_TXT).into()),
5189                    Some(&GitFileStatus::Added)
5190                );
5191            });
5192
5193            git_reset(0, &repo);
5194            git_remove_index(Path::new(B_TXT), &repo);
5195            git_stash(&mut repo);
5196            std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
5197            std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
5198            tree.flush_fs_events(cx).await;
5199
5200            // Check that more complex repo changes are tracked
5201            tree.read_with(cx, |tree, _cx| {
5202                let snapshot = tree.snapshot();
5203                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5204
5205                assert_eq!(repo.statuses.iter().count(), 3);
5206                assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
5207                assert_eq!(
5208                    repo.statuses.get(&Path::new(B_TXT).into()),
5209                    Some(&GitFileStatus::Added)
5210                );
5211                assert_eq!(
5212                    repo.statuses.get(&Path::new(E_TXT).into()),
5213                    Some(&GitFileStatus::Modified)
5214                );
5215                assert_eq!(
5216                    repo.statuses.get(&Path::new(F_TXT).into()),
5217                    Some(&GitFileStatus::Added)
5218                );
5219            });
5220
5221            std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
5222            std::fs::remove_dir_all(work_dir.join("c")).unwrap();
5223            std::fs::write(
5224                work_dir.join(DOTGITIGNORE),
5225                [IGNORE_RULE, "f.txt"].join("\n"),
5226            )
5227            .unwrap();
5228
5229            git_add(Path::new(DOTGITIGNORE), &repo);
5230            git_commit("Committing modified git ignore", &repo);
5231
5232            tree.flush_fs_events(cx).await;
5233
5234            // Check that deleted and newly-ignored files no longer produce statuses
5235            tree.read_with(cx, |tree, _cx| {
5236                let snapshot = tree.snapshot();
5237                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5238
5239                assert_eq!(repo.statuses.iter().count(), 0);
5240            });
5241
5242            let mut renamed_dir_name = "first_directory/second_directory";
5243            const RENAMED_FILE: &str = "rf.txt";
5244
5245            std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
5246            std::fs::write(
5247                work_dir.join(renamed_dir_name).join(RENAMED_FILE),
5248                "new-contents",
5249            )
5250            .unwrap();
5251
5252            tree.flush_fs_events(cx).await;
5253
5254            tree.read_with(cx, |tree, _cx| {
5255                let snapshot = tree.snapshot();
5256                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5257
5258                assert_eq!(repo.statuses.iter().count(), 1);
5259                assert_eq!(
5260                    repo.statuses
5261                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5262                    Some(&GitFileStatus::Added)
5263                );
5264            });
5265
5266            renamed_dir_name = "new_first_directory/second_directory";
5267
5268            std::fs::rename(
5269                work_dir.join("first_directory"),
5270                work_dir.join("new_first_directory"),
5271            )
5272            .unwrap();
5273
5274            tree.flush_fs_events(cx).await;
5275
5276            tree.read_with(cx, |tree, _cx| {
5277                let snapshot = tree.snapshot();
5278                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5279
5280                assert_eq!(repo.statuses.iter().count(), 1);
5281                assert_eq!(
5282                    repo.statuses
5283                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5284                    Some(&GitFileStatus::Added)
5285                );
5286            });
5287        }
5288
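            // Initializes a new git repository at the given path.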
5289        #[track_caller]
5290        fn git_init(path: &Path) -> git2::Repository {
5291            git2::Repository::init(path).expect("Failed to initialize git repository")
5292        }
5293
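            // Stages the given path in the repository's index.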
5294        #[track_caller]
5295        fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5296            let path = path.as_ref();
5297            let mut index = repo.index().expect("Failed to get index");
5298            index.add_path(path).expect("Failed to add path to index");
5299            index.write().expect("Failed to write index");
5300        }
5301
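            // Removes the given path from the repository's index.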
5302        #[track_caller]
5303        fn git_remove_index(path: &Path, repo: &git2::Repository) {
5304            let mut index = repo.index().expect("Failed to get index");
5305            index.remove_path(path).expect("Failed to remove path from index");
5306            index.write().expect("Failed to write index");
5307        }
5308
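            // Commits the current index, using the existing HEAD commit as the
            // parent if one exists.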
5309        #[track_caller]
5310        fn git_commit(msg: &'static str, repo: &git2::Repository) {
5311            use git2::Signature;
5312
5313            let signature = Signature::now("test", "test@zed.dev").unwrap();
5314            let oid = repo.index().unwrap().write_tree().unwrap();
5315            let tree = repo.find_tree(oid).unwrap();
5316            if let Ok(head) = repo.head() {
5317                let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5318
5319                let parent_commit = parent_obj.as_commit().unwrap();
5320
5321                repo.commit(
5322                    Some("HEAD"),
5323                    &signature,
5324                    &signature,
5325                    msg,
5326                    &tree,
5327                    &[parent_commit],
5328                )
5329                .expect("Failed to commit with parent");
5330            } else {
5331                repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5332                    .expect("Failed to commit");
5333            }
5334        }
5335
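            // Stashes the current working-directory changes.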
5336        #[track_caller]
5337        fn git_stash(repo: &mut git2::Repository) {
5338            use git2::Signature;
5339
5340            let signature = Signature::now("test", "test@zed.dev").unwrap();
5341            repo.stash_save(&signature, "N/A", None)
5342                .expect("Failed to stash");
5343        }
5344
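            // Performs a soft reset of HEAD to one of the current commit's
            // parents, selected by `offset`.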
5345        #[track_caller]
5346        fn git_reset(offset: usize, repo: &git2::Repository) {
5347            let head = repo.head().expect("Couldn't get repo head");
5348            let object = head.peel(git2::ObjectType::Commit).unwrap();
5349            let commit = object.as_commit().unwrap();
5350            let new_head = commit
5351                .parents()
5352                .nth(offset)
5353                .expect("Not enough history");
5358            repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5359                .expect("Could not reset");
5360        }
5361
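            // Returns the repository's current statuses, keyed by file path.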
5362        #[allow(dead_code)]
5363        #[track_caller]
5364        fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5365            repo.statuses(None)
5366                .unwrap()
5367                .iter()
5368                .map(|status| (status.path().unwrap().to_string(), status.status()))
5369                .collect()
5370        }
5371    }
5372}