worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    FutureExt, Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TakeUntilExt};
  59
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  61pub struct WorktreeId(usize);
  62
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
  68pub struct LocalWorktree {
  69    snapshot: LocalSnapshot,
  70    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
  71    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
  72    _background_scanner_task: Task<()>,
  73    share: Option<ShareState>,
  74    diagnostics: HashMap<
  75        Arc<Path>,
  76        Vec<(
  77            LanguageServerId,
  78            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
  79        )>,
  80    >,
  81    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  82    client: Arc<Client>,
  83    fs: Arc<dyn Fs>,
  84    visible: bool,
  85}
  86
  87pub struct RemoteWorktree {
  88    snapshot: Snapshot,
  89    background_snapshot: Arc<Mutex<Snapshot>>,
  90    project_id: u64,
  91    client: Arc<Client>,
  92    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
  93    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
  94    replica_id: ReplicaId,
  95    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  96    visible: bool,
  97    disconnected: bool,
  98}
  99
 100#[derive(Clone)]
 101pub struct Snapshot {
 102    id: WorktreeId,
 103    abs_path: Arc<Path>,
 104    root_name: String,
 105    root_char_bag: CharBag,
 106    entries_by_path: SumTree<Entry>,
 107    entries_by_id: SumTree<PathEntry>,
 108    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 109
 110    /// A number that increases every time the worktree begins scanning
 111    /// a set of paths from the filesystem. This scanning could be caused
 112    /// by some operation performed on the worktree, such as reading or
 113    /// writing a file, or by an event reported by the filesystem.
 114    scan_id: usize,
 115
 116    /// The latest scan id that has completed, and whose preceding scans
 117    /// have all completed. The current `scan_id` could be more than one
 118    /// greater than the `completed_scan_id` if operations are performed
 119    /// on the worktree while it is processing a file-system event.
 120    completed_scan_id: usize,
 121}
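
// Illustrative (non-normative) relationship between the two ids, assuming the
// background scanner bumps `scan_id` when it starts processing a batch of paths
// and copies it into `completed_scan_id` once that batch has finished:
//
//     scan_id: 5, completed_scan_id: 5   // idle, fully up to date
//     scan_id: 6, completed_scan_id: 5   // one scan in progress
//     scan_id: 8, completed_scan_id: 5   // more batches queued before scan 6 finished
//
// Remote peers rely on this to wait for a snapshot to catch up: see
// `RemoteWorktree::observed_snapshot`, which checks `completed_scan_id >= scan_id`.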
 122
 123#[derive(Clone, Debug, PartialEq, Eq)]
 124pub struct RepositoryEntry {
 125    pub(crate) work_directory: WorkDirectoryEntry,
 126    pub(crate) branch: Option<Arc<str>>,
 127    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
 128}
 129
 130fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
 131    proto::GitStatus::from_i32(git_status).map(|status| match status {
 132        proto::GitStatus::Added => GitFileStatus::Added,
 133        proto::GitStatus::Modified => GitFileStatus::Modified,
 134        proto::GitStatus::Conflict => GitFileStatus::Conflict,
 135    })
 136}
 137
 138impl RepositoryEntry {
 139    pub fn branch(&self) -> Option<Arc<str>> {
 140        self.branch.clone()
 141    }
 142
 143    pub fn work_directory_id(&self) -> ProjectEntryId {
 144        *self.work_directory
 145    }
 146
 147    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 148        snapshot
 149            .entry_for_id(self.work_directory_id())
 150            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 151    }
 152
 153    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 154        self.work_directory
 155            .relativize(snapshot, path)
 156            .and_then(|repo_path| {
 157                self.statuses
 158                    .iter_from(&repo_path)
 159                    .take_while(|(key, _)| key.starts_with(&repo_path))
 160                    // Short-circuit once we've found the highest-severity status (a conflict)
 161                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
 162                    .map(|(_, status)| status)
 163                    .reduce(
 164                        |status_first, status_second| match (status_first, status_second) {
 165                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
 166                                &GitFileStatus::Conflict
 167                            }
 168                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
 169                                &GitFileStatus::Modified
 170                            }
 171                            _ => &GitFileStatus::Added,
 172                        },
 173                    )
 174                    .copied()
 175            })
 176    }
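
    // Illustrative example of the fold above (hypothetical paths): with statuses
    // { "src/a.rs": Added, "src/b.rs": Modified }, querying the directory path
    // "src" yields Modified, since severities fold as Conflict > Modified > Added
    // and iteration short-circuits as soon as a Conflict is seen.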
 177
 178    #[cfg(any(test, feature = "test-support"))]
 179    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 180        self.work_directory
 181            .relativize(snapshot, path)
 182            .and_then(|repo_path| self.statuses.get(&repo_path))
 183            .cloned()
 184    }
 185
 186    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
 187        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
 188        let mut removed_statuses: Vec<String> = Vec::new();
 189
 190        let mut self_statuses = self.statuses.iter().peekable();
 191        let mut other_statuses = other.statuses.iter().peekable();
 192        loop {
 193            match (self_statuses.peek(), other_statuses.peek()) {
 194                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
 195                    match Ord::cmp(self_repo_path, other_repo_path) {
 196                        Ordering::Less => {
 197                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
 198                            self_statuses.next();
 199                        }
 200                        Ordering::Equal => {
 201                            if self_status != other_status {
 202                                updated_statuses
 203                                    .push(make_status_entry(self_repo_path, self_status));
 204                            }
 205
 206                            self_statuses.next();
 207                            other_statuses.next();
 208                        }
 209                        Ordering::Greater => {
 210                            removed_statuses.push(make_repo_path(other_repo_path));
 211                            other_statuses.next();
 212                        }
 213                    }
 214                }
 215                (Some((self_repo_path, self_status)), None) => {
 216                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
 217                    self_statuses.next();
 218                }
 219                (None, Some((other_repo_path, _))) => {
 220                    removed_statuses.push(make_repo_path(other_repo_path));
 221                    other_statuses.next();
 222                }
 223                (None, None) => break,
 224            }
 225        }
 226
 227        proto::RepositoryEntry {
 228            work_directory_id: self.work_directory_id().to_proto(),
 229            branch: self.branch.as_ref().map(|branch| branch.to_string()),
 230            removed_repo_paths: removed_statuses,
 231            updated_statuses,
 232        }
 233    }
 234}
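
// Rough sketch of `build_update`'s diff semantics (hypothetical paths): if the
// previous state (`other`) held { "a.rs": Modified } and the new state (`self`)
// holds { "a.rs": Conflict, "b.rs": Added }, then both "a.rs" and "b.rs" end up in
// `updated_statuses`, while a path present only in `other` would instead be listed
// in `removed_repo_paths`.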
 235
 236fn make_repo_path(path: &RepoPath) -> String {
 237    path.as_os_str().to_string_lossy().to_string()
 238}
 239
 240fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
 241    proto::StatusEntry {
 242        repo_path: make_repo_path(path),
 243        status: match status {
 244            GitFileStatus::Added => proto::GitStatus::Added.into(),
 245            GitFileStatus::Modified => proto::GitStatus::Modified.into(),
 246            GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
 247        },
 248    }
 249}
 250
 251impl From<&RepositoryEntry> for proto::RepositoryEntry {
 252    fn from(value: &RepositoryEntry) -> Self {
 253        proto::RepositoryEntry {
 254            work_directory_id: value.work_directory.to_proto(),
 255            branch: value.branch.as_ref().map(|branch| branch.to_string()),
 256            updated_statuses: value
 257                .statuses
 258                .iter()
 259                .map(|(repo_path, status)| make_status_entry(repo_path, status))
 260                .collect(),
 261            removed_repo_paths: Default::default(),
 262        }
 263    }
 264}
 265
 266/// This path corresponds to the 'content path' (the folder that contains the `.git`), relative to the worktree root.
 267#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 268pub struct RepositoryWorkDirectory(Arc<Path>);
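
// For example (illustrative only): a repository rooted at the worktree root has a
// work directory of "" (see the `Default` impl below), whereas a repository checked
// out at `vendor/some-dep` inside the worktree would have that relative path here.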
 269
 270impl Default for RepositoryWorkDirectory {
 271    fn default() -> Self {
 272        RepositoryWorkDirectory(Arc::from(Path::new("")))
 273    }
 274}
 275
 276impl AsRef<Path> for RepositoryWorkDirectory {
 277    fn as_ref(&self) -> &Path {
 278        self.0.as_ref()
 279    }
 280}
 281
 282#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 283pub struct WorkDirectoryEntry(ProjectEntryId);
 284
 285impl WorkDirectoryEntry {
 286    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 287        worktree.entry_for_id(self.0).and_then(|entry| {
 288            path.strip_prefix(&entry.path)
 289                .ok()
 290                .map(move |path| path.into())
 291        })
 292    }
 293}
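
// Illustrative behavior of `relativize` (hypothetical paths): with a work directory
// entry at `backend`, the worktree-relative path `backend/src/main.rs` becomes the
// repo path `src/main.rs`; a path outside the work directory, or a missing entry,
// yields `None`.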
 294
 295impl Deref for WorkDirectoryEntry {
 296    type Target = ProjectEntryId;
 297
 298    fn deref(&self) -> &Self::Target {
 299        &self.0
 300    }
 301}
 302
 303impl From<ProjectEntryId> for WorkDirectoryEntry {
 304    fn from(value: ProjectEntryId) -> Self {
 305        WorkDirectoryEntry(value)
 306    }
 307}
 308
 309#[derive(Debug, Clone)]
 310pub struct LocalSnapshot {
 311    snapshot: Snapshot,
 312    /// All of the gitignore files in the worktree, indexed by the absolute path of their parent directory.
 313    /// The boolean indicates whether the gitignore needs to be updated.
 314    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
 315    /// All of the git repositories in the worktree, indexed by the project entry
 316    /// id of their parent directory.
 317    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 318}
 319
 320pub struct BackgroundScannerState {
 321    snapshot: LocalSnapshot,
 322    /// The ids of all of the entries that were removed from the snapshot
 323    /// as part of the current update. These entry ids may be re-used
 324    /// if the same inode is discovered at a new path, or if the given
 325    /// path is re-created after being deleted.
 326    removed_entry_ids: HashMap<u64, ProjectEntryId>,
 327    changed_paths: Vec<Arc<Path>>,
 328    prev_snapshot: Snapshot,
 329}
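
// Illustrative scenario: if `foo.rs` (entry id 7, say) is deleted and a file with
// the same inode later appears at `bar.rs`, or `foo.rs` itself is re-created, the
// scanner can hand entry id 7 back out of `removed_entry_ids` rather than minting a
// new id, keeping references to that entry stable across the change.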
 330
 331#[derive(Debug, Clone)]
 332pub struct LocalRepositoryEntry {
 333    pub(crate) work_dir_scan_id: usize,
 334    pub(crate) git_dir_scan_id: usize,
 335    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
 336    /// Path to the actual .git folder.
 337    /// Note: if .git is a file (e.g. in a submodule), this points to the folder indicated by that file.
 338    pub(crate) git_dir_path: Arc<Path>,
 339}
 340
 341impl LocalRepositoryEntry {
 342    // Note: the `path` argument must be relative to the worktree root.
 343    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 344        path.starts_with(self.git_dir_path.as_ref())
 345    }
 346}
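
// Illustrative example: for a repository rooted at the worktree root, `git_dir_path`
// is typically `.git`; when `.git` is a file (a `gitdir:` pointer, as with
// submodules), it is the directory that pointer resolves to instead.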
 347
 348impl Deref for LocalSnapshot {
 349    type Target = Snapshot;
 350
 351    fn deref(&self) -> &Self::Target {
 352        &self.snapshot
 353    }
 354}
 355
 356impl DerefMut for LocalSnapshot {
 357    fn deref_mut(&mut self) -> &mut Self::Target {
 358        &mut self.snapshot
 359    }
 360}
 361
 362enum ScanState {
 363    Started,
 364    Updated {
 365        snapshot: LocalSnapshot,
 366        changes: UpdatedEntriesSet,
 367        barrier: Option<barrier::Sender>,
 368        scanning: bool,
 369    },
 370}
 371
 372struct ShareState {
 373    project_id: u64,
 374    snapshots_tx:
 375        mpsc::UnboundedSender<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>,
 376    resume_updates: watch::Sender<()>,
 377    _maintain_remote_snapshot: Task<Option<()>>,
 378}
 379
 380pub enum Event {
 381    UpdatedEntries(UpdatedEntriesSet),
 382    UpdatedGitRepositories(UpdatedGitRepositoriesSet),
 383}
 384
 385impl Entity for Worktree {
 386    type Event = Event;
 387}
 388
 389impl Worktree {
 390    pub async fn local(
 391        client: Arc<Client>,
 392        path: impl Into<Arc<Path>>,
 393        visible: bool,
 394        fs: Arc<dyn Fs>,
 395        next_entry_id: Arc<AtomicUsize>,
 396        cx: &mut AsyncAppContext,
 397    ) -> Result<ModelHandle<Self>> {
 398        // After determining whether the root entry is a file or a directory, populate the
 399        // snapshot's "root name", which is used for fuzzy matching.
 400        let abs_path = path.into();
 401        let metadata = fs
 402            .metadata(&abs_path)
 403            .await
 404            .context("failed to stat worktree path")?;
 405
 406        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
 407            let root_name = abs_path
 408                .file_name()
 409                .map_or(String::new(), |f| f.to_string_lossy().to_string());
 410
 411            let mut snapshot = LocalSnapshot {
 412                ignores_by_parent_abs_path: Default::default(),
 413                git_repositories: Default::default(),
 414                snapshot: Snapshot {
 415                    id: WorktreeId::from_usize(cx.model_id()),
 416                    abs_path: abs_path.clone(),
 417                    root_name: root_name.clone(),
 418                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
 419                    entries_by_path: Default::default(),
 420                    entries_by_id: Default::default(),
 421                    repository_entries: Default::default(),
 422                    scan_id: 1,
 423                    completed_scan_id: 0,
 424                },
 425            };
 426
 427            if let Some(metadata) = metadata {
 428                snapshot.insert_entry(
 429                    Entry::new(
 430                        Arc::from(Path::new("")),
 431                        &metadata,
 432                        &next_entry_id,
 433                        snapshot.root_char_bag,
 434                    ),
 435                    fs.as_ref(),
 436                );
 437            }
 438
 439            let (path_changes_tx, path_changes_rx) = channel::unbounded();
 440            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 441
 442            cx.spawn_weak(|this, mut cx| async move {
 443                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
 444                    this.update(&mut cx, |this, cx| {
 445                        let this = this.as_local_mut().unwrap();
 446                        match state {
 447                            ScanState::Started => {
 448                                *this.is_scanning.0.borrow_mut() = true;
 449                            }
 450                            ScanState::Updated {
 451                                snapshot,
 452                                changes,
 453                                barrier,
 454                                scanning,
 455                            } => {
 456                                *this.is_scanning.0.borrow_mut() = scanning;
 457                                this.set_snapshot(snapshot, changes, cx);
 458                                drop(barrier);
 459                            }
 460                        }
 461                        cx.notify();
 462                    });
 463                }
 464            })
 465            .detach();
 466
 467            let background_scanner_task = cx.background().spawn({
 468                let fs = fs.clone();
 469                let snapshot = snapshot.clone();
 470                let background = cx.background().clone();
 471                async move {
 472                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 473                    BackgroundScanner::new(
 474                        snapshot,
 475                        next_entry_id,
 476                        fs,
 477                        scan_states_tx,
 478                        background,
 479                        path_changes_rx,
 480                    )
 481                    .run(events)
 482                    .await;
 483                }
 484            });
 485
 486            Worktree::Local(LocalWorktree {
 487                snapshot,
 488                is_scanning: watch::channel_with(true),
 489                share: None,
 490                path_changes_tx,
 491                _background_scanner_task: background_scanner_task,
 492                diagnostics: Default::default(),
 493                diagnostic_summaries: Default::default(),
 494                client,
 495                fs,
 496                visible,
 497            })
 498        }))
 499    }
 500
 501    pub fn remote(
 502        project_remote_id: u64,
 503        replica_id: ReplicaId,
 504        worktree: proto::WorktreeMetadata,
 505        client: Arc<Client>,
 506        cx: &mut AppContext,
 507    ) -> ModelHandle<Self> {
 508        cx.add_model(|cx: &mut ModelContext<Self>| {
 509            let snapshot = Snapshot {
 510                id: WorktreeId(worktree.id as usize),
 511                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
 512                root_name: worktree.root_name.clone(),
 513                root_char_bag: worktree
 514                    .root_name
 515                    .chars()
 516                    .map(|c| c.to_ascii_lowercase())
 517                    .collect(),
 518                entries_by_path: Default::default(),
 519                entries_by_id: Default::default(),
 520                repository_entries: Default::default(),
 521                scan_id: 1,
 522                completed_scan_id: 0,
 523            };
 524
 525            let (updates_tx, mut updates_rx) = mpsc::unbounded();
 526            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
 527            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 528
 529            cx.background()
 530                .spawn({
 531                    let background_snapshot = background_snapshot.clone();
 532                    async move {
 533                        while let Some(update) = updates_rx.next().await {
 534                            if let Err(error) =
 535                                background_snapshot.lock().apply_remote_update(update)
 536                            {
 537                                log::error!("error applying worktree update: {}", error);
 538                            }
 539                            snapshot_updated_tx.send(()).await.ok();
 540                        }
 541                    }
 542                })
 543                .detach();
 544
 545            cx.spawn_weak(|this, mut cx| async move {
 546                while (snapshot_updated_rx.recv().await).is_some() {
 547                    if let Some(this) = this.upgrade(&cx) {
 548                        this.update(&mut cx, |this, cx| {
 549                            let this = this.as_remote_mut().unwrap();
 550                            this.snapshot = this.background_snapshot.lock().clone();
 551                            cx.emit(Event::UpdatedEntries(Arc::from([])));
 552                            cx.notify();
 553                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 554                                if this.observed_snapshot(*scan_id) {
 555                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 556                                    let _ = tx.send(());
 557                                } else {
 558                                    break;
 559                                }
 560                            }
 561                        });
 562                    } else {
 563                        break;
 564                    }
 565                }
 566            })
 567            .detach();
 568
 569            Worktree::Remote(RemoteWorktree {
 570                project_id: project_remote_id,
 571                replica_id,
 572                snapshot: snapshot.clone(),
 573                background_snapshot,
 574                updates_tx: Some(updates_tx),
 575                snapshot_subscriptions: Default::default(),
 576                client: client.clone(),
 577                diagnostic_summaries: Default::default(),
 578                visible: worktree.visible,
 579                disconnected: false,
 580            })
 581        })
 582    }
 583
 584    pub fn as_local(&self) -> Option<&LocalWorktree> {
 585        if let Worktree::Local(worktree) = self {
 586            Some(worktree)
 587        } else {
 588            None
 589        }
 590    }
 591
 592    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 593        if let Worktree::Remote(worktree) = self {
 594            Some(worktree)
 595        } else {
 596            None
 597        }
 598    }
 599
 600    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 601        if let Worktree::Local(worktree) = self {
 602            Some(worktree)
 603        } else {
 604            None
 605        }
 606    }
 607
 608    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 609        if let Worktree::Remote(worktree) = self {
 610            Some(worktree)
 611        } else {
 612            None
 613        }
 614    }
 615
 616    pub fn is_local(&self) -> bool {
 617        matches!(self, Worktree::Local(_))
 618    }
 619
 620    pub fn is_remote(&self) -> bool {
 621        !self.is_local()
 622    }
 623
 624    pub fn snapshot(&self) -> Snapshot {
 625        match self {
 626            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 627            Worktree::Remote(worktree) => worktree.snapshot(),
 628        }
 629    }
 630
 631    pub fn scan_id(&self) -> usize {
 632        match self {
 633            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 634            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 635        }
 636    }
 637
 638    pub fn completed_scan_id(&self) -> usize {
 639        match self {
 640            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 641            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 642        }
 643    }
 644
 645    pub fn is_visible(&self) -> bool {
 646        match self {
 647            Worktree::Local(worktree) => worktree.visible,
 648            Worktree::Remote(worktree) => worktree.visible,
 649        }
 650    }
 651
 652    pub fn replica_id(&self) -> ReplicaId {
 653        match self {
 654            Worktree::Local(_) => 0,
 655            Worktree::Remote(worktree) => worktree.replica_id,
 656        }
 657    }
 658
 659    pub fn diagnostic_summaries(
 660        &self,
 661    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 662        match self {
 663            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 664            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 665        }
 666        .iter()
 667        .flat_map(|(path, summaries)| {
 668            summaries
 669                .iter()
 670                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 671        })
 672    }
 673
 674    pub fn abs_path(&self) -> Arc<Path> {
 675        match self {
 676            Worktree::Local(worktree) => worktree.abs_path.clone(),
 677            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 678        }
 679    }
 680}
 681
 682impl LocalWorktree {
 683    pub fn contains_abs_path(&self, path: &Path) -> bool {
 684        path.starts_with(&self.abs_path)
 685    }
 686
 687    fn absolutize(&self, path: &Path) -> PathBuf {
 688        if path.file_name().is_some() {
 689            self.abs_path.join(path)
 690        } else {
 691            self.abs_path.to_path_buf()
 692        }
 693    }
 694
 695    pub(crate) fn load_buffer(
 696        &mut self,
 697        id: u64,
 698        path: &Path,
 699        cx: &mut ModelContext<Worktree>,
 700    ) -> Task<Result<ModelHandle<Buffer>>> {
 701        let path = Arc::from(path);
 702        cx.spawn(move |this, mut cx| async move {
 703            let (file, contents, diff_base) = this
 704                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
 705                .await?;
 706            let text_buffer = cx
 707                .background()
 708                .spawn(async move { text::Buffer::new(0, id, contents) })
 709                .await;
 710            Ok(cx.add_model(|_| Buffer::build(text_buffer, diff_base, Some(Arc::new(file)))))
 711        })
 712    }
 713
 714    pub fn diagnostics_for_path(
 715        &self,
 716        path: &Path,
 717    ) -> Vec<(
 718        LanguageServerId,
 719        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 720    )> {
 721        self.diagnostics.get(path).cloned().unwrap_or_default()
 722    }
 723
 724    pub fn clear_diagnostics_for_language_server(
 725        &mut self,
 726        server_id: LanguageServerId,
 727        _: &mut ModelContext<Worktree>,
 728    ) {
 729        let worktree_id = self.id().to_proto();
 730        self.diagnostic_summaries
 731            .retain(|path, summaries_by_server_id| {
 732                if summaries_by_server_id.remove(&server_id).is_some() {
 733                    if let Some(share) = self.share.as_ref() {
 734                        self.client
 735                            .send(proto::UpdateDiagnosticSummary {
 736                                project_id: share.project_id,
 737                                worktree_id,
 738                                summary: Some(proto::DiagnosticSummary {
 739                                    path: path.to_string_lossy().to_string(),
 740                                    language_server_id: server_id.0 as u64,
 741                                    error_count: 0,
 742                                    warning_count: 0,
 743                                }),
 744                            })
 745                            .log_err();
 746                    }
 747                    !summaries_by_server_id.is_empty()
 748                } else {
 749                    true
 750                }
 751            });
 752
 753        self.diagnostics.retain(|_, diagnostics_by_server_id| {
 754            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 755                diagnostics_by_server_id.remove(ix);
 756                !diagnostics_by_server_id.is_empty()
 757            } else {
 758                true
 759            }
 760        });
 761    }
 762
 763    pub fn update_diagnostics(
 764        &mut self,
 765        server_id: LanguageServerId,
 766        worktree_path: Arc<Path>,
 767        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 768        _: &mut ModelContext<Worktree>,
 769    ) -> Result<bool> {
 770        let summaries_by_server_id = self
 771            .diagnostic_summaries
 772            .entry(worktree_path.clone())
 773            .or_default();
 774
 775        let old_summary = summaries_by_server_id
 776            .remove(&server_id)
 777            .unwrap_or_default();
 778
 779        let new_summary = DiagnosticSummary::new(&diagnostics);
 780        if new_summary.is_empty() {
 781            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
 782                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 783                    diagnostics_by_server_id.remove(ix);
 784                }
 785                if diagnostics_by_server_id.is_empty() {
 786                    self.diagnostics.remove(&worktree_path);
 787                }
 788            }
 789        } else {
 790            summaries_by_server_id.insert(server_id, new_summary);
 791            let diagnostics_by_server_id =
 792                self.diagnostics.entry(worktree_path.clone()).or_default();
 793            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 794                Ok(ix) => {
 795                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
 796                }
 797                Err(ix) => {
 798                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
 799                }
 800            }
 801        }
 802
 803        if !old_summary.is_empty() || !new_summary.is_empty() {
 804            if let Some(share) = self.share.as_ref() {
 805                self.client
 806                    .send(proto::UpdateDiagnosticSummary {
 807                        project_id: share.project_id,
 808                        worktree_id: self.id().to_proto(),
 809                        summary: Some(proto::DiagnosticSummary {
 810                            path: worktree_path.to_string_lossy().to_string(),
 811                            language_server_id: server_id.0 as u64,
 812                            error_count: new_summary.error_count as u32,
 813                            warning_count: new_summary.warning_count as u32,
 814                        }),
 815                    })
 816                    .log_err();
 817            }
 818        }
 819
 820        Ok(!old_summary.is_empty() || !new_summary.is_empty())
 821    }
 822
 823    fn set_snapshot(
 824        &mut self,
 825        new_snapshot: LocalSnapshot,
 826        entry_changes: UpdatedEntriesSet,
 827        cx: &mut ModelContext<Worktree>,
 828    ) {
 829        let repo_changes = self.changed_repos(&self.snapshot, &new_snapshot);
 830
 831        self.snapshot = new_snapshot;
 832
 833        if let Some(share) = self.share.as_mut() {
 834            share
 835                .snapshots_tx
 836                .unbounded_send((
 837                    self.snapshot.clone(),
 838                    entry_changes.clone(),
 839                    repo_changes.clone(),
 840                ))
 841                .ok();
 842        }
 843
 844        if !entry_changes.is_empty() {
 845            cx.emit(Event::UpdatedEntries(entry_changes));
 846        }
 847        if !repo_changes.is_empty() {
 848            cx.emit(Event::UpdatedGitRepositories(repo_changes));
 849        }
 850    }
 851
 852    fn changed_repos(
 853        &self,
 854        old_snapshot: &LocalSnapshot,
 855        new_snapshot: &LocalSnapshot,
 856    ) -> UpdatedGitRepositoriesSet {
 857        let mut changes = Vec::new();
 858        let mut old_repos = old_snapshot.git_repositories.iter().peekable();
 859        let mut new_repos = new_snapshot.git_repositories.iter().peekable();
 860        loop {
 861            match (new_repos.peek().map(clone), old_repos.peek().map(clone)) {
 862                (Some((new_entry_id, new_repo)), Some((old_entry_id, old_repo))) => {
 863                    match Ord::cmp(&new_entry_id, &old_entry_id) {
 864                        Ordering::Less => {
 865                            if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
 866                                changes.push((
 867                                    entry.path.clone(),
 868                                    GitRepositoryChange {
 869                                        old_repository: None,
 870                                        git_dir_changed: true,
 871                                    },
 872                                ));
 873                            }
 874                            new_repos.next();
 875                        }
 876                        Ordering::Equal => {
 877                            let git_dir_changed =
 878                                new_repo.git_dir_scan_id != old_repo.git_dir_scan_id;
 879                            let work_dir_changed =
 880                                new_repo.work_dir_scan_id != old_repo.work_dir_scan_id;
 881                            if git_dir_changed || work_dir_changed {
 882                                if let Some(entry) = new_snapshot.entry_for_id(new_entry_id) {
 883                                    let old_repo = old_snapshot
 884                                        .repository_entries
 885                                        .get(&RepositoryWorkDirectory(entry.path.clone()))
 886                                        .cloned();
 887                                    changes.push((
 888                                        entry.path.clone(),
 889                                        GitRepositoryChange {
 890                                            old_repository: old_repo,
 891                                            git_dir_changed,
 892                                        },
 893                                    ));
 894                                }
 895                            }
 896                            new_repos.next();
 897                            old_repos.next();
 898                        }
 899                        Ordering::Greater => {
 900                            if let Some(entry) = old_snapshot.entry_for_id(old_entry_id) {
 901                                let old_repo = old_snapshot
 902                                    .repository_entries
 903                                    .get(&RepositoryWorkDirectory(entry.path.clone()))
 904                                    .cloned();
 905                                changes.push((
 906                                    entry.path.clone(),
 907                                    GitRepositoryChange {
 908                                        old_repository: old_repo,
 909                                        git_dir_changed: true,
 910                                    },
 911                                ));
 912                            }
 913                            old_repos.next();
 914                        }
 915                    }
 916                }
 917                (Some((entry_id, _)), None) => {
 918                    if let Some(entry) = new_snapshot.entry_for_id(entry_id) {
 919                        changes.push((
 920                            entry.path.clone(),
 921                            GitRepositoryChange {
 922                                old_repository: None,
 923                                git_dir_changed: true,
 924                            },
 925                        ));
 926                    }
 927                    new_repos.next();
 928                }
 929                (None, Some((entry_id, _))) => {
 930                    if let Some(entry) = old_snapshot.entry_for_id(entry_id) {
 931                        let old_repo = old_snapshot
 932                            .repository_entries
 933                            .get(&RepositoryWorkDirectory(entry.path.clone()))
 934                            .cloned();
 935                        changes.push((
 936                            entry.path.clone(),
 937                            GitRepositoryChange {
 938                                old_repository: old_repo,
 939                                git_dir_changed: true,
 940                            },
 941                        ));
 942                    }
 943                    old_repos.next();
 944                }
 945                (None, None) => break,
 946            }
 947        }
 948
 949        fn clone<T: Clone, U: Clone>(value: &(&T, &U)) -> (T, U) {
 950            (value.0.clone(), value.1.clone())
 951        }
 952
 953        changes.into()
 954    }
 955
 956    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 957        let mut is_scanning_rx = self.is_scanning.1.clone();
 958        async move {
 959            let mut is_scanning = *is_scanning_rx.borrow();
 960            while is_scanning {
 961                if let Some(value) = is_scanning_rx.recv().await {
 962                    is_scanning = value;
 963                } else {
 964                    break;
 965                }
 966            }
 967        }
 968    }
 969
 970    pub fn snapshot(&self) -> LocalSnapshot {
 971        self.snapshot.clone()
 972    }
 973
 974    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 975        proto::WorktreeMetadata {
 976            id: self.id().to_proto(),
 977            root_name: self.root_name().to_string(),
 978            visible: self.visible,
 979            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 980        }
 981    }
 982
 983    fn load(
 984        &self,
 985        path: &Path,
 986        cx: &mut ModelContext<Worktree>,
 987    ) -> Task<Result<(File, String, Option<String>)>> {
 988        let handle = cx.handle();
 989        let path = Arc::from(path);
 990        let abs_path = self.absolutize(&path);
 991        let fs = self.fs.clone();
 992        let snapshot = self.snapshot();
 993
 994        let mut index_task = None;
 995
 996        if let Some(repo) = snapshot.repository_for_path(&path) {
 997            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
 998            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
 999                let repo = repo.repo_ptr.to_owned();
1000                index_task = Some(
1001                    cx.background()
1002                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
1003                );
1004            }
1005        }
1006
1007        cx.spawn(|this, mut cx| async move {
1008            let text = fs.load(&abs_path).await?;
1009
1010            let diff_base = if let Some(index_task) = index_task {
1011                index_task.await
1012            } else {
1013                None
1014            };
1015
1016            // Eagerly populate the snapshot with an updated entry for the loaded file
1017            let entry = this
1018                .update(&mut cx, |this, cx| {
1019                    this.as_local().unwrap().refresh_entry(path, None, cx)
1020                })
1021                .await?;
1022
1023            Ok((
1024                File {
1025                    entry_id: entry.id,
1026                    worktree: handle,
1027                    path: entry.path,
1028                    mtime: entry.mtime,
1029                    is_local: true,
1030                    is_deleted: false,
1031                },
1032                text,
1033                diff_base,
1034            ))
1035        })
1036    }
1037
1038    pub fn save_buffer(
1039        &self,
1040        buffer_handle: ModelHandle<Buffer>,
1041        path: Arc<Path>,
1042        has_changed_file: bool,
1043        cx: &mut ModelContext<Worktree>,
1044    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1045        let handle = cx.handle();
1046        let buffer = buffer_handle.read(cx);
1047
1048        let rpc = self.client.clone();
1049        let buffer_id = buffer.remote_id();
1050        let project_id = self.share.as_ref().map(|share| share.project_id);
1051
1052        let text = buffer.as_rope().clone();
1053        let fingerprint = text.fingerprint();
1054        let version = buffer.version();
1055        let save = self.write_file(path, text, buffer.line_ending(), cx);
1056
1057        cx.as_mut().spawn(|mut cx| async move {
1058            let entry = save.await?;
1059
1060            if has_changed_file {
1061                let new_file = Arc::new(File {
1062                    entry_id: entry.id,
1063                    worktree: handle,
1064                    path: entry.path,
1065                    mtime: entry.mtime,
1066                    is_local: true,
1067                    is_deleted: false,
1068                });
1069
1070                if let Some(project_id) = project_id {
1071                    rpc.send(proto::UpdateBufferFile {
1072                        project_id,
1073                        buffer_id,
1074                        file: Some(new_file.to_proto()),
1075                    })
1076                    .log_err();
1077                }
1078
1079                buffer_handle.update(&mut cx, |buffer, cx| {
1080                    if has_changed_file {
1081                        buffer.file_updated(new_file, cx).detach();
1082                    }
1083                });
1084            }
1085
1086            if let Some(project_id) = project_id {
1087                rpc.send(proto::BufferSaved {
1088                    project_id,
1089                    buffer_id,
1090                    version: serialize_version(&version),
1091                    mtime: Some(entry.mtime.into()),
1092                    fingerprint: serialize_fingerprint(fingerprint),
1093                })?;
1094            }
1095
1096            buffer_handle.update(&mut cx, |buffer, cx| {
1097                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1098            });
1099
1100            Ok((version, fingerprint, entry.mtime))
1101        })
1102    }
1103
1104    pub fn create_entry(
1105        &self,
1106        path: impl Into<Arc<Path>>,
1107        is_dir: bool,
1108        cx: &mut ModelContext<Worktree>,
1109    ) -> Task<Result<Entry>> {
1110        let path = path.into();
1111        let abs_path = self.absolutize(&path);
1112        let fs = self.fs.clone();
1113        let write = cx.background().spawn(async move {
1114            if is_dir {
1115                fs.create_dir(&abs_path).await
1116            } else {
1117                fs.save(&abs_path, &Default::default(), Default::default())
1118                    .await
1119            }
1120        });
1121
1122        cx.spawn(|this, mut cx| async move {
1123            write.await?;
1124            this.update(&mut cx, |this, cx| {
1125                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1126            })
1127            .await
1128        })
1129    }
1130
1131    pub fn write_file(
1132        &self,
1133        path: impl Into<Arc<Path>>,
1134        text: Rope,
1135        line_ending: LineEnding,
1136        cx: &mut ModelContext<Worktree>,
1137    ) -> Task<Result<Entry>> {
1138        let path = path.into();
1139        let abs_path = self.absolutize(&path);
1140        let fs = self.fs.clone();
1141        let write = cx
1142            .background()
1143            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1144
1145        cx.spawn(|this, mut cx| async move {
1146            write.await?;
1147            this.update(&mut cx, |this, cx| {
1148                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1149            })
1150            .await
1151        })
1152    }
1153
1154    pub fn delete_entry(
1155        &self,
1156        entry_id: ProjectEntryId,
1157        cx: &mut ModelContext<Worktree>,
1158    ) -> Option<Task<Result<()>>> {
1159        let entry = self.entry_for_id(entry_id)?.clone();
1160        let abs_path = self.abs_path.clone();
1161        let fs = self.fs.clone();
1162
1163        let delete = cx.background().spawn(async move {
1164            let mut abs_path = fs.canonicalize(&abs_path).await?;
1165            if entry.path.file_name().is_some() {
1166                abs_path = abs_path.join(&entry.path);
1167            }
1168            if entry.is_file() {
1169                fs.remove_file(&abs_path, Default::default()).await?;
1170            } else {
1171                fs.remove_dir(
1172                    &abs_path,
1173                    RemoveOptions {
1174                        recursive: true,
1175                        ignore_if_not_exists: false,
1176                    },
1177                )
1178                .await?;
1179            }
1180            anyhow::Ok(abs_path)
1181        });
1182
1183        Some(cx.spawn(|this, mut cx| async move {
1184            let abs_path = delete.await?;
1185            let (tx, mut rx) = barrier::channel();
1186            this.update(&mut cx, |this, _| {
1187                this.as_local_mut()
1188                    .unwrap()
1189                    .path_changes_tx
1190                    .try_send((vec![abs_path], tx))
1191            })?;
1192            rx.recv().await;
1193            Ok(())
1194        }))
1195    }
1196
1197    pub fn rename_entry(
1198        &self,
1199        entry_id: ProjectEntryId,
1200        new_path: impl Into<Arc<Path>>,
1201        cx: &mut ModelContext<Worktree>,
1202    ) -> Option<Task<Result<Entry>>> {
1203        let old_path = self.entry_for_id(entry_id)?.path.clone();
1204        let new_path = new_path.into();
1205        let abs_old_path = self.absolutize(&old_path);
1206        let abs_new_path = self.absolutize(&new_path);
1207        let fs = self.fs.clone();
1208        let rename = cx.background().spawn(async move {
1209            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1210                .await
1211        });
1212
1213        Some(cx.spawn(|this, mut cx| async move {
1214            rename.await?;
1215            this.update(&mut cx, |this, cx| {
1216                this.as_local_mut()
1217                    .unwrap()
1218                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1219            })
1220            .await
1221        }))
1222    }
1223
1224    pub fn copy_entry(
1225        &self,
1226        entry_id: ProjectEntryId,
1227        new_path: impl Into<Arc<Path>>,
1228        cx: &mut ModelContext<Worktree>,
1229    ) -> Option<Task<Result<Entry>>> {
1230        let old_path = self.entry_for_id(entry_id)?.path.clone();
1231        let new_path = new_path.into();
1232        let abs_old_path = self.absolutize(&old_path);
1233        let abs_new_path = self.absolutize(&new_path);
1234        let fs = self.fs.clone();
1235        let copy = cx.background().spawn(async move {
1236            copy_recursive(
1237                fs.as_ref(),
1238                &abs_old_path,
1239                &abs_new_path,
1240                Default::default(),
1241            )
1242            .await
1243        });
1244
1245        Some(cx.spawn(|this, mut cx| async move {
1246            copy.await?;
1247            this.update(&mut cx, |this, cx| {
1248                this.as_local_mut()
1249                    .unwrap()
1250                    .refresh_entry(new_path.clone(), None, cx)
1251            })
1252            .await
1253        }))
1254    }
1255
1256    fn refresh_entry(
1257        &self,
1258        path: Arc<Path>,
1259        old_path: Option<Arc<Path>>,
1260        cx: &mut ModelContext<Worktree>,
1261    ) -> Task<Result<Entry>> {
1262        let fs = self.fs.clone();
1263        let abs_root_path = self.abs_path.clone();
1264        let path_changes_tx = self.path_changes_tx.clone();
1265        cx.spawn_weak(move |this, mut cx| async move {
1266            let abs_path = fs.canonicalize(&abs_root_path).await?;
1267            let mut paths = Vec::with_capacity(2);
1268            paths.push(if path.file_name().is_some() {
1269                abs_path.join(&path)
1270            } else {
1271                abs_path.clone()
1272            });
1273            if let Some(old_path) = old_path {
1274                paths.push(if old_path.file_name().is_some() {
1275                    abs_path.join(&old_path)
1276                } else {
1277                    abs_path.clone()
1278                });
1279            }
1280
1281            let (tx, mut rx) = barrier::channel();
1282            path_changes_tx.try_send((paths, tx))?;
1283            rx.recv().await;
1284            this.upgrade(&cx)
1285                .ok_or_else(|| anyhow!("worktree was dropped"))?
1286                .update(&mut cx, |this, _| {
1287                    this.entry_for_path(path)
1288                        .cloned()
1289                        .ok_or_else(|| anyhow!("failed to read path after update"))
1290                })
1291        })
1292    }
1293
1294    pub fn observe_updates<F, Fut>(
1295        &mut self,
1296        project_id: u64,
1297        cx: &mut ModelContext<Worktree>,
1298        callback: F,
1299    ) -> oneshot::Receiver<()>
1300    where
1301        F: 'static + Send + Fn(proto::UpdateWorktree) -> Fut,
1302        Fut: Send + Future<Output = bool>,
1303    {
1304        #[cfg(any(test, feature = "test-support"))]
1305        const MAX_CHUNK_SIZE: usize = 2;
1306        #[cfg(not(any(test, feature = "test-support")))]
1307        const MAX_CHUNK_SIZE: usize = 256;
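        // Outgoing updates are split into chunks of at most `MAX_CHUNK_SIZE` entries
        // (via `proto::split_worktree_update` below) so that a large initial scan can
        // be streamed incrementally; the tiny value under test presumably exists to
        // exercise the chunking and resume logic.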
1308
1309        let (share_tx, share_rx) = oneshot::channel();
1310
1311        if let Some(share) = self.share.as_mut() {
1312            share_tx.send(()).ok();
1313            *share.resume_updates.borrow_mut() = ();
1314            return share_rx;
1315        }
1316
1317        let (resume_updates_tx, mut resume_updates_rx) = watch::channel::<()>();
1318        let (snapshots_tx, mut snapshots_rx) =
1319            mpsc::unbounded::<(LocalSnapshot, UpdatedEntriesSet, UpdatedGitRepositoriesSet)>();
1320        snapshots_tx
1321            .unbounded_send((self.snapshot(), Arc::from([]), Arc::from([])))
1322            .ok();
1323
1324        let worktree_id = cx.model_id() as u64;
1325        let _maintain_remote_snapshot = cx.background().spawn(async move {
1326            let mut is_first = true;
1327            while let Some((snapshot, entry_changes, repo_changes)) = snapshots_rx.next().await {
1328                let update;
1329                if is_first {
1330                    update = snapshot.build_initial_update(project_id, worktree_id);
1331                    is_first = false;
1332                } else {
1333                    update =
1334                        snapshot.build_update(project_id, worktree_id, entry_changes, repo_changes);
1335                }
1336
1337                for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1338                    let _ = resume_updates_rx.try_recv();
1339                    loop {
1340                        let result = callback(update.clone());
1341                        if result.await {
1342                            break;
1343                        } else {
1344                            log::info!("waiting to resume updates");
1345                            if resume_updates_rx.next().await.is_none() {
1346                                return Some(());
1347                            }
1348                        }
1349                    }
1350                }
1351            }
1352            share_tx.send(()).ok();
1353            Some(())
1354        });
1355
1356        self.share = Some(ShareState {
1357            project_id,
1358            snapshots_tx,
1359            resume_updates: resume_updates_tx,
1360            _maintain_remote_snapshot,
1361        });
1362        share_rx
1363    }
1364
1365    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1366        let client = self.client.clone();
1367
1368        for (path, summaries) in &self.diagnostic_summaries {
1369            for (&server_id, summary) in summaries {
1370                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1371                    project_id,
1372                    worktree_id: cx.model_id() as u64,
1373                    summary: Some(summary.to_proto(server_id, &path)),
1374                }) {
1375                    return Task::ready(Err(e));
1376                }
1377            }
1378        }
1379
1380        let rx = self.observe_updates(project_id, cx, move |update| {
1381            client.request(update).map(|result| result.is_ok())
1382        });
1383        cx.foreground()
1384            .spawn(async move { rx.await.map_err(|_| anyhow!("share ended")) })
1385    }
1386
1387    pub fn unshare(&mut self) {
1388        self.share.take();
1389    }
1390
1391    pub fn is_shared(&self) -> bool {
1392        self.share.is_some()
1393    }
1394}
1395
1396impl RemoteWorktree {
1397    fn snapshot(&self) -> Snapshot {
1398        self.snapshot.clone()
1399    }
1400
1401    pub fn disconnected_from_host(&mut self) {
1402        self.updates_tx.take();
1403        self.snapshot_subscriptions.clear();
1404        self.disconnected = true;
1405    }
1406
1407    pub fn save_buffer(
1408        &self,
1409        buffer_handle: ModelHandle<Buffer>,
1410        cx: &mut ModelContext<Worktree>,
1411    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1412        let buffer = buffer_handle.read(cx);
1413        let buffer_id = buffer.remote_id();
1414        let version = buffer.version();
1415        let rpc = self.client.clone();
1416        let project_id = self.project_id;
1417        cx.as_mut().spawn(|mut cx| async move {
1418            let response = rpc
1419                .request(proto::SaveBuffer {
1420                    project_id,
1421                    buffer_id,
1422                    version: serialize_version(&version),
1423                })
1424                .await?;
1425            let version = deserialize_version(&response.version);
1426            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1427            let mtime = response
1428                .mtime
1429                .ok_or_else(|| anyhow!("missing mtime"))?
1430                .into();
1431
1432            buffer_handle.update(&mut cx, |buffer, cx| {
1433                buffer.did_save(version.clone(), fingerprint, mtime, cx);
1434            });
1435
1436            Ok((version, fingerprint, mtime))
1437        })
1438    }
1439
1440    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1441        if let Some(updates_tx) = &self.updates_tx {
1442            updates_tx
1443                .unbounded_send(update)
1444                .expect("consumer runs to completion");
1445        }
1446    }
1447
1448    fn observed_snapshot(&self, scan_id: usize) -> bool {
1449        self.completed_scan_id >= scan_id
1450    }
1451
1452    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1453        let (tx, rx) = oneshot::channel();
1454        if self.observed_snapshot(scan_id) {
1455            let _ = tx.send(());
1456        } else if self.disconnected {
1457            drop(tx);
1458        } else {
1459            match self
1460                .snapshot_subscriptions
1461                .binary_search_by_key(&scan_id, |probe| probe.0)
1462            {
1463                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1464            }
1465        }
1466
1467        async move {
1468            rx.await?;
1469            Ok(())
1470        }
1471    }
1472
1473    pub fn update_diagnostic_summary(
1474        &mut self,
1475        path: Arc<Path>,
1476        summary: &proto::DiagnosticSummary,
1477    ) {
1478        let server_id = LanguageServerId(summary.language_server_id as usize);
1479        let summary = DiagnosticSummary {
1480            error_count: summary.error_count as usize,
1481            warning_count: summary.warning_count as usize,
1482        };
1483
1484        if summary.is_empty() {
1485            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1486                summaries.remove(&server_id);
1487                if summaries.is_empty() {
1488                    self.diagnostic_summaries.remove(&path);
1489                }
1490            }
1491        } else {
1492            self.diagnostic_summaries
1493                .entry(path)
1494                .or_default()
1495                .insert(server_id, summary);
1496        }
1497    }
1498
1499    pub fn insert_entry(
1500        &mut self,
1501        entry: proto::Entry,
1502        scan_id: usize,
1503        cx: &mut ModelContext<Worktree>,
1504    ) -> Task<Result<Entry>> {
1505        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1506        cx.spawn(|this, mut cx| async move {
1507            wait_for_snapshot.await?;
1508            this.update(&mut cx, |worktree, _| {
1509                let worktree = worktree.as_remote_mut().unwrap();
1510                let mut snapshot = worktree.background_snapshot.lock();
1511                let entry = snapshot.insert_entry(entry);
1512                worktree.snapshot = snapshot.clone();
1513                entry
1514            })
1515        })
1516    }
1517
1518    pub(crate) fn delete_entry(
1519        &mut self,
1520        id: ProjectEntryId,
1521        scan_id: usize,
1522        cx: &mut ModelContext<Worktree>,
1523    ) -> Task<Result<()>> {
1524        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1525        cx.spawn(|this, mut cx| async move {
1526            wait_for_snapshot.await?;
1527            this.update(&mut cx, |worktree, _| {
1528                let worktree = worktree.as_remote_mut().unwrap();
1529                let mut snapshot = worktree.background_snapshot.lock();
1530                snapshot.delete_entry(id);
1531                worktree.snapshot = snapshot.clone();
1532            });
1533            Ok(())
1534        })
1535    }
1536}
1537
1538impl Snapshot {
1539    pub fn id(&self) -> WorktreeId {
1540        self.id
1541    }
1542
1543    pub fn abs_path(&self) -> &Arc<Path> {
1544        &self.abs_path
1545    }
1546
1547    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1548        self.entries_by_id.get(&entry_id, &()).is_some()
1549    }
1550
1551    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1552        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1553        let old_entry = self.entries_by_id.insert_or_replace(
1554            PathEntry {
1555                id: entry.id,
1556                path: entry.path.clone(),
1557                is_ignored: entry.is_ignored,
1558                scan_id: 0,
1559            },
1560            &(),
1561        );
1562        if let Some(old_entry) = old_entry {
1563            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1564        }
1565        self.entries_by_path.insert_or_replace(entry.clone(), &());
1566        Ok(entry)
1567    }
1568
1569    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1570        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1571        self.entries_by_path = {
1572            let mut cursor = self.entries_by_path.cursor();
1573            let mut new_entries_by_path =
1574                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1575            while let Some(entry) = cursor.item() {
1576                if entry.path.starts_with(&removed_entry.path) {
1577                    self.entries_by_id.remove(&entry.id, &());
1578                    cursor.next(&());
1579                } else {
1580                    break;
1581                }
1582            }
1583            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1584            new_entries_by_path
1585        };
1586
1587        Some(removed_entry.path)
1588    }
1589
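        /// Apply an update received from the host, editing the entry and repository
        /// trees so that this snapshot matches the host's latest state.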
1590    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1591        let mut entries_by_path_edits = Vec::new();
1592        let mut entries_by_id_edits = Vec::new();
1593
1594        for entry_id in update.removed_entries {
1595            let entry_id = ProjectEntryId::from_proto(entry_id);
1596            entries_by_id_edits.push(Edit::Remove(entry_id));
1597            if let Some(entry) = self.entry_for_id(entry_id) {
1598                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1599            }
1600        }
1601
1602        for entry in update.updated_entries {
1603            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1604            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1605                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1606            }
1607            if let Some(old_entry) = self.entries_by_path.get(&PathKey(entry.path.clone()), &()) {
1608                if old_entry.id != entry.id {
1609                    entries_by_id_edits.push(Edit::Remove(old_entry.id));
1610                }
1611            }
1612            entries_by_id_edits.push(Edit::Insert(PathEntry {
1613                id: entry.id,
1614                path: entry.path.clone(),
1615                is_ignored: entry.is_ignored,
1616                scan_id: 0,
1617            }));
1618            entries_by_path_edits.push(Edit::Insert(entry));
1619        }
1620
1621        self.entries_by_path.edit(entries_by_path_edits, &());
1622        self.entries_by_id.edit(entries_by_id_edits, &());
1623
1624        update.removed_repositories.sort_unstable();
1625        self.repository_entries.retain(|_, entry| {
1626            update
1627                .removed_repositories
1628                .binary_search(&entry.work_directory.to_proto())
1629                .is_err()
1630        });
1635
1636        for repository in update.updated_repositories {
1637            let work_directory_entry: WorkDirectoryEntry =
1638                ProjectEntryId::from_proto(repository.work_directory_id).into();
1639
1640            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1641                let mut statuses = TreeMap::default();
1642                for status_entry in repository.updated_statuses {
1643                    let Some(git_file_status) = read_git_status(status_entry.status) else {
1644                        continue;
1645                    };
1646
1647                    let repo_path = RepoPath::new(status_entry.repo_path.into());
1648                    statuses.insert(repo_path, git_file_status);
1649                }
1650
1651                let work_directory = RepositoryWorkDirectory(entry.path.clone());
1652                if self.repository_entries.get(&work_directory).is_some() {
1653                    self.repository_entries.update(&work_directory, |repo| {
1654                        repo.branch = repository.branch.map(Into::into);
1655                        repo.statuses.insert_tree(statuses);
1656
1657                        for repo_path in repository.removed_repo_paths {
1658                            let repo_path = RepoPath::new(repo_path.into());
1659                            repo.statuses.remove(&repo_path);
1660                        }
1661                    });
1662                } else {
1663                    self.repository_entries.insert(
1664                        work_directory,
1665                        RepositoryEntry {
1666                            work_directory: work_directory_entry,
1667                            branch: repository.branch.map(Into::into),
1668                            statuses,
1669                        },
1670                    )
1671                }
1672            } else {
1673                log::error!("no work directory entry for repository {:?}", repository)
1674            }
1675        }
1676
1677        self.scan_id = update.scan_id as usize;
1678        if update.is_last_update {
1679            self.completed_scan_id = update.scan_id as usize;
1680        }
1681
1682        Ok(())
1683    }
1684
1685    pub fn file_count(&self) -> usize {
1686        self.entries_by_path.summary().file_count
1687    }
1688
1689    pub fn visible_file_count(&self) -> usize {
1690        self.entries_by_path.summary().visible_file_count
1691    }
1692
1693    fn traverse_from_offset(
1694        &self,
1695        include_dirs: bool,
1696        include_ignored: bool,
1697        start_offset: usize,
1698    ) -> Traversal {
1699        let mut cursor = self.entries_by_path.cursor();
1700        cursor.seek(
1701            &TraversalTarget::Count {
1702                count: start_offset,
1703                include_dirs,
1704                include_ignored,
1705            },
1706            Bias::Right,
1707            &(),
1708        );
1709        Traversal {
1710            cursor,
1711            include_dirs,
1712            include_ignored,
1713        }
1714    }
1715
1716    fn traverse_from_path(
1717        &self,
1718        include_dirs: bool,
1719        include_ignored: bool,
1720        path: &Path,
1721    ) -> Traversal {
1722        let mut cursor = self.entries_by_path.cursor();
1723        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1724        Traversal {
1725            cursor,
1726            include_dirs,
1727            include_ignored,
1728        }
1729    }
1730
1731    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1732        self.traverse_from_offset(false, include_ignored, start)
1733    }
1734
1735    pub fn entries(&self, include_ignored: bool) -> Traversal {
1736        self.traverse_from_offset(true, include_ignored, 0)
1737    }
1738
1739    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1740        self.repository_entries
1741            .iter()
1742            .map(|(path, entry)| (&path.0, entry))
1743    }
1744
1745    /// Get the repository whose work directory is the given path.
1746    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1747        self.repository_entries
1748            .get(&RepositoryWorkDirectory(path.into()))
1749            .cloned()
1750    }
1751
1752    /// Get the repository whose work directory contains the given path.
1753    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1754        self.repository_and_work_directory_for_path(path)
1755            .map(|e| e.1)
1756    }
1757
1758    pub fn repository_and_work_directory_for_path(
1759        &self,
1760        path: &Path,
1761    ) -> Option<(RepositoryWorkDirectory, RepositoryEntry)> {
1762        self.repository_entries
1763            .iter()
1764            .filter(|(workdir_path, _)| path.starts_with(workdir_path))
1765            .last()
1766            .map(|(path, repo)| (path.clone(), repo.clone()))
1767    }
1768
1769    /// Given an ordered iterator of entries, returns an iterator of those entries,
1770    /// along with their containing git repository.
1771    pub fn entries_with_repositories<'a>(
1772        &'a self,
1773        entries: impl 'a + Iterator<Item = &'a Entry>,
1774    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1775        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1776        let mut repositories = self.repositories().peekable();
1777        entries.map(move |entry| {
1778            while let Some((repo_path, _)) = containing_repos.last() {
1779                if !entry.path.starts_with(repo_path) {
1780                    containing_repos.pop();
1781                } else {
1782                    break;
1783                }
1784            }
1785            while let Some((repo_path, _)) = repositories.peek() {
1786                if entry.path.starts_with(repo_path) {
1787                    containing_repos.push(repositories.next().unwrap());
1788                } else {
1789                    break;
1790                }
1791            }
1792            let repo = containing_repos.last().map(|(_, repo)| *repo);
1793            (entry, repo)
1794        })
1795    }
1796
1797    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1798        let empty_path = Path::new("");
1799        self.entries_by_path
1800            .cursor::<()>()
1801            .filter(move |entry| entry.path.as_ref() != empty_path)
1802            .map(|entry| &entry.path)
1803    }
1804
1805    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1806        let mut cursor = self.entries_by_path.cursor();
1807        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1808        let traversal = Traversal {
1809            cursor,
1810            include_dirs: true,
1811            include_ignored: true,
1812        };
1813        ChildEntriesIter {
1814            traversal,
1815            parent_path,
1816        }
1817    }
1818
1819    fn descendent_entries<'a>(
1820        &'a self,
1821        include_dirs: bool,
1822        include_ignored: bool,
1823        parent_path: &'a Path,
1824    ) -> DescendentEntriesIter<'a> {
1825        let mut cursor = self.entries_by_path.cursor();
1826        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1827        let mut traversal = Traversal {
1828            cursor,
1829            include_dirs,
1830            include_ignored,
1831        };
1832
1833        if traversal.end_offset() == traversal.start_offset() {
1834            traversal.advance();
1835        }
1836
1837        DescendentEntriesIter {
1838            traversal,
1839            parent_path,
1840        }
1841    }
1842
1843    pub fn root_entry(&self) -> Option<&Entry> {
1844        self.entry_for_path("")
1845    }
1846
1847    pub fn root_name(&self) -> &str {
1848        &self.root_name
1849    }
1850
1851    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1852        self.repository_entries
1853            .get(&RepositoryWorkDirectory(Path::new("").into()))
1854            .map(|entry| entry.to_owned())
1855    }
1856
1857    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1858        self.repository_entries.values()
1859    }
1860
1861    pub fn scan_id(&self) -> usize {
1862        self.scan_id
1863    }
1864
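        /// Look up the entry at the given worktree-relative path, if any.
        ///
        /// ```ignore
        /// // Illustrative only; assumes `snapshot` is a populated `Snapshot`.
        /// if let Some(entry) = snapshot.entry_for_path("src/main.rs") {
        ///     assert_eq!(entry.path.as_ref(), Path::new("src/main.rs"));
        /// }
        /// ```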
1865    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1866        let path = path.as_ref();
1867        self.traverse_from_path(true, true, path)
1868            .entry()
1869            .and_then(|entry| {
1870                if entry.path.as_ref() == path {
1871                    Some(entry)
1872                } else {
1873                    None
1874                }
1875            })
1876    }
1877
1878    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1879        let entry = self.entries_by_id.get(&id, &())?;
1880        self.entry_for_path(&entry.path)
1881    }
1882
1883    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1884        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1885    }
1886}
1887
1888impl LocalSnapshot {
1889    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1890        self.git_repositories.get(&repo.work_directory.0)
1891    }
1892
1893    pub(crate) fn repo_for_metadata(
1894        &self,
1895        path: &Path,
1896    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1897        self.git_repositories
1898            .iter()
1899            .find(|(_, repo)| repo.in_dot_git(path))
1900    }
1901
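        /// Build a `proto::UpdateWorktree` message describing the given entry and
        /// repository changes relative to this snapshot.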
1902    fn build_update(
1903        &self,
1904        project_id: u64,
1905        worktree_id: u64,
1906        entry_changes: UpdatedEntriesSet,
1907        repo_changes: UpdatedGitRepositoriesSet,
1908    ) -> proto::UpdateWorktree {
1909        let mut updated_entries = Vec::new();
1910        let mut removed_entries = Vec::new();
1911        let mut updated_repositories = Vec::new();
1912        let mut removed_repositories = Vec::new();
1913
1914        for (_, entry_id, path_change) in entry_changes.iter() {
1915            if let PathChange::Removed = path_change {
1916                removed_entries.push(entry_id.0 as u64);
1917            } else if let Some(entry) = self.entry_for_id(*entry_id) {
1918                updated_entries.push(proto::Entry::from(entry));
1919            }
1920        }
1921
1922        for (work_dir_path, change) in repo_changes.iter() {
1923            let new_repo = self
1924                .repository_entries
1925                .get(&RepositoryWorkDirectory(work_dir_path.clone()));
1926            match (&change.old_repository, new_repo) {
1927                (Some(old_repo), Some(new_repo)) => {
1928                    updated_repositories.push(new_repo.build_update(old_repo));
1929                }
1930                (None, Some(new_repo)) => {
1931                    updated_repositories.push(proto::RepositoryEntry::from(new_repo));
1932                }
1933                (Some(old_repo), None) => {
1934                    removed_repositories.push(old_repo.work_directory.0.to_proto());
1935                }
1936                _ => {}
1937            }
1938        }
1939
1940        removed_entries.sort_unstable();
1941        updated_entries.sort_unstable_by_key(|e| e.id);
1942        removed_repositories.sort_unstable();
1943        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1944
1945        // TODO - optimize, knowing that removed_entries are sorted.
1946        removed_entries.retain(|id| updated_entries.binary_search_by_key(id, |e| e.id).is_err());
1947
1948        proto::UpdateWorktree {
1949            project_id,
1950            worktree_id,
1951            abs_path: self.abs_path().to_string_lossy().into(),
1952            root_name: self.root_name().to_string(),
1953            updated_entries,
1954            removed_entries,
1955            scan_id: self.scan_id as u64,
1956            is_last_update: self.completed_scan_id == self.scan_id,
1957            updated_repositories,
1958            removed_repositories,
1959        }
1960    }
1961
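        /// Build an update containing this snapshot's entire contents, used to send the
        /// worktree's initial state when sharing begins.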
1962    fn build_initial_update(&self, project_id: u64, worktree_id: u64) -> proto::UpdateWorktree {
1963        let mut updated_entries = self
1964            .entries_by_path
1965            .iter()
1966            .map(proto::Entry::from)
1967            .collect::<Vec<_>>();
1968        updated_entries.sort_unstable_by_key(|e| e.id);
1969
1970        let mut updated_repositories = self
1971            .repository_entries
1972            .values()
1973            .map(proto::RepositoryEntry::from)
1974            .collect::<Vec<_>>();
1975        updated_repositories.sort_unstable_by_key(|e| e.work_directory_id);
1976
1977        proto::UpdateWorktree {
1978            project_id,
1979            worktree_id,
1980            abs_path: self.abs_path().to_string_lossy().into(),
1981            root_name: self.root_name().to_string(),
1982            updated_entries,
1983            removed_entries: Vec::new(),
1984            scan_id: self.scan_id as u64,
1985            is_last_update: self.completed_scan_id == self.scan_id,
1986            updated_repositories,
1987            removed_repositories: Vec::new(),
1988        }
1989    }
1990
1991    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1992        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1993            let abs_path = self.abs_path.join(&entry.path);
1994            match smol::block_on(build_gitignore(&abs_path, fs)) {
1995                Ok(ignore) => {
1996                    self.ignores_by_parent_abs_path
1997                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
1998                }
1999                Err(error) => {
2000                    log::error!(
2001                        "error loading .gitignore file {:?} - {:?}",
2002                        &entry.path,
2003                        error
2004                    );
2005                }
2006            }
2007        }
2008
2009        if entry.kind == EntryKind::PendingDir {
2010            if let Some(existing_entry) =
2011                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
2012            {
2013                entry.kind = existing_entry.kind;
2014            }
2015        }
2016
2017        let scan_id = self.scan_id;
2018        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
2019        if let Some(removed) = removed {
2020            if removed.id != entry.id {
2021                self.entries_by_id.remove(&removed.id, &());
2022            }
2023        }
2024        self.entries_by_id.insert_or_replace(
2025            PathEntry {
2026                id: entry.id,
2027                path: entry.path.clone(),
2028                is_ignored: entry.is_ignored,
2029                scan_id,
2030            },
2031            &(),
2032        );
2033
2034        entry
2035    }
2036
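        /// Register the git repository whose `.git` directory is located at `parent_path`,
        /// opening it via the filesystem and recording its branch and file statuses.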
2037    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
2038        let abs_path = self.abs_path.join(&parent_path);
2039        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
2040
2041        // Guard against repositories nested inside another repository's .git directory
2042        if work_dir
2043            .components()
2044            .find(|component| component.as_os_str() == *DOT_GIT)
2045            .is_some()
2046        {
2047            return None;
2048        };
2049
2050        let work_dir_id = self
2051            .entry_for_path(work_dir.clone())
2052            .map(|entry| entry.id)?;
2053
2054        if self.git_repositories.get(&work_dir_id).is_none() {
2055            let repo = fs.open_repo(abs_path.as_path())?;
2056            let work_directory = RepositoryWorkDirectory(work_dir.clone());
2057            let scan_id = self.scan_id;
2058
2059            let repo_lock = repo.lock();
2060
2061            self.repository_entries.insert(
2062                work_directory,
2063                RepositoryEntry {
2064                    work_directory: work_dir_id.into(),
2065                    branch: repo_lock.branch_name().map(Into::into),
2066                    statuses: repo_lock.statuses().unwrap_or_default(),
2067                },
2068            );
2069            drop(repo_lock);
2070
2071            self.git_repositories.insert(
2072                work_dir_id,
2073                LocalRepositoryEntry {
2074                    work_dir_scan_id: scan_id,
2075                    git_dir_scan_id: scan_id,
2076                    repo_ptr: repo,
2077                    git_dir_path: parent_path.clone(),
2078                },
2079            )
2080        }
2081
2082        Some(())
2083    }
2084
2085    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2086        let mut inodes = TreeSet::default();
2087        for ancestor in path.ancestors().skip(1) {
2088            if let Some(entry) = self.entry_for_path(ancestor) {
2089                inodes.insert(entry.inode);
2090            }
2091        }
2092        inodes
2093    }
2094
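        /// Build the stack of gitignore rules that applies to the given absolute path by
        /// walking its ancestors from the outermost directory inward.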
2095    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2096        let mut new_ignores = Vec::new();
2097        for ancestor in abs_path.ancestors().skip(1) {
2098            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2099                new_ignores.push((ancestor, Some(ignore.clone())));
2100            } else {
2101                new_ignores.push((ancestor, None));
2102            }
2103        }
2104
2105        let mut ignore_stack = IgnoreStack::none();
2106        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2107            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2108                ignore_stack = IgnoreStack::all();
2109                break;
2110            } else if let Some(ignore) = ignore {
2111                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2112            }
2113        }
2114
2115        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2116            ignore_stack = IgnoreStack::all();
2117        }
2118
2119        ignore_stack
2120    }
2121}
2122
2123impl BackgroundScannerState {
2124    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2125        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2126            entry.id = removed_entry_id;
2127        } else if let Some(existing_entry) = self.snapshot.entry_for_path(&entry.path) {
2128            entry.id = existing_entry.id;
2129        }
2130    }
2131
2132    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2133        self.reuse_entry_id(&mut entry);
2134        self.snapshot.insert_entry(entry, fs)
2135    }
2136
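        /// Insert the entries discovered while scanning a directory, marking the parent as
        /// a fully loaded directory and registering any gitignore or git repository found
        /// within it.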
2137    fn populate_dir(
2138        &mut self,
2139        parent_path: Arc<Path>,
2140        entries: impl IntoIterator<Item = Entry>,
2141        ignore: Option<Arc<Gitignore>>,
2142        fs: &dyn Fs,
2143    ) {
2144        let mut parent_entry = if let Some(parent_entry) = self
2145            .snapshot
2146            .entries_by_path
2147            .get(&PathKey(parent_path.clone()), &())
2148        {
2149            parent_entry.clone()
2150        } else {
2151            log::warn!(
2152                "populating a directory {:?} that has been removed",
2153                parent_path
2154            );
2155            return;
2156        };
2157
2158        match parent_entry.kind {
2159            EntryKind::PendingDir => {
2160                parent_entry.kind = EntryKind::Dir;
2161            }
2162            EntryKind::Dir => {}
2163            _ => return,
2164        }
2165
2166        if let Some(ignore) = ignore {
2167            let abs_parent_path = self.snapshot.abs_path.join(&parent_path).into();
2168            self.snapshot
2169                .ignores_by_parent_abs_path
2170                .insert(abs_parent_path, (ignore, false));
2171        }
2172
2173        if parent_path.file_name() == Some(&DOT_GIT) {
2174            self.snapshot.build_repo(parent_path, fs);
2175        }
2176
2177        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2178        let mut entries_by_id_edits = Vec::new();
2179
2180        for mut entry in entries {
2181            self.reuse_entry_id(&mut entry);
2182            entries_by_id_edits.push(Edit::Insert(PathEntry {
2183                id: entry.id,
2184                path: entry.path.clone(),
2185                is_ignored: entry.is_ignored,
2186                scan_id: self.snapshot.scan_id,
2187            }));
2188            entries_by_path_edits.push(Edit::Insert(entry));
2189        }
2190
2191        self.snapshot
2192            .entries_by_path
2193            .edit(entries_by_path_edits, &());
2194        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2195    }
2196
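        /// Remove the entry at the given path along with all of its descendants, remembering
        /// the removed entry ids so they can be reused if the same paths reappear.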
2197    fn remove_path(&mut self, path: &Path) {
2198        let mut new_entries;
2199        let removed_entries;
2200        {
2201            let mut cursor = self.snapshot.entries_by_path.cursor::<TraversalProgress>();
2202            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2203            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2204            new_entries.push_tree(cursor.suffix(&()), &());
2205        }
2206        self.snapshot.entries_by_path = new_entries;
2207
2208        let mut entries_by_id_edits = Vec::new();
2209        for entry in removed_entries.cursor::<()>() {
2210            let removed_entry_id = self
2211                .removed_entry_ids
2212                .entry(entry.inode)
2213                .or_insert(entry.id);
2214            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2215            entries_by_id_edits.push(Edit::Remove(entry.id));
2216        }
2217        self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
2218
2219        if path.file_name() == Some(&GITIGNORE) {
2220            let abs_parent_path = self.snapshot.abs_path.join(path.parent().unwrap());
2221            if let Some((_, needs_update)) = self
2222                .snapshot
2223                .ignores_by_parent_abs_path
2224                .get_mut(abs_parent_path.as_path())
2225            {
2226                *needs_update = true;
2227            }
2228        }
2229    }
2230}
2231
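    /// Load and parse a `.gitignore` file, rooting its rules at the file's parent directory.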
2232async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2233    let contents = fs.load(abs_path).await?;
2234    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2235    let mut builder = GitignoreBuilder::new(parent);
2236    for line in contents.lines() {
2237        builder.add_line(Some(abs_path.into()), line)?;
2238    }
2239    Ok(builder.build()?)
2240}
2241
2242impl WorktreeId {
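        /// Construct a `WorktreeId` from a raw `usize` (typically the worktree model's handle id).
        ///
        /// ```ignore
        /// // Illustrative only: an id round-trips through its proto representation.
        /// let id = WorktreeId::from_usize(42);
        /// assert_eq!(WorktreeId::from_proto(id.to_proto()), id);
        /// ```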
2243    pub fn from_usize(handle_id: usize) -> Self {
2244        Self(handle_id)
2245    }
2246
2247    pub(crate) fn from_proto(id: u64) -> Self {
2248        Self(id as usize)
2249    }
2250
2251    pub fn to_proto(&self) -> u64 {
2252        self.0 as u64
2253    }
2254
2255    pub fn to_usize(&self) -> usize {
2256        self.0
2257    }
2258}
2259
2260impl fmt::Display for WorktreeId {
2261    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2262        self.0.fmt(f)
2263    }
2264}
2265
2266impl Deref for Worktree {
2267    type Target = Snapshot;
2268
2269    fn deref(&self) -> &Self::Target {
2270        match self {
2271            Worktree::Local(worktree) => &worktree.snapshot,
2272            Worktree::Remote(worktree) => &worktree.snapshot,
2273        }
2274    }
2275}
2276
2277impl Deref for LocalWorktree {
2278    type Target = LocalSnapshot;
2279
2280    fn deref(&self) -> &Self::Target {
2281        &self.snapshot
2282    }
2283}
2284
2285impl Deref for RemoteWorktree {
2286    type Target = Snapshot;
2287
2288    fn deref(&self) -> &Self::Target {
2289        &self.snapshot
2290    }
2291}
2292
2293impl fmt::Debug for LocalWorktree {
2294    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2295        self.snapshot.fmt(f)
2296    }
2297}
2298
2299impl fmt::Debug for Snapshot {
2300    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2301        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2302        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2303
2304        impl<'a> fmt::Debug for EntriesByPath<'a> {
2305            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2306                f.debug_map()
2307                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2308                    .finish()
2309            }
2310        }
2311
2312        impl<'a> fmt::Debug for EntriesById<'a> {
2313            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2314                f.debug_list().entries(self.0.iter()).finish()
2315            }
2316        }
2317
2318        f.debug_struct("Snapshot")
2319            .field("id", &self.id)
2320            .field("root_name", &self.root_name)
2321            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2322            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2323            .finish()
2324    }
2325}
2326
2327#[derive(Clone, PartialEq)]
2328pub struct File {
2329    pub worktree: ModelHandle<Worktree>,
2330    pub path: Arc<Path>,
2331    pub mtime: SystemTime,
2332    pub(crate) entry_id: ProjectEntryId,
2333    pub(crate) is_local: bool,
2334    pub(crate) is_deleted: bool,
2335}
2336
2337impl language::File for File {
2338    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2339        if self.is_local {
2340            Some(self)
2341        } else {
2342            None
2343        }
2344    }
2345
2346    fn mtime(&self) -> SystemTime {
2347        self.mtime
2348    }
2349
2350    fn path(&self) -> &Arc<Path> {
2351        &self.path
2352    }
2353
2354    fn full_path(&self, cx: &AppContext) -> PathBuf {
2355        let mut full_path = PathBuf::new();
2356        let worktree = self.worktree.read(cx);
2357
2358        if worktree.is_visible() {
2359            full_path.push(worktree.root_name());
2360        } else {
2361            let path = worktree.abs_path();
2362
2363            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2364                full_path.push("~");
2365                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2366            } else {
2367                full_path.push(path)
2368            }
2369        }
2370
2371        if self.path.components().next().is_some() {
2372            full_path.push(&self.path);
2373        }
2374
2375        full_path
2376    }
2377
2378    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2379    /// of its worktree, then this method will return the name of the worktree itself.
2380    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2381        self.path
2382            .file_name()
2383            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2384    }
2385
2386    fn is_deleted(&self) -> bool {
2387        self.is_deleted
2388    }
2389
2390    fn as_any(&self) -> &dyn Any {
2391        self
2392    }
2393
2394    fn to_proto(&self) -> rpc::proto::File {
2395        rpc::proto::File {
2396            worktree_id: self.worktree.id() as u64,
2397            entry_id: self.entry_id.to_proto(),
2398            path: self.path.to_string_lossy().into(),
2399            mtime: Some(self.mtime.into()),
2400            is_deleted: self.is_deleted,
2401        }
2402    }
2403}
2404
2405impl language::LocalFile for File {
2406    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2407        self.worktree
2408            .read(cx)
2409            .as_local()
2410            .unwrap()
2411            .abs_path
2412            .join(&self.path)
2413    }
2414
2415    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2416        let worktree = self.worktree.read(cx).as_local().unwrap();
2417        let abs_path = worktree.absolutize(&self.path);
2418        let fs = worktree.fs.clone();
2419        cx.background()
2420            .spawn(async move { fs.load(&abs_path).await })
2421    }
2422
2423    fn buffer_reloaded(
2424        &self,
2425        buffer_id: u64,
2426        version: &clock::Global,
2427        fingerprint: RopeFingerprint,
2428        line_ending: LineEnding,
2429        mtime: SystemTime,
2430        cx: &mut AppContext,
2431    ) {
2432        let worktree = self.worktree.read(cx).as_local().unwrap();
2433        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2434            worktree
2435                .client
2436                .send(proto::BufferReloaded {
2437                    project_id,
2438                    buffer_id,
2439                    version: serialize_version(version),
2440                    mtime: Some(mtime.into()),
2441                    fingerprint: serialize_fingerprint(fingerprint),
2442                    line_ending: serialize_line_ending(line_ending) as i32,
2443                })
2444                .log_err();
2445        }
2446    }
2447}
2448
2449impl File {
2450    pub fn from_proto(
2451        proto: rpc::proto::File,
2452        worktree: ModelHandle<Worktree>,
2453        cx: &AppContext,
2454    ) -> Result<Self> {
2455        let worktree_id = worktree
2456            .read(cx)
2457            .as_remote()
2458            .ok_or_else(|| anyhow!("not remote"))?
2459            .id();
2460
2461        if worktree_id.to_proto() != proto.worktree_id {
2462            return Err(anyhow!("worktree id does not match file"));
2463        }
2464
2465        Ok(Self {
2466            worktree,
2467            path: Path::new(&proto.path).into(),
2468            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2469            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2470            is_local: false,
2471            is_deleted: proto.is_deleted,
2472        })
2473    }
2474
2475    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2476        file.and_then(|f| f.as_any().downcast_ref())
2477    }
2478
2479    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2480        self.worktree.read(cx).id()
2481    }
2482
2483    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2484        if self.is_deleted {
2485            None
2486        } else {
2487            Some(self.entry_id)
2488        }
2489    }
2490}
2491
2492#[derive(Clone, Debug, PartialEq, Eq)]
2493pub struct Entry {
2494    pub id: ProjectEntryId,
2495    pub kind: EntryKind,
2496    pub path: Arc<Path>,
2497    pub inode: u64,
2498    pub mtime: SystemTime,
2499    pub is_symlink: bool,
2500    pub is_ignored: bool,
2501}
2502
2503#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2504pub enum EntryKind {
2505    PendingDir,
2506    Dir,
2507    File(CharBag),
2508}
2509
2510#[derive(Clone, Copy, Debug)]
2511pub enum PathChange {
2512    /// A filesystem entry was created.
2513    Added,
2514    /// A filesystem entry was removed.
2515    Removed,
2516    /// A filesystem entry was updated.
2517    Updated,
2518    /// A filesystem entry was either updated or added. We don't know
2519    /// whether or not it already existed, because the path had not
2520    /// been loaded before the event.
2521    AddedOrUpdated,
2522    /// A filesystem entry was found during the initial scan of the worktree.
2523    Loaded,
2524}
2525
2526pub struct GitRepositoryChange {
2527    /// The previous state of the repository, if it already existed.
2528    pub old_repository: Option<RepositoryEntry>,
2529    /// Whether the content of the .git directory changed. This will be false
2530    /// if only the repository's work directory changed.
2531    pub git_dir_changed: bool,
2532}
2533
2534pub type UpdatedEntriesSet = Arc<[(Arc<Path>, ProjectEntryId, PathChange)]>;
2535pub type UpdatedGitRepositoriesSet = Arc<[(Arc<Path>, GitRepositoryChange)]>;
2536
2537impl Entry {
2538    fn new(
2539        path: Arc<Path>,
2540        metadata: &fs::Metadata,
2541        next_entry_id: &AtomicUsize,
2542        root_char_bag: CharBag,
2543    ) -> Self {
2544        Self {
2545            id: ProjectEntryId::new(next_entry_id),
2546            kind: if metadata.is_dir {
2547                EntryKind::PendingDir
2548            } else {
2549                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2550            },
2551            path,
2552            inode: metadata.inode,
2553            mtime: metadata.mtime,
2554            is_symlink: metadata.is_symlink,
2555            is_ignored: false,
2556        }
2557    }
2558
2559    pub fn is_dir(&self) -> bool {
2560        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2561    }
2562
2563    pub fn is_file(&self) -> bool {
2564        matches!(self.kind, EntryKind::File(_))
2565    }
2566}
2567
2568impl sum_tree::Item for Entry {
2569    type Summary = EntrySummary;
2570
2571    fn summary(&self) -> Self::Summary {
2572        let visible_count = if self.is_ignored { 0 } else { 1 };
2573        let file_count;
2574        let visible_file_count;
2575        if self.is_file() {
2576            file_count = 1;
2577            visible_file_count = visible_count;
2578        } else {
2579            file_count = 0;
2580            visible_file_count = 0;
2581        }
2582
2583        EntrySummary {
2584            max_path: self.path.clone(),
2585            count: 1,
2586            visible_count,
2587            file_count,
2588            visible_file_count,
2589        }
2590    }
2591}
2592
2593impl sum_tree::KeyedItem for Entry {
2594    type Key = PathKey;
2595
2596    fn key(&self) -> Self::Key {
2597        PathKey(self.path.clone())
2598    }
2599}
2600
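    /// Sum-tree summary for `Entry`: tracks the maximum path in a subtree along with counts
    /// of entries, visible entries, files, and visible files.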
2601#[derive(Clone, Debug)]
2602pub struct EntrySummary {
2603    max_path: Arc<Path>,
2604    count: usize,
2605    visible_count: usize,
2606    file_count: usize,
2607    visible_file_count: usize,
2608}
2609
2610impl Default for EntrySummary {
2611    fn default() -> Self {
2612        Self {
2613            max_path: Arc::from(Path::new("")),
2614            count: 0,
2615            visible_count: 0,
2616            file_count: 0,
2617            visible_file_count: 0,
2618        }
2619    }
2620}
2621
2622impl sum_tree::Summary for EntrySummary {
2623    type Context = ();
2624
2625    fn add_summary(&mut self, rhs: &Self, _: &()) {
2626        self.max_path = rhs.max_path.clone();
2627        self.count += rhs.count;
2628        self.visible_count += rhs.visible_count;
2629        self.file_count += rhs.file_count;
2630        self.visible_file_count += rhs.visible_file_count;
2631    }
2632}
2633
2634#[derive(Clone, Debug)]
2635struct PathEntry {
2636    id: ProjectEntryId,
2637    path: Arc<Path>,
2638    is_ignored: bool,
2639    scan_id: usize,
2640}
2641
2642impl sum_tree::Item for PathEntry {
2643    type Summary = PathEntrySummary;
2644
2645    fn summary(&self) -> Self::Summary {
2646        PathEntrySummary { max_id: self.id }
2647    }
2648}
2649
2650impl sum_tree::KeyedItem for PathEntry {
2651    type Key = ProjectEntryId;
2652
2653    fn key(&self) -> Self::Key {
2654        self.id
2655    }
2656}
2657
2658#[derive(Clone, Debug, Default)]
2659struct PathEntrySummary {
2660    max_id: ProjectEntryId,
2661}
2662
2663impl sum_tree::Summary for PathEntrySummary {
2664    type Context = ();
2665
2666    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2667        self.max_id = summary.max_id;
2668    }
2669}
2670
2671impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2672    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2673        *self = summary.max_id;
2674    }
2675}
2676
2677#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2678pub struct PathKey(Arc<Path>);
2679
2680impl Default for PathKey {
2681    fn default() -> Self {
2682        Self(Path::new("").into())
2683    }
2684}
2685
2686impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2687    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2688        self.0 = summary.max_path.clone();
2689    }
2690}
2691
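    /// Scans a local worktree's directory tree on background threads, keeping the snapshot
    /// up to date and reporting changes and progress via `status_updates_tx`.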
2692struct BackgroundScanner {
2693    state: Mutex<BackgroundScannerState>,
2694    fs: Arc<dyn Fs>,
2695    status_updates_tx: UnboundedSender<ScanState>,
2696    executor: Arc<executor::Background>,
2697    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2698    next_entry_id: Arc<AtomicUsize>,
2699    phase: BackgroundScannerPhase,
2700}
2701
2702#[derive(PartialEq)]
2703enum BackgroundScannerPhase {
2704    InitialScan,
2705    EventsReceivedDuringInitialScan,
2706    Events,
2707}
2708
2709impl BackgroundScanner {
2710    fn new(
2711        snapshot: LocalSnapshot,
2712        next_entry_id: Arc<AtomicUsize>,
2713        fs: Arc<dyn Fs>,
2714        status_updates_tx: UnboundedSender<ScanState>,
2715        executor: Arc<executor::Background>,
2716        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2717    ) -> Self {
2718        Self {
2719            fs,
2720            status_updates_tx,
2721            executor,
2722            refresh_requests_rx,
2723            next_entry_id,
2724            state: Mutex::new(BackgroundScannerState {
2725                prev_snapshot: snapshot.snapshot.clone(),
2726                snapshot,
2727                removed_entry_ids: Default::default(),
2728                changed_paths: Default::default(),
2729            }),
2730            phase: BackgroundScannerPhase::InitialScan,
2731        }
2732    }
2733
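        /// Run the scanner: perform an initial recursive scan of the worktree, then keep
        /// processing filesystem events and refresh requests until the worktree is dropped.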
2734    async fn run(
2735        &mut self,
2736        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2737    ) {
2738        use futures::FutureExt as _;
2739
2740        let (root_abs_path, root_inode) = {
2741            let snapshot = &self.state.lock().snapshot;
2742            (
2743                snapshot.abs_path.clone(),
2744                snapshot.root_entry().map(|e| e.inode),
2745            )
2746        };
2747
2748        // Populate ignores above the root.
2749        let ignore_stack;
2750        for ancestor in root_abs_path.ancestors().skip(1) {
2751            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2752            {
2753                self.state
2754                    .lock()
2755                    .snapshot
2756                    .ignores_by_parent_abs_path
2757                    .insert(ancestor.into(), (ignore.into(), false));
2758            }
2759        }
2760        {
2761            let mut state = self.state.lock();
2762            state.snapshot.scan_id += 1;
2763            ignore_stack = state
2764                .snapshot
2765                .ignore_stack_for_abs_path(&root_abs_path, true);
2766            if ignore_stack.is_all() {
2767                if let Some(mut root_entry) = state.snapshot.root_entry().cloned() {
2768                    root_entry.is_ignored = true;
2769                    state.insert_entry(root_entry, self.fs.as_ref());
2770                }
2771            }
2772        };
2773
2774        // Perform an initial scan of the directory.
2775        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2776        smol::block_on(scan_job_tx.send(ScanJob {
2777            abs_path: root_abs_path,
2778            path: Arc::from(Path::new("")),
2779            ignore_stack,
2780            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2781            scan_queue: scan_job_tx.clone(),
2782        }))
2783        .unwrap();
2784        drop(scan_job_tx);
2785        self.scan_dirs(true, scan_job_rx).await;
2786        {
2787            let mut state = self.state.lock();
2788            state.snapshot.completed_scan_id = state.snapshot.scan_id;
2789        }
2790        self.send_status_update(false, None);
2791
2792        // Process any FS events that occurred while performing the initial scan.
2793        // For these events, the reported changes cannot be as precise, because we
2794        // didn't have the previous state loaded yet.
2795        self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
2796        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2797            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2798            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2799                paths.extend(more_events.into_iter().map(|e| e.path));
2800            }
2801            self.process_events(paths).await;
2802        }
2803
2804        // Continue processing events until the worktree is dropped.
2805        self.phase = BackgroundScannerPhase::Events;
2806        loop {
2807            select_biased! {
2808                // Process any path refresh requests from the worktree. Prioritize
2809                // these before handling changes reported by the filesystem.
2810                request = self.refresh_requests_rx.recv().fuse() => {
2811                    let Ok((paths, barrier)) = request else { break };
2812                    if !self.process_refresh_request(paths.clone(), barrier).await {
2813                        return;
2814                    }
2815                }
2816
2817                events = events_rx.next().fuse() => {
2818                    let Some(events) = events else { break };
2819                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2820                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2821                        paths.extend(more_events.into_iter().map(|e| e.path));
2822                    }
2823                    self.process_events(paths.clone()).await;
2824                }
2825            }
2826        }
2827    }
2828
2829    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2830        self.reload_entries_for_paths(paths, None).await;
2831        self.send_status_update(false, Some(barrier))
2832    }
2833
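        /// Reload entries for a batch of changed paths reported by the filesystem, rescan any
        /// affected directories, and refresh ignore and git repository state accordingly.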
2834    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2835        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2836        let paths = self
2837            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2838            .await;
2839        drop(scan_job_tx);
2840        self.scan_dirs(false, scan_job_rx).await;
2841
2842        self.update_ignore_statuses().await;
2843
2844        {
2845            let mut snapshot = &mut self.state.lock().snapshot;
2846
2847            if let Some(paths) = paths {
2848                for path in paths {
2849                    self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
2850                }
2851            }
2852
2853            let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2854            git_repositories.retain(|work_directory_id, _| {
2855                snapshot
2856                    .entry_for_id(*work_directory_id)
2857                    .map_or(false, |entry| {
2858                        snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2859                    })
2860            });
2861            snapshot.git_repositories = git_repositories;
2862
2863            let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2864            git_repository_entries.retain(|_, entry| {
2865                snapshot
2866                    .git_repositories
2867                    .get(&entry.work_directory.0)
2868                    .is_some()
2869            });
2870            snapshot.snapshot.repository_entries = git_repository_entries;
2871            snapshot.completed_scan_id = snapshot.scan_id;
2872        }
2873
2874        self.send_status_update(false, None);
2875    }
2876
2877    async fn scan_dirs(
2878        &self,
2879        enable_progress_updates: bool,
2880        scan_jobs_rx: channel::Receiver<ScanJob>,
2881    ) {
2882        use futures::FutureExt as _;
2883
2884        if self
2885            .status_updates_tx
2886            .unbounded_send(ScanState::Started)
2887            .is_err()
2888        {
2889            return;
2890        }
2891
2892        let progress_update_count = AtomicUsize::new(0);
2893        self.executor
2894            .scoped(|scope| {
2895                for _ in 0..self.executor.num_cpus() {
2896                    scope.spawn(async {
2897                        let mut last_progress_update_count = 0;
2898                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2899                        futures::pin_mut!(progress_update_timer);
2900
2901                        loop {
2902                            select_biased! {
2903                                // Process any path refresh requests before moving on to process
2904                                // the scan queue, so that user operations are prioritized.
2905                                request = self.refresh_requests_rx.recv().fuse() => {
2906                                    let Ok((paths, barrier)) = request else { break };
2907                                    if !self.process_refresh_request(paths, barrier).await {
2908                                        return;
2909                                    }
2910                                }
2911
2912                                // Send periodic progress updates to the worktree. Use an atomic counter
2913                                // to ensure that only one of the workers sends a progress update after
2914                                // the update interval elapses.
2915                                _ = progress_update_timer => {
2916                                    match progress_update_count.compare_exchange(
2917                                        last_progress_update_count,
2918                                        last_progress_update_count + 1,
2919                                        SeqCst,
2920                                        SeqCst
2921                                    ) {
2922                                        Ok(_) => {
2923                                            last_progress_update_count += 1;
2924                                            self.send_status_update(true, None);
2925                                        }
2926                                        Err(count) => {
2927                                            last_progress_update_count = count;
2928                                        }
2929                                    }
2930                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2931                                }
2932
2933                                // Recursively load directories from the file system.
2934                                job = scan_jobs_rx.recv().fuse() => {
2935                                    let Ok(job) = job else { break };
2936                                    if let Err(err) = self.scan_dir(&job).await {
2937                                        if job.path.as_ref() != Path::new("") {
2938                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2939                                        }
2940                                    }
2941                                }
2942                            }
2943                        }
2944                    })
2945                }
2946            })
2947            .await;
2948    }
2949
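        /// Publish the current snapshot and the accumulated changes to the worktree, returning
        /// false if the receiving side has been dropped.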
2950    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2951        let mut state = self.state.lock();
2952        if state.changed_paths.is_empty() && scanning {
2953            return true;
2954        }
2955
2956        let new_snapshot = state.snapshot.clone();
2957        let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
2958        let changes = self.build_change_set(&old_snapshot, &new_snapshot, &state.changed_paths);
2959        state.changed_paths.clear();
2960
2961        self.status_updates_tx
2962            .unbounded_send(ScanState::Updated {
2963                snapshot: new_snapshot,
2964                changes,
2965                scanning,
2966                barrier,
2967            })
2968            .is_ok()
2969    }
2970
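        /// Scan a single directory: create entries for its children, apply any `.gitignore`
        /// found there, and enqueue scan jobs for its subdirectories.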
2971    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2972        let mut new_entries: Vec<Entry> = Vec::new();
2973        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2974        let mut ignore_stack = job.ignore_stack.clone();
2975        let mut new_ignore = None;
2976        let (root_abs_path, root_char_bag, next_entry_id) = {
2977            let snapshot = &self.state.lock().snapshot;
2978            (
2979                snapshot.abs_path().clone(),
2980                snapshot.root_char_bag,
2981                self.next_entry_id.clone(),
2982            )
2983        };
2984        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2985        while let Some(child_abs_path) = child_paths.next().await {
2986            let child_abs_path: Arc<Path> = match child_abs_path {
2987                Ok(child_abs_path) => child_abs_path.into(),
2988                Err(error) => {
2989                    log::error!("error processing entry {:?}", error);
2990                    continue;
2991                }
2992            };
2993
2994            let child_name = child_abs_path.file_name().unwrap();
2995            let child_path: Arc<Path> = job.path.join(child_name).into();
2996            let child_metadata = match self.fs.metadata(&child_abs_path).await {
2997                Ok(Some(metadata)) => metadata,
2998                Ok(None) => continue,
2999                Err(err) => {
3000                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
3001                    continue;
3002                }
3003            };
3004
3005            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
3006            if child_name == *GITIGNORE {
3007                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
3008                    Ok(ignore) => {
3009                        let ignore = Arc::new(ignore);
3010                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3011                        new_ignore = Some(ignore);
3012                    }
3013                    Err(error) => {
3014                        log::error!(
3015                            "error loading .gitignore file {:?} - {:?}",
3016                            child_name,
3017                            error
3018                        );
3019                    }
3020                }
3021
3022                // Update the ignore status of any child entries we've already processed to
3023                // reflect the ignore file in the current directory. Because `.gitignore`
3024                // starts with a `.`, there should rarely be many such entries. Also update
3025                // the ignore stack associated with any new jobs.
3026                let mut new_jobs = new_jobs.iter_mut();
3027                for entry in &mut new_entries {
3028                    let entry_abs_path = root_abs_path.join(&entry.path);
3029                    entry.is_ignored =
3030                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
3031
3032                    if entry.is_dir() {
3033                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
3034                            job.ignore_stack = if entry.is_ignored {
3035                                IgnoreStack::all()
3036                            } else {
3037                                ignore_stack.clone()
3038                            };
3039                        }
3040                    }
3041                }
3042            }
3043
3044            let mut child_entry = Entry::new(
3045                child_path.clone(),
3046                &child_metadata,
3047                &next_entry_id,
3048                root_char_bag,
3049            );
3050
3051            if child_entry.is_dir() {
3052                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
3053                child_entry.is_ignored = is_ignored;
3054
3055                // Avoid recursing forever (and eventually crashing) when we encounter a recursive symlink
3056                if !job.ancestor_inodes.contains(&child_entry.inode) {
3057                    let mut ancestor_inodes = job.ancestor_inodes.clone();
3058                    ancestor_inodes.insert(child_entry.inode);
3059
3060                    new_jobs.push(Some(ScanJob {
3061                        abs_path: child_abs_path,
3062                        path: child_path,
3063                        ignore_stack: if is_ignored {
3064                            IgnoreStack::all()
3065                        } else {
3066                            ignore_stack.clone()
3067                        },
3068                        ancestor_inodes,
3069                        scan_queue: job.scan_queue.clone(),
3070                    }));
3071                } else {
3072                    new_jobs.push(None);
3073                }
3074            } else {
3075                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
3076            }
3077
3078            new_entries.push(child_entry);
3079        }
3080
3081        {
3082            let mut state = self.state.lock();
3083            state.populate_dir(job.path.clone(), new_entries, new_ignore, self.fs.as_ref());
3084            if let Err(ix) = state.changed_paths.binary_search(&job.path) {
3085                state.changed_paths.insert(ix, job.path.clone());
3086            }
3087        }
3088
3089        for new_job in new_jobs {
3090            if let Some(new_job) = new_job {
3091                job.scan_queue.send(new_job).await.unwrap();
3092            }
3093        }
3094
3095        Ok(())
3096    }
3097
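        // Reload the entries for a batch of absolute paths reported by the FS event stream
        // (or by a refresh request), returning the affected worktree-relative paths. When
        // `scan_queue_tx` is provided, changed directories are rescanned recursively.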
3098    async fn reload_entries_for_paths(
3099        &self,
3100        mut abs_paths: Vec<PathBuf>,
3101        scan_queue_tx: Option<Sender<ScanJob>>,
3102    ) -> Option<Vec<Arc<Path>>> {
3103        let doing_recursive_update = scan_queue_tx.is_some();
3104
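            // After sorting, drop any path that is a descendant of an earlier path, since
            // refreshing the ancestor already covers it (e.g. "/root/a" subsumes "/root/a/b").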
3105        abs_paths.sort_unstable();
3106        abs_paths.dedup_by(|a, b| a.starts_with(&b));
3107
3108        let root_abs_path = self.state.lock().snapshot.abs_path.clone();
3109        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
3110        let metadata = futures::future::join_all(
3111            abs_paths
3112                .iter()
3113                .map(|abs_path| self.fs.metadata(&abs_path))
3114                .collect::<Vec<_>>(),
3115        )
3116        .await;
3117
3118        let mut state = self.state.lock();
3119        let snapshot = &mut state.snapshot;
3120        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
3121        snapshot.scan_id += 1;
3122        if is_idle && !doing_recursive_update {
3123            snapshot.completed_scan_id = snapshot.scan_id;
3124        }
3125
3126        // Remove any entries for paths that no longer exist or are being recursively
3127        // refreshed. Do this before adding any new entries, so that renames can be
3128        // detected regardless of the order of the paths.
3129        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
3130        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
3131            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3132                if matches!(metadata, Ok(None)) || doing_recursive_update {
3133                    state.remove_path(path);
3134                }
3135                event_paths.push(path.into());
3136            } else {
3137                log::error!(
3138                    "unexpected event {:?} for root path {:?}",
3139                    abs_path,
3140                    root_canonical_path
3141                );
3142            }
3143        }
3144
3145        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
3146            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
3147
3148            match metadata {
3149                Ok(Some(metadata)) => {
3150                    let ignore_stack = state
3151                        .snapshot
3152                        .ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
3153                    let mut fs_entry = Entry::new(
3154                        path.clone(),
3155                        &metadata,
3156                        self.next_entry_id.as_ref(),
3157                        state.snapshot.root_char_bag,
3158                    );
3159                    fs_entry.is_ignored = ignore_stack.is_all();
3160                    state.insert_entry(fs_entry, self.fs.as_ref());
3161
3162                    if let Some(scan_queue_tx) = &scan_queue_tx {
3163                        let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);
3164                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
3165                            ancestor_inodes.insert(metadata.inode);
3166                            smol::block_on(scan_queue_tx.send(ScanJob {
3167                                abs_path,
3168                                path,
3169                                ignore_stack,
3170                                ancestor_inodes,
3171                                scan_queue: scan_queue_tx.clone(),
3172                            }))
3173                            .unwrap();
3174                        }
3175                    }
3176                }
3177                Ok(None) => {
3178                    self.remove_repo_path(&path, &mut state.snapshot);
3179                }
3180                Err(err) => {
3181                    // TODO - create a special 'error' entry in the entries tree to mark this
3182                    log::error!("error reading file on event {:?}", err);
3183                }
3184            }
3185        }
3186
3187        util::extend_sorted(
3188            &mut state.changed_paths,
3189            event_paths.iter().cloned(),
3190            usize::MAX,
3191            Ord::cmp,
3192        );
3193
3194        Some(event_paths)
3195    }
3196
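        // Handle the deletion of a path with respect to git state: if the path was a repository's
        // work directory, drop that repository; otherwise clear the statuses recorded for the path
        // and its descendants in the containing repository. Paths inside `.git` are handled elsewhere.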
3197    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3198        if !path
3199            .components()
3200            .any(|component| component.as_os_str() == *DOT_GIT)
3201        {
3202            let scan_id = snapshot.scan_id;
3203
3204            if let Some(repository) = snapshot.repository_for_work_directory(path) {
3205                let entry = repository.work_directory.0;
3206                snapshot.git_repositories.remove(&entry);
3207                snapshot
3208                    .snapshot
3209                    .repository_entries
3210                    .remove(&RepositoryWorkDirectory(path.into()));
3211                return Some(());
3212            }
3213
3214            let repo = snapshot.repository_for_path(&path)?;
3215            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
3216            let work_dir = repo.work_directory(snapshot)?;
3217            let work_dir_id = repo.work_directory;
3218
3219            snapshot
3220                .git_repositories
3221                .update(&work_dir_id, |entry| entry.work_dir_scan_id = scan_id);
3222
3223            snapshot.repository_entries.update(&work_dir, |entry| {
3224                entry
3225                    .statuses
3226                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
3227            });
3228        }
3229
3230        Some(())
3231    }
3232
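        // Refresh git state for a changed path. Changes inside a `.git` directory reload the
        // repository's index, branch, and statuses; changes elsewhere update the statuses of the
        // affected entries within their containing repository.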
3233    fn reload_repo_for_file_path(
3234        &self,
3235        path: &Path,
3236        snapshot: &mut LocalSnapshot,
3237        fs: &dyn Fs,
3238    ) -> Option<()> {
3239        let scan_id = snapshot.scan_id;
3240
3241        if path
3242            .components()
3243            .any(|component| component.as_os_str() == *DOT_GIT)
3244        {
3245            let (entry_id, repo_ptr) = {
3246                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
3247                    let dot_git_dir = path.ancestors()
3248                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
3249                        .next()?;
3250
3251                    snapshot.build_repo(dot_git_dir.into(), fs);
3252                    return None;
3253                };
3254                if repo.git_dir_scan_id == scan_id {
3255                    return None;
3256                }
3257                (*entry_id, repo.repo_ptr.to_owned())
3258            };
3259
3260            let work_dir = snapshot
3261                .entry_for_id(entry_id)
3262                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
3263
3264            let repo = repo_ptr.lock();
3265            repo.reload_index();
3266            let branch = repo.branch_name();
3267            let statuses = repo.statuses().unwrap_or_default();
3268
3269            snapshot.git_repositories.update(&entry_id, |entry| {
3270                entry.work_dir_scan_id = scan_id;
3271                entry.git_dir_scan_id = scan_id;
3272            });
3273
3274            snapshot.repository_entries.update(&work_dir, |entry| {
3275                entry.branch = branch.map(Into::into);
3276                entry.statuses = statuses;
3277            });
3278        } else {
3279            if snapshot
3280                .entry_for_path(&path)
3281                .map(|entry| entry.is_ignored)
3282                .unwrap_or(false)
3283            {
3284                self.remove_repo_path(&path, snapshot);
3285                return None;
3286            }
3287
3288            let repo = snapshot.repository_for_path(&path)?;
3289
3290            let work_dir = repo.work_directory(snapshot)?;
3291            let work_dir_id = repo.work_directory.clone();
3292
3293            let (local_repo, git_dir_scan_id) =
3294                snapshot.git_repositories.update(&work_dir_id, |entry| {
3295                    entry.work_dir_scan_id = scan_id;
3296                    (entry.repo_ptr.clone(), entry.git_dir_scan_id)
3297                })?;
3298
3299            // Short circuit if we've already scanned everything
3300            if git_dir_scan_id == scan_id {
3301                return None;
3302            }
3303
3304            let mut repository = snapshot.repository_entries.remove(&work_dir)?;
3305
3306            for entry in snapshot.descendent_entries(false, false, path) {
3307                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
3308                    continue;
3309                };
3310
3311                let status = local_repo.lock().status(&repo_path);
3312                if let Some(status) = status {
3313                    repository.statuses.insert(repo_path.clone(), status);
3314                } else {
3315                    repository.statuses.remove(&repo_path);
3316                }
3317            }
3318
3319            snapshot.repository_entries.insert(work_dir, repository)
3320        }
3321
3322        Some(())
3323    }
3324
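        // Recompute which entries are ignored after `.gitignore` files change, fanning the work
        // out over a job queue processed by one task per CPU.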
3325    async fn update_ignore_statuses(&self) {
3326        use futures::FutureExt as _;
3327
3328        let mut snapshot = self.state.lock().snapshot.clone();
3329        let mut ignores_to_update = Vec::new();
3330        let mut ignores_to_delete = Vec::new();
3331        let abs_path = snapshot.abs_path.clone();
3332        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
3333            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
3334                if *needs_update {
3335                    *needs_update = false;
3336                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
3337                        ignores_to_update.push(parent_abs_path.clone());
3338                    }
3339                }
3340
3341                let ignore_path = parent_path.join(&*GITIGNORE);
3342                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
3343                    ignores_to_delete.push(parent_abs_path.clone());
3344                }
3345            }
3346        }
3347
3348        for parent_abs_path in ignores_to_delete {
3349            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
3350            self.state
3351                .lock()
3352                .snapshot
3353                .ignores_by_parent_abs_path
3354                .remove(&parent_abs_path);
3355        }
3356
3357        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
3358        ignores_to_update.sort_unstable();
3359        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
3360        while let Some(parent_abs_path) = ignores_to_update.next() {
3361            while ignores_to_update
3362                .peek()
3363                .map_or(false, |p| p.starts_with(&parent_abs_path))
3364            {
3365                ignores_to_update.next().unwrap();
3366            }
3367
3368            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
3369            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
3370                abs_path: parent_abs_path,
3371                ignore_stack,
3372                ignore_queue: ignore_queue_tx.clone(),
3373            }))
3374            .unwrap();
3375        }
3376        drop(ignore_queue_tx);
3377
3378        self.executor
3379            .scoped(|scope| {
3380                for _ in 0..self.executor.num_cpus() {
3381                    scope.spawn(async {
3382                        loop {
3383                            select_biased! {
3384                                // Process any path refresh requests before moving on to process
3385                                // the queue of ignore statuses.
3386                                request = self.refresh_requests_rx.recv().fuse() => {
3387                                    let Ok((paths, barrier)) = request else { break };
3388                                    if !self.process_refresh_request(paths, barrier).await {
3389                                        return;
3390                                    }
3391                                }
3392
3393                                // Recursively process directories whose ignores have changed.
3394                                job = ignore_queue_rx.recv().fuse() => {
3395                                    let Ok(job) = job else { break };
3396                                    self.update_ignore_status(job, &snapshot).await;
3397                                }
3398                            }
3399                        }
3400                    });
3401                }
3402            })
3403            .await;
3404    }
3405
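        // Re-evaluate the ignore status of a directory's children against the given ignore stack,
        // queueing jobs for subdirectories and recording any entries whose status changed.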
3406    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
3407        let mut ignore_stack = job.ignore_stack;
3408        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
3409            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3410        }
3411
3412        let mut entries_by_id_edits = Vec::new();
3413        let mut entries_by_path_edits = Vec::new();
3414        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
3415        for mut entry in snapshot.child_entries(path).cloned() {
3416            let was_ignored = entry.is_ignored;
3417            let abs_path = snapshot.abs_path().join(&entry.path);
3418            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
3419            if entry.is_dir() {
3420                let child_ignore_stack = if entry.is_ignored {
3421                    IgnoreStack::all()
3422                } else {
3423                    ignore_stack.clone()
3424                };
3425                job.ignore_queue
3426                    .send(UpdateIgnoreStatusJob {
3427                        abs_path: abs_path.into(),
3428                        ignore_stack: child_ignore_stack,
3429                        ignore_queue: job.ignore_queue.clone(),
3430                    })
3431                    .await
3432                    .unwrap();
3433            }
3434
3435            if entry.is_ignored != was_ignored {
3436                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
3437                path_entry.scan_id = snapshot.scan_id;
3438                path_entry.is_ignored = entry.is_ignored;
3439                entries_by_id_edits.push(Edit::Insert(path_entry));
3440                entries_by_path_edits.push(Edit::Insert(entry));
3441            }
3442        }
3443
3444        let state = &mut self.state.lock();
3445        for edit in &entries_by_path_edits {
3446            if let Edit::Insert(entry) = edit {
3447                if let Err(ix) = state.changed_paths.binary_search(&entry.path) {
3448                    state.changed_paths.insert(ix, entry.path.clone());
3449                }
3450            }
3451        }
3452
3453        state
3454            .snapshot
3455            .entries_by_path
3456            .edit(entries_by_path_edits, &());
3457        state.snapshot.entries_by_id.edit(entries_by_id_edits, &());
3458    }
3459
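        // Compare the old and new snapshots along the changed paths, producing the
        // (path, entry id, change) tuples reported to observers. During the initial
        // scan, newly discovered entries are reported as `Loaded` rather than `Added`.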
3460    fn build_change_set(
3461        &self,
3462        old_snapshot: &Snapshot,
3463        new_snapshot: &Snapshot,
3464        event_paths: &[Arc<Path>],
3465    ) -> UpdatedEntriesSet {
3466        use BackgroundScannerPhase::*;
3467        use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};
3468
3469        // Identify which paths have changed. Use the known set of changed
3470        // parent paths to optimize the search.
3471        let mut changes = Vec::new();
3472        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3473        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3474        old_paths.next(&());
3475        new_paths.next(&());
3476        for path in event_paths {
3477            let path = PathKey(path.clone());
3478            if old_paths.item().map_or(false, |e| e.path < path.0) {
3479                old_paths.seek_forward(&path, Bias::Left, &());
3480            }
3481            if new_paths.item().map_or(false, |e| e.path < path.0) {
3482                new_paths.seek_forward(&path, Bias::Left, &());
3483            }
3484
3485            loop {
3486                match (old_paths.item(), new_paths.item()) {
3487                    (Some(old_entry), Some(new_entry)) => {
3488                        if old_entry.path > path.0
3489                            && new_entry.path > path.0
3490                            && !old_entry.path.starts_with(&path.0)
3491                            && !new_entry.path.starts_with(&path.0)
3492                        {
3493                            break;
3494                        }
3495
3496                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3497                            Ordering::Less => {
3498                                changes.push((old_entry.path.clone(), old_entry.id, Removed));
3499                                old_paths.next(&());
3500                            }
3501                            Ordering::Equal => {
3502                                if self.phase == EventsReceivedDuringInitialScan {
3503                                    // If the worktree was not fully initialized when this event was generated,
3504                                    // we can't know whether this entry was added during the scan or whether
3505                                    // it was merely updated.
3506                                    changes.push((
3507                                        new_entry.path.clone(),
3508                                        new_entry.id,
3509                                        AddedOrUpdated,
3510                                    ));
3511                                } else if old_entry.id != new_entry.id {
3512                                    changes.push((old_entry.path.clone(), old_entry.id, Removed));
3513                                    changes.push((new_entry.path.clone(), new_entry.id, Added));
3514                                } else if old_entry != new_entry {
3515                                    changes.push((new_entry.path.clone(), new_entry.id, Updated));
3516                                }
3517                                old_paths.next(&());
3518                                new_paths.next(&());
3519                            }
3520                            Ordering::Greater => {
3521                                changes.push((
3522                                    new_entry.path.clone(),
3523                                    new_entry.id,
3524                                    if self.phase == InitialScan {
3525                                        Loaded
3526                                    } else {
3527                                        Added
3528                                    },
3529                                ));
3530                                new_paths.next(&());
3531                            }
3532                        }
3533                    }
3534                    (Some(old_entry), None) => {
3535                        changes.push((old_entry.path.clone(), old_entry.id, Removed));
3536                        old_paths.next(&());
3537                    }
3538                    (None, Some(new_entry)) => {
3539                        changes.push((
3540                            new_entry.path.clone(),
3541                            new_entry.id,
3542                            if self.phase == InitialScan {
3543                                Loaded
3544                            } else {
3545                                Added
3546                            },
3547                        ));
3548                        new_paths.next(&());
3549                    }
3550                    (None, None) => break,
3551                }
3552            }
3553        }
3554
3555        changes.into()
3556    }
3557
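        // Delay between progress updates: disabled entirely when progress updates are off,
        // randomized under the fake file system in tests, and 100ms otherwise.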
3558    async fn progress_timer(&self, running: bool) {
3559        if !running {
3560            return futures::future::pending().await;
3561        }
3562
3563        #[cfg(any(test, feature = "test-support"))]
3564        if self.fs.is_fake() {
3565            return self.executor.simulate_random_delay().await;
3566        }
3567
3568        smol::Timer::after(Duration::from_millis(100)).await;
3569    }
3570}
3571
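    // Extend the root's CharBag with the lower-cased characters of `path`, used by fuzzy
    // matching to cheaply check which characters occur anywhere in the full path.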
3572fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3573    let mut result = root_char_bag;
3574    result.extend(
3575        path.to_string_lossy()
3576            .chars()
3577            .map(|c| c.to_ascii_lowercase()),
3578    );
3579    result
3580}
3581
3582struct ScanJob {
3583    abs_path: Arc<Path>,
3584    path: Arc<Path>,
3585    ignore_stack: Arc<IgnoreStack>,
3586    scan_queue: Sender<ScanJob>,
3587    ancestor_inodes: TreeSet<u64>,
3588}
3589
3590struct UpdateIgnoreStatusJob {
3591    abs_path: Arc<Path>,
3592    ignore_stack: Arc<IgnoreStack>,
3593    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3594}
3595
3596pub trait WorktreeHandle {
3597    #[cfg(any(test, feature = "test-support"))]
3598    fn flush_fs_events<'a>(
3599        &self,
3600        cx: &'a gpui::TestAppContext,
3601    ) -> futures::future::LocalBoxFuture<'a, ()>;
3602}
3603
3604impl WorktreeHandle for ModelHandle<Worktree> {
3605    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
3606    // occurred before the worktree was constructed. These events can cause the worktree to perform
3607    // extra directory scans and emit extra scan-state notifications.
3608    //
3609    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3610    // to ensure that all redundant FS events have already been processed.
3611    #[cfg(any(test, feature = "test-support"))]
3612    fn flush_fs_events<'a>(
3613        &self,
3614        cx: &'a gpui::TestAppContext,
3615    ) -> futures::future::LocalBoxFuture<'a, ()> {
3616        let filename = "fs-event-sentinel";
3617        let tree = self.clone();
3618        let (fs, root_path) = self.read_with(cx, |tree, _| {
3619            let tree = tree.as_local().unwrap();
3620            (tree.fs.clone(), tree.abs_path().clone())
3621        });
3622
3623        async move {
3624            fs.create_file(&root_path.join(filename), Default::default())
3625                .await
3626                .unwrap();
3627            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3628                .await;
3629
3630            fs.remove_file(&root_path.join(filename), Default::default())
3631                .await
3632                .unwrap();
3633            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3634                .await;
3635
3636            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3637                .await;
3638        }
3639        .boxed_local()
3640    }
3641}
3642
3643#[derive(Clone, Debug)]
3644struct TraversalProgress<'a> {
3645    max_path: &'a Path,
3646    count: usize,
3647    visible_count: usize,
3648    file_count: usize,
3649    visible_file_count: usize,
3650}
3651
3652impl<'a> TraversalProgress<'a> {
3653    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3654        match (include_ignored, include_dirs) {
3655            (true, true) => self.count,
3656            (true, false) => self.file_count,
3657            (false, true) => self.visible_count,
3658            (false, false) => self.visible_file_count,
3659        }
3660    }
3661}
3662
3663impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3664    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3665        self.max_path = summary.max_path.as_ref();
3666        self.count += summary.count;
3667        self.visible_count += summary.visible_count;
3668        self.file_count += summary.file_count;
3669        self.visible_file_count += summary.visible_file_count;
3670    }
3671}
3672
3673impl<'a> Default for TraversalProgress<'a> {
3674    fn default() -> Self {
3675        Self {
3676            max_path: Path::new(""),
3677            count: 0,
3678            visible_count: 0,
3679            file_count: 0,
3680            visible_file_count: 0,
3681        }
3682    }
3683}
3684
3685pub struct Traversal<'a> {
3686    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3687    include_ignored: bool,
3688    include_dirs: bool,
3689}
3690
3691impl<'a> Traversal<'a> {
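        // Advance the cursor past the current entry to the next one matching the
        // traversal's include filters.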
3692    pub fn advance(&mut self) -> bool {
3693        self.cursor.seek_forward(
3694            &TraversalTarget::Count {
3695                count: self.end_offset() + 1,
3696                include_dirs: self.include_dirs,
3697                include_ignored: self.include_ignored,
3698            },
3699            Bias::Left,
3700            &(),
3701        )
3702    }
3703
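        // Advance past the current entry and all of its descendants, stopping at the
        // next entry that matches the traversal's include filters.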
3704    pub fn advance_to_sibling(&mut self) -> bool {
3705        while let Some(entry) = self.cursor.item() {
3706            self.cursor.seek_forward(
3707                &TraversalTarget::PathSuccessor(&entry.path),
3708                Bias::Left,
3709                &(),
3710            );
3711            if let Some(entry) = self.cursor.item() {
3712                if (self.include_dirs || !entry.is_dir())
3713                    && (self.include_ignored || !entry.is_ignored)
3714                {
3715                    return true;
3716                }
3717            }
3718        }
3719        false
3720    }
3721
3722    pub fn entry(&self) -> Option<&'a Entry> {
3723        self.cursor.item()
3724    }
3725
3726    pub fn start_offset(&self) -> usize {
3727        self.cursor
3728            .start()
3729            .count(self.include_dirs, self.include_ignored)
3730    }
3731
3732    pub fn end_offset(&self) -> usize {
3733        self.cursor
3734            .end(&())
3735            .count(self.include_dirs, self.include_ignored)
3736    }
3737}
3738
3739impl<'a> Iterator for Traversal<'a> {
3740    type Item = &'a Entry;
3741
3742    fn next(&mut self) -> Option<Self::Item> {
3743        if let Some(item) = self.entry() {
3744            self.advance();
3745            Some(item)
3746        } else {
3747            None
3748        }
3749    }
3750}
3751
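    // Seek targets for a Traversal cursor: an exact path, the first entry that is not
    // beneath a given path, or an absolute entry count under the traversal's filters.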
3752#[derive(Debug)]
3753enum TraversalTarget<'a> {
3754    Path(&'a Path),
3755    PathSuccessor(&'a Path),
3756    Count {
3757        count: usize,
3758        include_ignored: bool,
3759        include_dirs: bool,
3760    },
3761}
3762
3763impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3764    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3765        match self {
3766            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3767            TraversalTarget::PathSuccessor(path) => {
3768                if !cursor_location.max_path.starts_with(path) {
3769                    Ordering::Equal
3770                } else {
3771                    Ordering::Greater
3772                }
3773            }
3774            TraversalTarget::Count {
3775                count,
3776                include_dirs,
3777                include_ignored,
3778            } => Ord::cmp(
3779                count,
3780                &cursor_location.count(*include_dirs, *include_ignored),
3781            ),
3782        }
3783    }
3784}
3785
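    // Iterates over the direct children of a directory by advancing from each child to
    // its next sibling, skipping that child's descendants.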
3786struct ChildEntriesIter<'a> {
3787    parent_path: &'a Path,
3788    traversal: Traversal<'a>,
3789}
3790
3791impl<'a> Iterator for ChildEntriesIter<'a> {
3792    type Item = &'a Entry;
3793
3794    fn next(&mut self) -> Option<Self::Item> {
3795        if let Some(item) = self.traversal.entry() {
3796            if item.path.starts_with(&self.parent_path) {
3797                self.traversal.advance_to_sibling();
3798                return Some(item);
3799            }
3800        }
3801        None
3802    }
3803}
3804
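    // Iterates over every entry beneath a directory, in traversal (depth-first) order.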
3805struct DescendentEntriesIter<'a> {
3806    parent_path: &'a Path,
3807    traversal: Traversal<'a>,
3808}
3809
3810impl<'a> Iterator for DescendentEntriesIter<'a> {
3811    type Item = &'a Entry;
3812
3813    fn next(&mut self) -> Option<Self::Item> {
3814        if let Some(item) = self.traversal.entry() {
3815            if item.path.starts_with(&self.parent_path) {
3816                self.traversal.advance();
3817                return Some(item);
3818            }
3819        }
3820        None
3821    }
3822}
3823
3824impl<'a> From<&'a Entry> for proto::Entry {
3825    fn from(entry: &'a Entry) -> Self {
3826        Self {
3827            id: entry.id.to_proto(),
3828            is_dir: entry.is_dir(),
3829            path: entry.path.to_string_lossy().into(),
3830            inode: entry.inode,
3831            mtime: Some(entry.mtime.into()),
3832            is_symlink: entry.is_symlink,
3833            is_ignored: entry.is_ignored,
3834        }
3835    }
3836}
3837
3838impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3839    type Error = anyhow::Error;
3840
3841    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3842        if let Some(mtime) = entry.mtime {
3843            let kind = if entry.is_dir {
3844                EntryKind::Dir
3845            } else {
3846                let mut char_bag = *root_char_bag;
3847                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3848                EntryKind::File(char_bag)
3849            };
3850            let path: Arc<Path> = PathBuf::from(entry.path).into();
3851            Ok(Entry {
3852                id: ProjectEntryId::from_proto(entry.id),
3853                kind,
3854                path,
3855                inode: entry.inode,
3856                mtime: mtime.into(),
3857                is_symlink: entry.is_symlink,
3858                is_ignored: entry.is_ignored,
3859            })
3860        } else {
3861            Err(anyhow!(
3862                "missing mtime in remote worktree entry {:?}",
3863                entry.path
3864            ))
3865        }
3866    }
3867}
3868
3869#[cfg(test)]
3870mod tests {
3871    use super::*;
3872    use fs::{FakeFs, RealFs};
3873    use gpui::{executor::Deterministic, TestAppContext};
3874    use pretty_assertions::assert_eq;
3875    use rand::prelude::*;
3876    use serde_json::json;
3877    use std::{env, fmt::Write};
3878    use util::{http::FakeHttpClient, test::temp_tree};
3879
3880    #[gpui::test]
3881    async fn test_traversal(cx: &mut TestAppContext) {
3882        let fs = FakeFs::new(cx.background());
3883        fs.insert_tree(
3884            "/root",
3885            json!({
3886               ".gitignore": "a/b\n",
3887               "a": {
3888                   "b": "",
3889                   "c": "",
3890               }
3891            }),
3892        )
3893        .await;
3894
3895        let http_client = FakeHttpClient::with_404_response();
3896        let client = cx.read(|cx| Client::new(http_client, cx));
3897
3898        let tree = Worktree::local(
3899            client,
3900            Path::new("/root"),
3901            true,
3902            fs,
3903            Default::default(),
3904            &mut cx.to_async(),
3905        )
3906        .await
3907        .unwrap();
3908        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3909            .await;
3910
3911        tree.read_with(cx, |tree, _| {
3912            assert_eq!(
3913                tree.entries(false)
3914                    .map(|entry| entry.path.as_ref())
3915                    .collect::<Vec<_>>(),
3916                vec![
3917                    Path::new(""),
3918                    Path::new(".gitignore"),
3919                    Path::new("a"),
3920                    Path::new("a/c"),
3921                ]
3922            );
3923            assert_eq!(
3924                tree.entries(true)
3925                    .map(|entry| entry.path.as_ref())
3926                    .collect::<Vec<_>>(),
3927                vec![
3928                    Path::new(""),
3929                    Path::new(".gitignore"),
3930                    Path::new("a"),
3931                    Path::new("a/b"),
3932                    Path::new("a/c"),
3933                ]
3934            );
3935        })
3936    }
3937
3938    #[gpui::test]
3939    async fn test_descendent_entries(cx: &mut TestAppContext) {
3940        let fs = FakeFs::new(cx.background());
3941        fs.insert_tree(
3942            "/root",
3943            json!({
3944                "a": "",
3945                "b": {
3946                   "c": {
3947                       "d": ""
3948                   },
3949                   "e": {}
3950                },
3951                "f": "",
3952                "g": {
3953                    "h": {}
3954                },
3955                "i": {
3956                    "j": {
3957                        "k": ""
3958                    },
3959                    "l": {
3960
3961                    }
3962                },
3963                ".gitignore": "i/j\n",
3964            }),
3965        )
3966        .await;
3967
3968        let http_client = FakeHttpClient::with_404_response();
3969        let client = cx.read(|cx| Client::new(http_client, cx));
3970
3971        let tree = Worktree::local(
3972            client,
3973            Path::new("/root"),
3974            true,
3975            fs,
3976            Default::default(),
3977            &mut cx.to_async(),
3978        )
3979        .await
3980        .unwrap();
3981        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3982            .await;
3983
3984        tree.read_with(cx, |tree, _| {
3985            assert_eq!(
3986                tree.descendent_entries(false, false, Path::new("b"))
3987                    .map(|entry| entry.path.as_ref())
3988                    .collect::<Vec<_>>(),
3989                vec![Path::new("b/c/d"),]
3990            );
3991            assert_eq!(
3992                tree.descendent_entries(true, false, Path::new("b"))
3993                    .map(|entry| entry.path.as_ref())
3994                    .collect::<Vec<_>>(),
3995                vec![
3996                    Path::new("b"),
3997                    Path::new("b/c"),
3998                    Path::new("b/c/d"),
3999                    Path::new("b/e"),
4000                ]
4001            );
4002
4003            assert_eq!(
4004                tree.descendent_entries(false, false, Path::new("g"))
4005                    .map(|entry| entry.path.as_ref())
4006                    .collect::<Vec<_>>(),
4007                Vec::<PathBuf>::new()
4008            );
4009            assert_eq!(
4010                tree.descendent_entries(true, false, Path::new("g"))
4011                    .map(|entry| entry.path.as_ref())
4012                    .collect::<Vec<_>>(),
4013                vec![Path::new("g"), Path::new("g/h"),]
4014            );
4015
4016            assert_eq!(
4017                tree.descendent_entries(false, false, Path::new("i"))
4018                    .map(|entry| entry.path.as_ref())
4019                    .collect::<Vec<_>>(),
4020                Vec::<PathBuf>::new()
4021            );
4022            assert_eq!(
4023                tree.descendent_entries(false, true, Path::new("i"))
4024                    .map(|entry| entry.path.as_ref())
4025                    .collect::<Vec<_>>(),
4026                vec![Path::new("i/j/k")]
4027            );
4028            assert_eq!(
4029                tree.descendent_entries(true, false, Path::new("i"))
4030                    .map(|entry| entry.path.as_ref())
4031                    .collect::<Vec<_>>(),
4032                vec![Path::new("i"), Path::new("i/l"),]
4033            );
4034        })
4035    }
4036
4037    #[gpui::test(iterations = 10)]
4038    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
4039        let fs = FakeFs::new(cx.background());
4040        fs.insert_tree(
4041            "/root",
4042            json!({
4043                "lib": {
4044                    "a": {
4045                        "a.txt": ""
4046                    },
4047                    "b": {
4048                        "b.txt": ""
4049                    }
4050                }
4051            }),
4052        )
4053        .await;
4054        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
4055        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
4056
4057        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4058        let tree = Worktree::local(
4059            client,
4060            Path::new("/root"),
4061            true,
4062            fs.clone(),
4063            Default::default(),
4064            &mut cx.to_async(),
4065        )
4066        .await
4067        .unwrap();
4068
4069        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4070            .await;
4071
4072        tree.read_with(cx, |tree, _| {
4073            assert_eq!(
4074                tree.entries(false)
4075                    .map(|entry| entry.path.as_ref())
4076                    .collect::<Vec<_>>(),
4077                vec![
4078                    Path::new(""),
4079                    Path::new("lib"),
4080                    Path::new("lib/a"),
4081                    Path::new("lib/a/a.txt"),
4082                    Path::new("lib/a/lib"),
4083                    Path::new("lib/b"),
4084                    Path::new("lib/b/b.txt"),
4085                    Path::new("lib/b/lib"),
4086                ]
4087            );
4088        });
4089
4090        fs.rename(
4091            Path::new("/root/lib/a/lib"),
4092            Path::new("/root/lib/a/lib-2"),
4093            Default::default(),
4094        )
4095        .await
4096        .unwrap();
4097        executor.run_until_parked();
4098        tree.read_with(cx, |tree, _| {
4099            assert_eq!(
4100                tree.entries(false)
4101                    .map(|entry| entry.path.as_ref())
4102                    .collect::<Vec<_>>(),
4103                vec![
4104                    Path::new(""),
4105                    Path::new("lib"),
4106                    Path::new("lib/a"),
4107                    Path::new("lib/a/a.txt"),
4108                    Path::new("lib/a/lib-2"),
4109                    Path::new("lib/b"),
4110                    Path::new("lib/b/b.txt"),
4111                    Path::new("lib/b/lib"),
4112                ]
4113            );
4114        });
4115    }
4116
4117    #[gpui::test]
4118    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
4119        // .gitignores are handled explicitly by Zed and do not use the git
4120        // machinery that the git_tests module checks
4121        let parent_dir = temp_tree(json!({
4122            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
4123            "tree": {
4124                ".git": {},
4125                ".gitignore": "ignored-dir\n",
4126                "tracked-dir": {
4127                    "tracked-file1": "",
4128                    "ancestor-ignored-file1": "",
4129                },
4130                "ignored-dir": {
4131                    "ignored-file1": ""
4132                }
4133            }
4134        }));
4135        let dir = parent_dir.path().join("tree");
4136
4137        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4138
4139        let tree = Worktree::local(
4140            client,
4141            dir.as_path(),
4142            true,
4143            Arc::new(RealFs),
4144            Default::default(),
4145            &mut cx.to_async(),
4146        )
4147        .await
4148        .unwrap();
4149        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4150            .await;
4151        tree.flush_fs_events(cx).await;
4152        cx.read(|cx| {
4153            let tree = tree.read(cx);
4154            assert!(
4155                !tree
4156                    .entry_for_path("tracked-dir/tracked-file1")
4157                    .unwrap()
4158                    .is_ignored
4159            );
4160            assert!(
4161                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
4162                    .unwrap()
4163                    .is_ignored
4164            );
4165            assert!(
4166                tree.entry_for_path("ignored-dir/ignored-file1")
4167                    .unwrap()
4168                    .is_ignored
4169            );
4170        });
4171
4172        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
4173        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
4174        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
4175        tree.flush_fs_events(cx).await;
4176        cx.read(|cx| {
4177            let tree = tree.read(cx);
4178            assert!(
4179                !tree
4180                    .entry_for_path("tracked-dir/tracked-file2")
4181                    .unwrap()
4182                    .is_ignored
4183            );
4184            assert!(
4185                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
4186                    .unwrap()
4187                    .is_ignored
4188            );
4189            assert!(
4190                tree.entry_for_path("ignored-dir/ignored-file2")
4191                    .unwrap()
4192                    .is_ignored
4193            );
4194            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
4195        });
4196    }
4197
4198    #[gpui::test]
4199    async fn test_write_file(cx: &mut TestAppContext) {
4200        let dir = temp_tree(json!({
4201            ".git": {},
4202            ".gitignore": "ignored-dir\n",
4203            "tracked-dir": {},
4204            "ignored-dir": {}
4205        }));
4206
4207        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4208
4209        let tree = Worktree::local(
4210            client,
4211            dir.path(),
4212            true,
4213            Arc::new(RealFs),
4214            Default::default(),
4215            &mut cx.to_async(),
4216        )
4217        .await
4218        .unwrap();
4219        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4220            .await;
4221        tree.flush_fs_events(cx).await;
4222
4223        tree.update(cx, |tree, cx| {
4224            tree.as_local().unwrap().write_file(
4225                Path::new("tracked-dir/file.txt"),
4226                "hello".into(),
4227                Default::default(),
4228                cx,
4229            )
4230        })
4231        .await
4232        .unwrap();
4233        tree.update(cx, |tree, cx| {
4234            tree.as_local().unwrap().write_file(
4235                Path::new("ignored-dir/file.txt"),
4236                "world".into(),
4237                Default::default(),
4238                cx,
4239            )
4240        })
4241        .await
4242        .unwrap();
4243
4244        tree.read_with(cx, |tree, _| {
4245            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
4246            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
4247            assert!(!tracked.is_ignored);
4248            assert!(ignored.is_ignored);
4249        });
4250    }
4251
4252    #[gpui::test(iterations = 30)]
4253    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
4254        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4255
4256        let fs = FakeFs::new(cx.background());
4257        fs.insert_tree(
4258            "/root",
4259            json!({
4260                "b": {},
4261                "c": {},
4262                "d": {},
4263            }),
4264        )
4265        .await;
4266
4267        let tree = Worktree::local(
4268            client,
4269            "/root".as_ref(),
4270            true,
4271            fs,
4272            Default::default(),
4273            &mut cx.to_async(),
4274        )
4275        .await
4276        .unwrap();
4277
4278        let snapshot1 = tree.update(cx, |tree, cx| {
4279            let tree = tree.as_local_mut().unwrap();
4280            let snapshot = Arc::new(Mutex::new(tree.snapshot()));
4281            let _ = tree.observe_updates(0, cx, {
4282                let snapshot = snapshot.clone();
4283                move |update| {
4284                    snapshot.lock().apply_remote_update(update).unwrap();
4285                    async { true }
4286                }
4287            });
4288            snapshot
4289        });
4290
4291        let entry = tree
4292            .update(cx, |tree, cx| {
4293                tree.as_local_mut()
4294                    .unwrap()
4295                    .create_entry("a/e".as_ref(), true, cx)
4296            })
4297            .await
4298            .unwrap();
4299        assert!(entry.is_dir());
4300
4301        cx.foreground().run_until_parked();
4302        tree.read_with(cx, |tree, _| {
4303            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
4304        });
4305
4306        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4307        assert_eq!(
4308            snapshot1.lock().entries(true).collect::<Vec<_>>(),
4309            snapshot2.entries(true).collect::<Vec<_>>()
4310        );
4311    }
4312
4313    #[gpui::test(iterations = 100)]
4314    async fn test_random_worktree_operations_during_initial_scan(
4315        cx: &mut TestAppContext,
4316        mut rng: StdRng,
4317    ) {
4318        let operations = env::var("OPERATIONS")
4319            .map(|o| o.parse().unwrap())
4320            .unwrap_or(5);
4321        let initial_entries = env::var("INITIAL_ENTRIES")
4322            .map(|o| o.parse().unwrap())
4323            .unwrap_or(20);
4324
4325        let root_dir = Path::new("/test");
4326        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4327        fs.as_fake().insert_tree(root_dir, json!({})).await;
4328        for _ in 0..initial_entries {
4329            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4330        }
4331        log::info!("generated initial tree");
4332
4333        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4334        let worktree = Worktree::local(
4335            client.clone(),
4336            root_dir,
4337            true,
4338            fs.clone(),
4339            Default::default(),
4340            &mut cx.to_async(),
4341        )
4342        .await
4343        .unwrap();
4344
4345        let mut snapshots =
4346            vec![worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot())];
4347        let updates = Arc::new(Mutex::new(Vec::new()));
4348        worktree.update(cx, |tree, cx| {
4349            check_worktree_change_events(tree, cx);
4350
4351            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
4352                let updates = updates.clone();
4353                move |update| {
4354                    updates.lock().push(update);
4355                    async { true }
4356                }
4357            });
4358        });
4359
4360        for _ in 0..operations {
4361            worktree
4362                .update(cx, |worktree, cx| {
4363                    randomly_mutate_worktree(worktree, &mut rng, cx)
4364                })
4365                .await
4366                .log_err();
4367            worktree.read_with(cx, |tree, _| {
4368                tree.as_local().unwrap().snapshot.check_invariants()
4369            });
4370
4371            if rng.gen_bool(0.6) {
4372                snapshots
4373                    .push(worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot()));
4374            }
4375        }
4376
4377        worktree
4378            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4379            .await;
4380
4381        cx.foreground().run_until_parked();
4382
4383        let final_snapshot = worktree.read_with(cx, |tree, _| {
4384            let tree = tree.as_local().unwrap();
4385            tree.snapshot.check_invariants();
4386            tree.snapshot()
4387        });
4388
4389        for (i, snapshot) in snapshots.into_iter().enumerate().rev() {
4390            let mut updated_snapshot = snapshot.clone();
4391            for update in updates.lock().iter() {
4392                if update.scan_id >= updated_snapshot.scan_id() as u64 {
4393                    updated_snapshot
4394                        .apply_remote_update(update.clone())
4395                        .unwrap();
4396                }
4397            }
4398
4399            assert_eq!(
4400                updated_snapshot.entries(true).collect::<Vec<_>>(),
4401                final_snapshot.entries(true).collect::<Vec<_>>(),
4402                "wrong updates after snapshot {i}: {snapshot:#?} {updates:#?}",
4403            );
4404        }
4405    }
4406
4407    #[gpui::test(iterations = 100)]
4408    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4409        let operations = env::var("OPERATIONS")
4410            .map(|o| o.parse().unwrap())
4411            .unwrap_or(40);
4412        let initial_entries = env::var("INITIAL_ENTRIES")
4413            .map(|o| o.parse().unwrap())
4414            .unwrap_or(20);
4415
4416        let root_dir = Path::new("/test");
4417        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4418        fs.as_fake().insert_tree(root_dir, json!({})).await;
4419        for _ in 0..initial_entries {
4420            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4421        }
4422        log::info!("generated initial tree");
4423
4424        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4425        let worktree = Worktree::local(
4426            client.clone(),
4427            root_dir,
4428            true,
4429            fs.clone(),
4430            Default::default(),
4431            &mut cx.to_async(),
4432        )
4433        .await
4434        .unwrap();
4435
4436        let updates = Arc::new(Mutex::new(Vec::new()));
4437        worktree.update(cx, |tree, cx| {
4438            check_worktree_change_events(tree, cx);
4439
4440            let _ = tree.as_local_mut().unwrap().observe_updates(0, cx, {
4441                let updates = updates.clone();
4442                move |update| {
4443                    updates.lock().push(update);
4444                    async { true }
4445                }
4446            });
4447        });
4448
4449        worktree
4450            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4451            .await;
4452
4453        fs.as_fake().pause_events();
4454        let mut snapshots = Vec::new();
4455        let mut mutations_len = operations;
4456        while mutations_len > 1 {
4457            if rng.gen_bool(0.2) {
4458                worktree
4459                    .update(cx, |worktree, cx| {
4460                        randomly_mutate_worktree(worktree, &mut rng, cx)
4461                    })
4462                    .await
4463                    .log_err();
4464            } else {
4465                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4466            }
4467
4468            let buffered_event_count = fs.as_fake().buffered_event_count();
4469            if buffered_event_count > 0 && rng.gen_bool(0.3) {
4470                let len = rng.gen_range(0..=buffered_event_count);
4471                log::info!("flushing {} events", len);
4472                fs.as_fake().flush_events(len);
4473            } else {
4474                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4475                mutations_len -= 1;
4476            }
4477
4478            cx.foreground().run_until_parked();
4479            if rng.gen_bool(0.2) {
4480                log::info!("storing snapshot {}", snapshots.len());
4481                let snapshot =
4482                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4483                snapshots.push(snapshot);
4484            }
4485        }
4486
4487        log::info!("quiescing");
4488        fs.as_fake().flush_events(usize::MAX);
4489        cx.foreground().run_until_parked();
4490        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4491        snapshot.check_invariants();
4492
4493        {
4494            let new_worktree = Worktree::local(
4495                client.clone(),
4496                root_dir,
4497                true,
4498                fs.clone(),
4499                Default::default(),
4500                &mut cx.to_async(),
4501            )
4502            .await
4503            .unwrap();
4504            new_worktree
4505                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4506                .await;
4507            let new_snapshot =
4508                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4509            assert_eq!(
4510                snapshot.entries_without_ids(true),
4511                new_snapshot.entries_without_ids(true)
4512            );
4513        }
4514
4515        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate().rev() {
4516            for update in updates.lock().iter() {
4517                if update.scan_id >= prev_snapshot.scan_id() as u64 {
4518                    prev_snapshot.apply_remote_update(update.clone()).unwrap();
4519                }
4520            }
4521
4522            assert_eq!(
4523                prev_snapshot.entries(true).collect::<Vec<_>>(),
4524                snapshot.entries(true).collect::<Vec<_>>(),
4525                "wrong updates after snapshot {i}: {updates:#?}",
4526            );
4527        }
4528    }
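
    // A minimal sketch (not used by the tests above) of the replay step that both random
    // tests perform at the end: apply every observed `UpdateWorktree` whose `scan_id` is
    // at least the snapshot's, after which the snapshot should contain the same entries
    // as the final worktree snapshot. Taking a plain `Snapshot` here is an illustrative
    // assumption; the tests above replay onto `LocalSnapshot`s, which deref to `Snapshot`.
    #[allow(dead_code)]
    fn replay_remote_updates(
        mut snapshot: Snapshot,
        updates: &[client::proto::UpdateWorktree],
    ) -> Snapshot {
        for update in updates {
            // Updates from scans that completed before this snapshot was taken are
            // already reflected in it and can be skipped.
            if update.scan_id >= snapshot.scan_id() as u64 {
                snapshot.apply_remote_update(update.clone()).unwrap();
            }
        }
        snapshot
    }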
4529
4530    // The worktree's `UpdatedEntries` event can be used to follow along with
4531    // all changes to the worktree's snapshot.
4532    fn check_worktree_change_events(tree: &mut Worktree, cx: &mut ModelContext<Worktree>) {
4533        let mut entries = tree.entries(true).cloned().collect::<Vec<_>>();
4534        cx.subscribe(&cx.handle(), move |tree, _, event, _| {
4535            if let Event::UpdatedEntries(changes) = event {
4536                for (path, _, change_type) in changes.iter() {
4537                    let entry = tree.entry_for_path(&path).cloned();
4538                    let ix = match entries.binary_search_by_key(&path, |e| &e.path) {
4539                        Ok(ix) | Err(ix) => ix,
4540                    };
4541                    match change_type {
4542                        PathChange::Loaded => entries.insert(ix, entry.unwrap()),
4543                        PathChange::Added => entries.insert(ix, entry.unwrap()),
4544                        PathChange::Removed => drop(entries.remove(ix)),
4545                        PathChange::Updated => {
4546                            let entry = entry.unwrap();
4547                            let existing_entry = entries.get_mut(ix).unwrap();
4548                            assert_eq!(existing_entry.path, entry.path);
4549                            *existing_entry = entry;
4550                        }
4551                        PathChange::AddedOrUpdated => {
4552                            let entry = entry.unwrap();
4553                            if entries.get(ix).map(|e| &e.path) == Some(&entry.path) {
4554                                *entries.get_mut(ix).unwrap() = entry;
4555                            } else {
4556                                entries.insert(ix, entry);
4557                            }
4558                        }
4559                    }
4560                }
4561
4562                let new_entries = tree.entries(true).cloned().collect::<Vec<_>>();
4563                assert_eq!(entries, new_entries, "incorrect changes: {:?}", changes);
4564            }
4565        })
4566        .detach();
4567    }
4568
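    // Performs one random mutation through the worktree's own APIs: roughly a third of
    // the time it deletes a random entry, a third of the time it renames one under a
    // randomly chosen parent, and otherwise it creates a child entry or overwrites an
    // existing file's contents.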
4569    fn randomly_mutate_worktree(
4570        worktree: &mut Worktree,
4571        rng: &mut impl Rng,
4572        cx: &mut ModelContext<Worktree>,
4573    ) -> Task<Result<()>> {
4574        log::info!("mutating worktree");
4575        let worktree = worktree.as_local_mut().unwrap();
4576        let snapshot = worktree.snapshot();
4577        let entry = snapshot.entries(false).choose(rng).unwrap();
4578
4579        match rng.gen_range(0_u32..100) {
4580            0..=33 if entry.path.as_ref() != Path::new("") => {
4581                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4582                worktree.delete_entry(entry.id, cx).unwrap()
4583            }
4584            ..=66 if entry.path.as_ref() != Path::new("") => {
4585                let other_entry = snapshot.entries(false).choose(rng).unwrap();
4586                let new_parent_path = if other_entry.is_dir() {
4587                    other_entry.path.clone()
4588                } else {
4589                    other_entry.path.parent().unwrap().into()
4590                };
4591                let mut new_path = new_parent_path.join(gen_name(rng));
4592                if new_path.starts_with(&entry.path) {
4593                    new_path = gen_name(rng).into();
4594                }
4595
4596                log::info!(
4597                    "renaming entry {:?} ({}) to {:?}",
4598                    entry.path,
4599                    entry.id.0,
4600                    new_path
4601                );
4602                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4603                cx.foreground().spawn(async move {
4604                    task.await?;
4605                    Ok(())
4606                })
4607            }
4608            _ => {
4609                let task = if entry.is_dir() {
4610                    let child_path = entry.path.join(gen_name(rng));
4611                    let is_dir = rng.gen_bool(0.3);
4612                    log::info!(
4613                        "creating {} at {:?}",
4614                        if is_dir { "dir" } else { "file" },
4615                        child_path,
4616                    );
4617                    worktree.create_entry(child_path, is_dir, cx)
4618                } else {
4619                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4620                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4621                };
4622                cx.foreground().spawn(async move {
4623                    task.await?;
4624                    Ok(())
4625                })
4626            }
4627        }
4628    }
4629
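    // Performs one random mutation directly on the fake filesystem, bypassing the
    // worktree: usually it inserts a new file or directory (controlled by
    // `insertion_probability`), occasionally it writes a `.gitignore` covering a random
    // subset of existing paths, and otherwise it renames or deletes an existing file or
    // directory.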
4630    async fn randomly_mutate_fs(
4631        fs: &Arc<dyn Fs>,
4632        root_path: &Path,
4633        insertion_probability: f64,
4634        rng: &mut impl Rng,
4635    ) {
4636        log::info!("mutating fs");
4637        let mut files = Vec::new();
4638        let mut dirs = Vec::new();
4639        for path in fs.as_fake().paths() {
4640            if path.starts_with(root_path) {
4641                if fs.is_file(&path).await {
4642                    files.push(path);
4643                } else {
4644                    dirs.push(path);
4645                }
4646            }
4647        }
4648
4649        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4650            let path = dirs.choose(rng).unwrap();
4651            let new_path = path.join(gen_name(rng));
4652
4653            if rng.gen() {
4654                log::info!(
4655                    "creating dir {:?}",
4656                    new_path.strip_prefix(root_path).unwrap()
4657                );
4658                fs.create_dir(&new_path).await.unwrap();
4659            } else {
4660                log::info!(
4661                    "creating file {:?}",
4662                    new_path.strip_prefix(root_path).unwrap()
4663                );
4664                fs.create_file(&new_path, Default::default()).await.unwrap();
4665            }
4666        } else if rng.gen_bool(0.05) {
4667            let ignore_dir_path = dirs.choose(rng).unwrap();
4668            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4669
4670            let subdirs = dirs
4671                .iter()
4672                .filter(|d| d.starts_with(&ignore_dir_path))
4673                .cloned()
4674                .collect::<Vec<_>>();
4675            let subfiles = files
4676                .iter()
4677                .filter(|d| d.starts_with(&ignore_dir_path))
4678                .cloned()
4679                .collect::<Vec<_>>();
4680            let files_to_ignore = {
4681                let len = rng.gen_range(0..=subfiles.len());
4682                subfiles.choose_multiple(rng, len)
4683            };
4684            let dirs_to_ignore = {
4685                let len = rng.gen_range(0..subdirs.len());
4686                subdirs.choose_multiple(rng, len)
4687            };
4688
4689            let mut ignore_contents = String::new();
4690            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4691                writeln!(
4692                    ignore_contents,
4693                    "{}",
4694                    path_to_ignore
4695                        .strip_prefix(&ignore_dir_path)
4696                        .unwrap()
4697                        .to_str()
4698                        .unwrap()
4699                )
4700                .unwrap();
4701            }
4702            log::info!(
4703                "creating gitignore {:?} with contents:\n{}",
4704                ignore_path.strip_prefix(&root_path).unwrap(),
4705                ignore_contents
4706            );
4707            fs.save(
4708                &ignore_path,
4709                &ignore_contents.as_str().into(),
4710                Default::default(),
4711            )
4712            .await
4713            .unwrap();
4714        } else {
4715            let old_path = {
4716                let file_path = files.choose(rng);
4717                let dir_path = dirs[1..].choose(rng);
4718                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4719            };
4720
4721            let is_rename = rng.gen();
4722            if is_rename {
4723                let new_path_parent = dirs
4724                    .iter()
4725                    .filter(|d| !d.starts_with(old_path))
4726                    .choose(rng)
4727                    .unwrap();
4728
4729                let overwrite_existing_dir =
4730                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4731                let new_path = if overwrite_existing_dir {
4732                    fs.remove_dir(
4733                        &new_path_parent,
4734                        RemoveOptions {
4735                            recursive: true,
4736                            ignore_if_not_exists: true,
4737                        },
4738                    )
4739                    .await
4740                    .unwrap();
4741                    new_path_parent.to_path_buf()
4742                } else {
4743                    new_path_parent.join(gen_name(rng))
4744                };
4745
4746                log::info!(
4747                    "renaming {:?} to {}{:?}",
4748                    old_path.strip_prefix(&root_path).unwrap(),
4749                    if overwrite_existing_dir {
4750                        "overwrite "
4751                    } else {
4752                        ""
4753                    },
4754                    new_path.strip_prefix(&root_path).unwrap()
4755                );
4756                fs.rename(
4757                    &old_path,
4758                    &new_path,
4759                    fs::RenameOptions {
4760                        overwrite: true,
4761                        ignore_if_exists: true,
4762                    },
4763                )
4764                .await
4765                .unwrap();
4766            } else if fs.is_file(&old_path).await {
4767                log::info!(
4768                    "deleting file {:?}",
4769                    old_path.strip_prefix(&root_path).unwrap()
4770                );
4771                fs.remove_file(old_path, Default::default()).await.unwrap();
4772            } else {
4773                log::info!(
4774                    "deleting dir {:?}",
4775                    old_path.strip_prefix(&root_path).unwrap()
4776                );
4777                fs.remove_dir(
4778                    &old_path,
4779                    RemoveOptions {
4780                        recursive: true,
4781                        ignore_if_not_exists: true,
4782                    },
4783                )
4784                .await
4785                .unwrap();
4786            }
4787        }
4788    }
4789
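    // Generates a random six-character alphanumeric name for new files and directories.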
4790    fn gen_name(rng: &mut impl Rng) -> String {
4791        (0..6)
4792            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4793            .map(char::from)
4794            .collect()
4795    }
4796
4797    impl LocalSnapshot {
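        /// Asserts the snapshot's internal consistency: `entries_by_path` and
        /// `entries_by_id` agree, the file iterators match the file entries, walking via
        /// `child_entries` visits the same paths as a full traversal, and every tracked
        /// `.gitignore` still has a corresponding entry in the tree.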
4798        fn check_invariants(&self) {
4799            assert_eq!(
4800                self.entries_by_path
4801                    .cursor::<()>()
4802                    .map(|e| (&e.path, e.id))
4803                    .collect::<Vec<_>>(),
4804                self.entries_by_id
4805                    .cursor::<()>()
4806                    .map(|e| (&e.path, e.id))
4807                    .collect::<collections::BTreeSet<_>>()
4808                    .into_iter()
4809                    .collect::<Vec<_>>(),
4810                "entries_by_path and entries_by_id are inconsistent"
4811            );
4812
4813            let mut files = self.files(true, 0);
4814            let mut visible_files = self.files(false, 0);
4815            for entry in self.entries_by_path.cursor::<()>() {
4816                if entry.is_file() {
4817                    assert_eq!(files.next().unwrap().inode, entry.inode);
4818                    if !entry.is_ignored {
4819                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4820                    }
4821                }
4822            }
4823
4824            assert!(files.next().is_none());
4825            assert!(visible_files.next().is_none());
4826
4827            let mut dfs_paths_via_child_entries = Vec::new();
4828            let mut stack = vec![Path::new("")];
4829            while let Some(path) = stack.pop() {
4830                dfs_paths_via_child_entries.push(path);
4831                let ix = stack.len();
4832                for child_entry in self.child_entries(path) {
4833                    stack.insert(ix, &child_entry.path);
4834                }
4835            }
4836
4837            let dfs_paths_via_iter = self
4838                .entries_by_path
4839                .cursor::<()>()
4840                .map(|e| e.path.as_ref())
4841                .collect::<Vec<_>>();
4842            assert_eq!(dfs_paths_via_child_entries, dfs_paths_via_iter);
4843
4844            let dfs_paths_via_traversal = self
4845                .entries(true)
4846                .map(|e| e.path.as_ref())
4847                .collect::<Vec<_>>();
4848            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4849
4850            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4851                let ignore_parent_path =
4852                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4853                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4854                assert!(self
4855                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4856                    .is_some());
4857            }
4858        }
4859
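        /// Collects `(path, inode, is_ignored)` tuples so that two scans of the same
        /// directory tree can be compared without regard to the entry ids they were
        /// assigned.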
4860        fn entries_without_ids(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4861            let mut paths = Vec::new();
4862            for entry in self.entries_by_path.cursor::<()>() {
4863                if include_ignored || !entry.is_ignored {
4864                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4865                }
4866            }
4867            paths.sort_by(|a, b| a.0.cmp(b.0));
4868            paths
4869        }
4870    }
4871
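    // Tests and helpers covering git repository discovery and status tracking within a
    // worktree. These operate on a real repository on disk, via `git2` and `RealFs`.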
4872    mod git_tests {
4873        use super::*;
4874        use pretty_assertions::assert_eq;
4875
4876        #[gpui::test]
4877        async fn test_rename_work_directory(cx: &mut TestAppContext) {
4878            let root = temp_tree(json!({
4879                "projects": {
4880                    "project1": {
4881                        "a": "",
4882                        "b": "",
4883                    }
4884                },
4886            }));
4887            let root_path = root.path();
4888
4889            let http_client = FakeHttpClient::with_404_response();
4890            let client = cx.read(|cx| Client::new(http_client, cx));
4891            let tree = Worktree::local(
4892                client,
4893                root_path,
4894                true,
4895                Arc::new(RealFs),
4896                Default::default(),
4897                &mut cx.to_async(),
4898            )
4899            .await
4900            .unwrap();
4901
4902            let repo = git_init(&root_path.join("projects/project1"));
4903            git_add("a", &repo);
4904            git_commit("init", &repo);
4905            std::fs::write(root_path.join("projects/project1/a"), "aa").unwrap();
4906
4907            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4908                .await;
4909
4910            tree.flush_fs_events(cx).await;
4911
4912            cx.read(|cx| {
4913                let tree = tree.read(cx);
4914                let (work_dir, repo) = tree.repositories().next().unwrap();
4915                assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
4916                assert_eq!(
4917                    repo.status_for_file(tree, Path::new("projects/project1/a")),
4918                    Some(GitFileStatus::Modified)
4919                );
4920                assert_eq!(
4921                    repo.status_for_file(tree, Path::new("projects/project1/b")),
4922                    Some(GitFileStatus::Added)
4923                );
4924            });
4925
4926            std::fs::rename(
4927                root_path.join("projects/project1"),
4928                root_path.join("projects/project2"),
4929            )
4930            .unwrap();
4931            tree.flush_fs_events(cx).await;
4932
4933            cx.read(|cx| {
4934                let tree = tree.read(cx);
4935                let (work_dir, repo) = tree.repositories().next().unwrap();
4936                assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
4937                assert_eq!(
4938                    repo.status_for_file(tree, Path::new("projects/project2/a")),
4939                    Some(GitFileStatus::Modified)
4940                );
4941                assert_eq!(
4942                    repo.status_for_file(tree, Path::new("projects/project2/b")),
4943                    Some(GitFileStatus::Added)
4944                );
4945            });
4946        }
4947
4948        #[gpui::test]
4949        async fn test_git_repository_for_path(cx: &mut TestAppContext) {
4950            let root = temp_tree(json!({
4951                "c.txt": "",
4952                "dir1": {
4953                    ".git": {},
4954                    "deps": {
4955                        "dep1": {
4956                            ".git": {},
4957                            "src": {
4958                                "a.txt": ""
4959                            }
4960                        }
4961                    },
4962                    "src": {
4963                        "b.txt": ""
4964                    }
4965                },
4966            }));
4967
4968            let http_client = FakeHttpClient::with_404_response();
4969            let client = cx.read(|cx| Client::new(http_client, cx));
4970            let tree = Worktree::local(
4971                client,
4972                root.path(),
4973                true,
4974                Arc::new(RealFs),
4975                Default::default(),
4976                &mut cx.to_async(),
4977            )
4978            .await
4979            .unwrap();
4980
4981            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4982                .await;
4983            tree.flush_fs_events(cx).await;
4984
4985            tree.read_with(cx, |tree, _cx| {
4986                let tree = tree.as_local().unwrap();
4987
4988                assert!(tree.repository_for_path("c.txt".as_ref()).is_none());
4989
4990                let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
4991                assert_eq!(
4992                    entry
4993                        .work_directory(tree)
4994                        .map(|directory| directory.as_ref().to_owned()),
4995                    Some(Path::new("dir1").to_owned())
4996                );
4997
4998                let entry = tree
4999                    .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
5000                    .unwrap();
5001                assert_eq!(
5002                    entry
5003                        .work_directory(tree)
5004                        .map(|directory| directory.as_ref().to_owned()),
5005                    Some(Path::new("dir1/deps/dep1").to_owned())
5006                );
5007
5008                let entries = tree.files(false, 0);
5009
5010                let paths_with_repos = tree
5011                    .entries_with_repositories(entries)
5012                    .map(|(entry, repo)| {
5013                        (
5014                            entry.path.as_ref(),
5015                            repo.and_then(|repo| {
5016                                repo.work_directory(&tree)
5017                                    .map(|work_directory| work_directory.0.to_path_buf())
5018                            }),
5019                        )
5020                    })
5021                    .collect::<Vec<_>>();
5022
5023                assert_eq!(
5024                    paths_with_repos,
5025                    &[
5026                        (Path::new("c.txt"), None),
5027                        (
5028                            Path::new("dir1/deps/dep1/src/a.txt"),
5029                            Some(Path::new("dir1/deps/dep1").into())
5030                        ),
5031                        (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
5032                    ]
5033                );
5034            });
5035
5036            let repo_update_events = Arc::new(Mutex::new(vec![]));
5037            tree.update(cx, |_, cx| {
5038                let repo_update_events = repo_update_events.clone();
5039                cx.subscribe(&tree, move |_, _, event, _| {
5040                    if let Event::UpdatedGitRepositories(update) = event {
5041                        repo_update_events.lock().push(update.clone());
5042                    }
5043                })
5044                .detach();
5045            });
5046
5047            std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
5048            tree.flush_fs_events(cx).await;
5049
5050            assert_eq!(
5051                repo_update_events.lock()[0]
5052                    .iter()
5053                    .map(|e| e.0.clone())
5054                    .collect::<Vec<Arc<Path>>>(),
5055                vec![Path::new("dir1").into()]
5056            );
5057
5058            std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
5059            tree.flush_fs_events(cx).await;
5060
5061            tree.read_with(cx, |tree, _cx| {
5062                let tree = tree.as_local().unwrap();
5063
5064                assert!(tree
5065                    .repository_for_path("dir1/src/b.txt".as_ref())
5066                    .is_none());
5067            });
5068        }
5069
5070        #[gpui::test]
5071        async fn test_git_status(cx: &mut TestAppContext) {
5072            const IGNORE_RULE: &str = "**/target";
5073
5074            let root = temp_tree(json!({
5075                "project": {
5076                    "a.txt": "a",
5077                    "b.txt": "bb",
5078                    "c": {
5079                        "d": {
5080                            "e.txt": "eee"
5081                        }
5082                    },
5083                    "f.txt": "ffff",
5084                    "target": {
5085                        "build_file": "???"
5086                    },
5087                    ".gitignore": IGNORE_RULE
5088                },
5090            }));
5091
5092            let http_client = FakeHttpClient::with_404_response();
5093            let client = cx.read(|cx| Client::new(http_client, cx));
5094            let tree = Worktree::local(
5095                client,
5096                root.path(),
5097                true,
5098                Arc::new(RealFs),
5099                Default::default(),
5100                &mut cx.to_async(),
5101            )
5102            .await
5103            .unwrap();
5104
5105            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
5106                .await;
5107
5108            const A_TXT: &str = "a.txt";
5109            const B_TXT: &str = "b.txt";
5110            const E_TXT: &str = "c/d/e.txt";
5111            const F_TXT: &str = "f.txt";
5112            const DOTGITIGNORE: &str = ".gitignore";
5113            const BUILD_FILE: &str = "target/build_file";
5114
5115            let work_dir = root.path().join("project");
5116            let mut repo = git_init(work_dir.as_path());
5117            repo.add_ignore_rule(IGNORE_RULE).unwrap();
5118            git_add(Path::new(A_TXT), &repo);
5119            git_add(Path::new(E_TXT), &repo);
5120            git_add(Path::new(DOTGITIGNORE), &repo);
5121            git_commit("Initial commit", &repo);
5122
5123            std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
5124
5125            tree.flush_fs_events(cx).await;
5126
5127            // Check that the right git state is observed on startup
5128            tree.read_with(cx, |tree, _cx| {
5129                let snapshot = tree.snapshot();
5130                assert_eq!(snapshot.repository_entries.iter().count(), 1);
5131                let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
5132                assert_eq!(dir.0.as_ref(), Path::new("project"));
5133
5134                assert_eq!(repo.statuses.iter().count(), 3);
5135                assert_eq!(
5136                    repo.statuses.get(&Path::new(A_TXT).into()),
5137                    Some(&GitFileStatus::Modified)
5138                );
5139                assert_eq!(
5140                    repo.statuses.get(&Path::new(B_TXT).into()),
5141                    Some(&GitFileStatus::Added)
5142                );
5143                assert_eq!(
5144                    repo.statuses.get(&Path::new(F_TXT).into()),
5145                    Some(&GitFileStatus::Added)
5146                );
5147            });
5148
5149            git_add(Path::new(A_TXT), &repo);
5150            git_add(Path::new(B_TXT), &repo);
5151            git_commit("Committing modified and added", &repo);
5152            tree.flush_fs_events(cx).await;
5153
5154            // Check that changes affecting only the git repository (staging and committing) are tracked
5155            tree.read_with(cx, |tree, _cx| {
5156                let snapshot = tree.snapshot();
5157                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5158
5159                assert_eq!(repo.statuses.iter().count(), 1);
5160                assert_eq!(
5161                    repo.statuses.get(&Path::new(F_TXT).into()),
5162                    Some(&GitFileStatus::Added)
5163                );
5164            });
5165
5166            git_reset(0, &repo);
5167            git_remove_index(Path::new(B_TXT), &repo);
5168            git_stash(&mut repo);
5169            std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
5170            std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
5171            tree.flush_fs_events(cx).await;
5172
5173            // Check that more complex repo changes are tracked
5174            tree.read_with(cx, |tree, _cx| {
5175                let snapshot = tree.snapshot();
5176                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5177
5178                assert_eq!(repo.statuses.iter().count(), 3);
5179                assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
5180                assert_eq!(
5181                    repo.statuses.get(&Path::new(B_TXT).into()),
5182                    Some(&GitFileStatus::Added)
5183                );
5184                assert_eq!(
5185                    repo.statuses.get(&Path::new(E_TXT).into()),
5186                    Some(&GitFileStatus::Modified)
5187                );
5188                assert_eq!(
5189                    repo.statuses.get(&Path::new(F_TXT).into()),
5190                    Some(&GitFileStatus::Added)
5191                );
5192            });
5193
5194            std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
5195            std::fs::remove_dir_all(work_dir.join("c")).unwrap();
5196            std::fs::write(
5197                work_dir.join(DOTGITIGNORE),
5198                [IGNORE_RULE, "f.txt"].join("\n"),
5199            )
5200            .unwrap();
5201
5202            git_add(Path::new(DOTGITIGNORE), &repo);
5203            git_commit("Committing modified git ignore", &repo);
5204
5205            tree.flush_fs_events(cx).await;
5206
5207            // Check that deleting files and ignoring others clears their statuses
5208            tree.read_with(cx, |tree, _cx| {
5209                let snapshot = tree.snapshot();
5210                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5211
5212                assert_eq!(repo.statuses.iter().count(), 0);
5213            });
5214
5215            let mut renamed_dir_name = "first_directory/second_directory";
5216            const RENAMED_FILE: &str = "rf.txt";
5217
5218            std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
5219            std::fs::write(
5220                work_dir.join(renamed_dir_name).join(RENAMED_FILE),
5221                "new-contents",
5222            )
5223            .unwrap();
5224
5225            tree.flush_fs_events(cx).await;
5226
5227            tree.read_with(cx, |tree, _cx| {
5228                let snapshot = tree.snapshot();
5229                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5230
5231                assert_eq!(repo.statuses.iter().count(), 1);
5232                assert_eq!(
5233                    repo.statuses
5234                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5235                    Some(&GitFileStatus::Added)
5236                );
5237            });
5238
5239            renamed_dir_name = "new_first_directory/second_directory";
5240
5241            std::fs::rename(
5242                work_dir.join("first_directory"),
5243                work_dir.join("new_first_directory"),
5244            )
5245            .unwrap();
5246
5247            tree.flush_fs_events(cx).await;
5248
5249            tree.read_with(cx, |tree, _cx| {
5250                let snapshot = tree.snapshot();
5251                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5252
5253                assert_eq!(repo.statuses.iter().count(), 1);
5254                assert_eq!(
5255                    repo.statuses
5256                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5257                    Some(&GitFileStatus::Added)
5258                );
5259            });
5260        }
5261
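        // Test-only helpers that drive a real `git2::Repository`, so the tests above can
        // set up commits, stashes, resets, and index changes without shelling out to git.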
5262        #[track_caller]
5263        fn git_init(path: &Path) -> git2::Repository {
5264            git2::Repository::init(path).expect("Failed to initialize git repository")
5265        }
5266
5267        #[track_caller]
5268        fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5269            let path = path.as_ref();
5270            let mut index = repo.index().expect("Failed to get index");
5271            index.add_path(path).expect("Failed to add path to index");
5272            index.write().expect("Failed to write index");
5273        }
5274
5275        #[track_caller]
5276        fn git_remove_index(path: &Path, repo: &git2::Repository) {
5277            let mut index = repo.index().expect("Failed to get index");
5278            index.remove_path(path).expect("Failed to remove path from index");
5279            index.write().expect("Failed to write index");
5280        }
5281
5282        #[track_caller]
5283        fn git_commit(msg: &'static str, repo: &git2::Repository) {
5284            use git2::Signature;
5285
5286            let signature = Signature::now("test", "test@zed.dev").unwrap();
5287            let oid = repo.index().unwrap().write_tree().unwrap();
5288            let tree = repo.find_tree(oid).unwrap();
5289            if let Ok(head) = repo.head() {
5290                let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5291
5292                let parent_commit = parent_obj.as_commit().unwrap();
5293
5294                repo.commit(
5295                    Some("HEAD"),
5296                    &signature,
5297                    &signature,
5298                    msg,
5299                    &tree,
5300                    &[parent_commit],
5301                )
5302                .expect("Failed to commit with parent");
5303            } else {
5304                repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5305                    .expect("Failed to commit");
5306            }
5307        }
5308
5309        #[track_caller]
5310        fn git_stash(repo: &mut git2::Repository) {
5311            use git2::Signature;
5312
5313            let signature = Signature::now("test", "test@zed.dev").unwrap();
5314            repo.stash_save(&signature, "N/A", None)
5315                .expect("Failed to stash");
5316        }
5317
5318        #[track_caller]
5319        fn git_reset(offset: usize, repo: &git2::Repository) {
5320            let head = repo.head().expect("Couldn't get repo head");
5321            let object = head.peel(git2::ObjectType::Commit).unwrap();
5322            let commit = object.as_commit().unwrap();
5323            let new_head = commit
5324                .parents()
5325                .inspect(|parent| {
5326                    parent.message();
5327                })
5328                .skip(offset)
5329                .next()
5330                .expect("Not enough history");
5331            repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5332                .expect("Could not reset");
5333        }
5334
5335        #[allow(dead_code)]
5336        #[track_caller]
5337        fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5338            repo.statuses(None)
5339                .unwrap()
5340                .iter()
5341                .map(|status| (status.path().unwrap().to_string(), status.status()))
5342                .collect()
5343        }
5344    }
5345}