worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TakeUntilExt, TryFutureExt};
  59
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  61pub struct WorktreeId(usize);
  62
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
  68pub struct LocalWorktree {
  69    snapshot: LocalSnapshot,
  70    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
  71    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
  72    _background_scanner_task: Task<()>,
  73    share: Option<ShareState>,
  74    diagnostics: HashMap<
  75        Arc<Path>,
  76        Vec<(
  77            LanguageServerId,
  78            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
  79        )>,
  80    >,
  81    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  82    client: Arc<Client>,
  83    fs: Arc<dyn Fs>,
  84    visible: bool,
  85}
  86
  87pub struct RemoteWorktree {
  88    snapshot: Snapshot,
  89    background_snapshot: Arc<Mutex<Snapshot>>,
  90    project_id: u64,
  91    client: Arc<Client>,
  92    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
  93    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
  94    replica_id: ReplicaId,
  95    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  96    visible: bool,
  97    disconnected: bool,
  98}
  99
 100#[derive(Clone)]
 101pub struct Snapshot {
 102    id: WorktreeId,
 103    abs_path: Arc<Path>,
 104    root_name: String,
 105    root_char_bag: CharBag,
 106    entries_by_path: SumTree<Entry>,
 107    entries_by_id: SumTree<PathEntry>,
 108    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 109
 110    /// A number that increases every time the worktree begins scanning
 111    /// a set of paths from the filesystem. This scanning could be caused
 112    /// by some operation performed on the worktree, such as reading or
 113    /// writing a file, or by an event reported by the filesystem.
 114    scan_id: usize,
 115
 116    /// The latest scan id that has completed, and whose preceding scans
 117    /// have all completed. The current `scan_id` could be more than one
 118    /// greater than the `completed_scan_id` if operations are performed
 119    /// on the worktree while it is processing a file-system event.
 120    completed_scan_id: usize,
 121}
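
// An illustrative sketch, not part of the original code: the relationship
// between the two counters documented above can be read as a single
// predicate. The helper name is invented; the invariant follows from the
// field docs (`completed_scan_id` trails `scan_id` while scans are in
// flight and catches up once every preceding scan has finished).
#[allow(dead_code)]
fn snapshot_is_fully_scanned(snapshot: &Snapshot) -> bool {
    // e.g. a freshly created local worktree starts with `scan_id: 1` and
    // `completed_scan_id: 0`, so it still counts as scanning.
    snapshot.completed_scan_id == snapshot.scan_id
}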
 122
 123#[derive(Clone, Debug, PartialEq, Eq)]
 124pub struct RepositoryEntry {
 125    pub(crate) work_directory: WorkDirectoryEntry,
 126    pub(crate) branch: Option<Arc<str>>,
 127    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
 128}
 129
 130fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
 131    proto::GitStatus::from_i32(git_status).map(|status| match status {
 132        proto::GitStatus::Added => GitFileStatus::Added,
 133        proto::GitStatus::Modified => GitFileStatus::Modified,
 134        proto::GitStatus::Conflict => GitFileStatus::Conflict,
 135    })
 136}
 137
 138impl RepositoryEntry {
 139    pub fn branch(&self) -> Option<Arc<str>> {
 140        self.branch.clone()
 141    }
 142
 143    pub fn work_directory_id(&self) -> ProjectEntryId {
 144        *self.work_directory
 145    }
 146
 147    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 148        snapshot
 149            .entry_for_id(self.work_directory_id())
 150            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 151    }
 152
 153    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 154        self.work_directory
 155            .relativize(snapshot, path)
 156            .and_then(|repo_path| {
 157                self.statuses
 158                    .iter_from(&repo_path)
 159                    .take_while(|(key, _)| key.starts_with(&repo_path))
  160                    // Short-circuit once we've found the highest-priority status
 161                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
 162                    .map(|(_, status)| status)
 163                    .reduce(
 164                        |status_first, status_second| match (status_first, status_second) {
 165                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
 166                                &GitFileStatus::Conflict
 167                            }
 168                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
 169                                &GitFileStatus::Modified
 170                            }
 171                            _ => &GitFileStatus::Added,
 172                        },
 173                    )
 174                    .copied()
 175            })
 176    }
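
    // An illustrative sketch, not part of the original code: the reduction in
    // `status_for_path` above collapses every status found under a directory
    // into one value with the precedence Conflict > Modified > Added. The same
    // rule, written over owned values and without the short-circuiting:
    #[allow(dead_code)]
    fn combine_statuses_sketch(statuses: &[GitFileStatus]) -> Option<GitFileStatus> {
        statuses.iter().copied().reduce(|a, b| match (a, b) {
            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => GitFileStatus::Conflict,
            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => GitFileStatus::Modified,
            _ => GitFileStatus::Added,
        })
    }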
 177
 178    #[cfg(any(test, feature = "test-support"))]
 179    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 180        self.work_directory
 181            .relativize(snapshot, path)
  182            .and_then(|repo_path| self.statuses.get(&repo_path))
 183            .cloned()
 184    }
 185
 186    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
 187        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
 188        let mut removed_statuses: Vec<String> = Vec::new();
 189
 190        let mut self_statuses = self.statuses.iter().peekable();
 191        let mut other_statuses = other.statuses.iter().peekable();
 192        loop {
 193            match (self_statuses.peek(), other_statuses.peek()) {
 194                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
 195                    match Ord::cmp(self_repo_path, other_repo_path) {
 196                        Ordering::Less => {
 197                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
 198                            self_statuses.next();
 199                        }
 200                        Ordering::Equal => {
 201                            if self_status != other_status {
 202                                updated_statuses
 203                                    .push(make_status_entry(self_repo_path, self_status));
 204                            }
 205
 206                            self_statuses.next();
 207                            other_statuses.next();
 208                        }
 209                        Ordering::Greater => {
 210                            removed_statuses.push(make_repo_path(other_repo_path));
 211                            other_statuses.next();
 212                        }
 213                    }
 214                }
 215                (Some((self_repo_path, self_status)), None) => {
 216                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
 217                    self_statuses.next();
 218                }
 219                (None, Some((other_repo_path, _))) => {
 220                    removed_statuses.push(make_repo_path(other_repo_path));
 221                    other_statuses.next();
 222                }
 223                (None, None) => break,
 224            }
 225        }
 226
 227        proto::RepositoryEntry {
 228            work_directory_id: self.work_directory_id().to_proto(),
 229            branch: self.branch.as_ref().map(|str| str.to_string()),
 230            removed_repo_paths: removed_statuses,
 231            updated_statuses,
 232        }
 233    }
 234}
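
// A worked example of `build_update` above (illustrative values, not from the
// original sources). If `self` (the current state) holds the statuses
//     { "a.txt": Modified, "b.txt": Added }
// and `other` (the previously sent state) holds
//     { "a.txt": Added, "c.txt": Modified }
// then the two sorted iterators are merged key by key and the resulting
// `proto::RepositoryEntry` carries
//     updated_statuses:   [ "a.txt" -> Modified, "b.txt" -> Added ]
//     removed_repo_paths: [ "c.txt" ]
// so only the delta is sent to collaborators rather than the full status map.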
 235
 236fn make_repo_path(path: &RepoPath) -> String {
 237    path.as_os_str().to_string_lossy().to_string()
 238}
 239
 240fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
 241    proto::StatusEntry {
 242        repo_path: make_repo_path(path),
 243        status: match status {
 244            GitFileStatus::Added => proto::GitStatus::Added.into(),
 245            GitFileStatus::Modified => proto::GitStatus::Modified.into(),
 246            GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
 247        },
 248    }
 249}
 250
 251impl From<&RepositoryEntry> for proto::RepositoryEntry {
 252    fn from(value: &RepositoryEntry) -> Self {
 253        proto::RepositoryEntry {
 254            work_directory_id: value.work_directory.to_proto(),
 255            branch: value.branch.as_ref().map(|str| str.to_string()),
 256            updated_statuses: value
 257                .statuses
 258                .iter()
 259                .map(|(repo_path, status)| make_status_entry(repo_path, status))
 260                .collect(),
 261            removed_repo_paths: Default::default(),
 262        }
 263    }
 264}
 265
  266/// This path corresponds to the 'content path' (the folder that contains the .git folder or file).
 267#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 268pub struct RepositoryWorkDirectory(Arc<Path>);
 269
 270impl Default for RepositoryWorkDirectory {
 271    fn default() -> Self {
 272        RepositoryWorkDirectory(Arc::from(Path::new("")))
 273    }
 274}
 275
 276impl AsRef<Path> for RepositoryWorkDirectory {
 277    fn as_ref(&self) -> &Path {
 278        self.0.as_ref()
 279    }
 280}
 281
 282#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 283pub struct WorkDirectoryEntry(ProjectEntryId);
 284
 285impl WorkDirectoryEntry {
 286    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 287        worktree.entry_for_id(self.0).and_then(|entry| {
 288            path.strip_prefix(&entry.path)
 289                .ok()
 290                .map(move |path| path.into())
 291        })
 292    }
 293}
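
// A minimal sketch of what `relativize` computes, using plain `std::path`
// types so it stands alone; the directory names are invented for illustration.
// With a work directory entry at "backend", the worktree-relative file path
// "backend/src/main.rs" maps to the repo-relative path "src/main.rs".
#[allow(dead_code)]
fn relativize_sketch() -> Option<PathBuf> {
    Path::new("backend/src/main.rs")
        .strip_prefix("backend")
        .ok()
        .map(|path| path.to_path_buf())
}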
 294
 295impl Deref for WorkDirectoryEntry {
 296    type Target = ProjectEntryId;
 297
 298    fn deref(&self) -> &Self::Target {
 299        &self.0
 300    }
 301}
 302
  303impl From<ProjectEntryId> for WorkDirectoryEntry {
 304    fn from(value: ProjectEntryId) -> Self {
 305        WorkDirectoryEntry(value)
 306    }
 307}
 308
 309#[derive(Debug, Clone)]
 310pub struct LocalSnapshot {
 311    snapshot: Snapshot,
  312    /// All of the gitignore files in the worktree, indexed by the absolute path of their parent directory.
  313    /// The boolean indicates whether the gitignore needs to be updated.
 314    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
  315    /// All of the git repositories in the worktree, indexed by the project entry
  316    /// id of their work directory.
 317    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 318}
 319
 320pub struct LocalMutableSnapshot {
 321    snapshot: LocalSnapshot,
 322    /// The ids of all of the entries that were removed from the snapshot
 323    /// as part of the current update. These entry ids may be re-used
 324    /// if the same inode is discovered at a new path, or if the given
 325    /// path is re-created after being deleted.
 326    removed_entry_ids: HashMap<u64, ProjectEntryId>,
 327}
 328
 329#[derive(Debug, Clone)]
 330pub struct LocalRepositoryEntry {
 331    pub(crate) scan_id: usize,
 332    pub(crate) git_dir_scan_id: usize,
 333    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
 334    /// Path to the actual .git folder.
 335    /// Note: if .git is a file, this points to the folder indicated by the .git file
 336    pub(crate) git_dir_path: Arc<Path>,
 337}
 338
 339impl LocalRepositoryEntry {
  340    // Note: the given path should be relative to the worktree root.
 341    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 342        path.starts_with(self.git_dir_path.as_ref())
 343    }
 344}
 345
 346impl Deref for LocalSnapshot {
 347    type Target = Snapshot;
 348
 349    fn deref(&self) -> &Self::Target {
 350        &self.snapshot
 351    }
 352}
 353
 354impl DerefMut for LocalSnapshot {
 355    fn deref_mut(&mut self) -> &mut Self::Target {
 356        &mut self.snapshot
 357    }
 358}
 359
 360impl Deref for LocalMutableSnapshot {
 361    type Target = LocalSnapshot;
 362
 363    fn deref(&self) -> &Self::Target {
 364        &self.snapshot
 365    }
 366}
 367
 368impl DerefMut for LocalMutableSnapshot {
 369    fn deref_mut(&mut self) -> &mut Self::Target {
 370        &mut self.snapshot
 371    }
 372}
 373
 374enum ScanState {
 375    Started,
 376    Updated {
 377        snapshot: LocalSnapshot,
 378        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
 379        barrier: Option<barrier::Sender>,
 380        scanning: bool,
 381    },
 382}
 383
 384struct ShareState {
 385    project_id: u64,
 386    snapshots_tx: watch::Sender<LocalSnapshot>,
 387    resume_updates: watch::Sender<()>,
 388    _maintain_remote_snapshot: Task<Option<()>>,
 389}
 390
 391pub enum Event {
 392    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
 393    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
 394}
 395
 396impl Entity for Worktree {
 397    type Event = Event;
 398}
 399
 400impl Worktree {
 401    pub async fn local(
 402        client: Arc<Client>,
 403        path: impl Into<Arc<Path>>,
 404        visible: bool,
 405        fs: Arc<dyn Fs>,
 406        next_entry_id: Arc<AtomicUsize>,
 407        cx: &mut AsyncAppContext,
 408    ) -> Result<ModelHandle<Self>> {
 409        // After determining whether the root entry is a file or a directory, populate the
  410        // snapshot's "root name", which is used for fuzzy matching.
 411        let abs_path = path.into();
 412        let metadata = fs
 413            .metadata(&abs_path)
 414            .await
 415            .context("failed to stat worktree path")?;
 416
 417        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
 418            let root_name = abs_path
 419                .file_name()
 420                .map_or(String::new(), |f| f.to_string_lossy().to_string());
 421
 422            let mut snapshot = LocalSnapshot {
 423                ignores_by_parent_abs_path: Default::default(),
 424                git_repositories: Default::default(),
 425                snapshot: Snapshot {
 426                    id: WorktreeId::from_usize(cx.model_id()),
 427                    abs_path: abs_path.clone(),
 428                    root_name: root_name.clone(),
 429                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
 430                    entries_by_path: Default::default(),
 431                    entries_by_id: Default::default(),
 432                    repository_entries: Default::default(),
 433                    scan_id: 1,
 434                    completed_scan_id: 0,
 435                },
 436            };
 437
 438            if let Some(metadata) = metadata {
 439                snapshot.insert_entry(
 440                    Entry::new(
 441                        Arc::from(Path::new("")),
 442                        &metadata,
 443                        &next_entry_id,
 444                        snapshot.root_char_bag,
 445                    ),
 446                    fs.as_ref(),
 447                );
 448            }
 449
 450            let (path_changes_tx, path_changes_rx) = channel::unbounded();
 451            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 452
 453            cx.spawn_weak(|this, mut cx| async move {
 454                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
 455                    this.update(&mut cx, |this, cx| {
 456                        let this = this.as_local_mut().unwrap();
 457                        match state {
 458                            ScanState::Started => {
 459                                *this.is_scanning.0.borrow_mut() = true;
 460                            }
 461                            ScanState::Updated {
 462                                snapshot,
 463                                changes,
 464                                barrier,
 465                                scanning,
 466                            } => {
 467                                *this.is_scanning.0.borrow_mut() = scanning;
 468                                this.set_snapshot(snapshot, cx);
 469                                cx.emit(Event::UpdatedEntries(changes));
 470                                drop(barrier);
 471                            }
 472                        }
 473                        cx.notify();
 474                    });
 475                }
 476            })
 477            .detach();
 478
 479            let background_scanner_task = cx.background().spawn({
 480                let fs = fs.clone();
 481                let snapshot = snapshot.clone();
 482                let background = cx.background().clone();
 483                async move {
 484                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 485                    BackgroundScanner::new(
 486                        snapshot,
 487                        next_entry_id,
 488                        fs,
 489                        scan_states_tx,
 490                        background,
 491                        path_changes_rx,
 492                    )
 493                    .run(events)
 494                    .await;
 495                }
 496            });
 497
 498            Worktree::Local(LocalWorktree {
 499                snapshot,
 500                is_scanning: watch::channel_with(true),
 501                share: None,
 502                path_changes_tx,
 503                _background_scanner_task: background_scanner_task,
 504                diagnostics: Default::default(),
 505                diagnostic_summaries: Default::default(),
 506                client,
 507                fs,
 508                visible,
 509            })
 510        }))
 511    }
 512
 513    pub fn remote(
 514        project_remote_id: u64,
 515        replica_id: ReplicaId,
 516        worktree: proto::WorktreeMetadata,
 517        client: Arc<Client>,
 518        cx: &mut AppContext,
 519    ) -> ModelHandle<Self> {
 520        cx.add_model(|cx: &mut ModelContext<Self>| {
 521            let snapshot = Snapshot {
 522                id: WorktreeId(worktree.id as usize),
 523                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
 524                root_name: worktree.root_name.clone(),
 525                root_char_bag: worktree
 526                    .root_name
 527                    .chars()
 528                    .map(|c| c.to_ascii_lowercase())
 529                    .collect(),
 530                entries_by_path: Default::default(),
 531                entries_by_id: Default::default(),
 532                repository_entries: Default::default(),
 533                scan_id: 1,
 534                completed_scan_id: 0,
 535            };
 536
 537            let (updates_tx, mut updates_rx) = mpsc::unbounded();
 538            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
 539            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 540
 541            cx.background()
 542                .spawn({
 543                    let background_snapshot = background_snapshot.clone();
 544                    async move {
 545                        while let Some(update) = updates_rx.next().await {
 546                            if let Err(error) =
 547                                background_snapshot.lock().apply_remote_update(update)
 548                            {
 549                                log::error!("error applying worktree update: {}", error);
 550                            }
 551                            snapshot_updated_tx.send(()).await.ok();
 552                        }
 553                    }
 554                })
 555                .detach();
 556
 557            cx.spawn_weak(|this, mut cx| async move {
 558                while (snapshot_updated_rx.recv().await).is_some() {
 559                    if let Some(this) = this.upgrade(&cx) {
 560                        this.update(&mut cx, |this, cx| {
 561                            let this = this.as_remote_mut().unwrap();
 562                            this.snapshot = this.background_snapshot.lock().clone();
 563                            cx.emit(Event::UpdatedEntries(Default::default()));
 564                            cx.notify();
 565                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 566                                if this.observed_snapshot(*scan_id) {
 567                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 568                                    let _ = tx.send(());
 569                                } else {
 570                                    break;
 571                                }
 572                            }
 573                        });
 574                    } else {
 575                        break;
 576                    }
 577                }
 578            })
 579            .detach();
 580
 581            Worktree::Remote(RemoteWorktree {
 582                project_id: project_remote_id,
 583                replica_id,
 584                snapshot: snapshot.clone(),
 585                background_snapshot,
 586                updates_tx: Some(updates_tx),
 587                snapshot_subscriptions: Default::default(),
 588                client: client.clone(),
 589                diagnostic_summaries: Default::default(),
 590                visible: worktree.visible,
 591                disconnected: false,
 592            })
 593        })
 594    }
 595
 596    pub fn as_local(&self) -> Option<&LocalWorktree> {
 597        if let Worktree::Local(worktree) = self {
 598            Some(worktree)
 599        } else {
 600            None
 601        }
 602    }
 603
 604    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 605        if let Worktree::Remote(worktree) = self {
 606            Some(worktree)
 607        } else {
 608            None
 609        }
 610    }
 611
 612    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 613        if let Worktree::Local(worktree) = self {
 614            Some(worktree)
 615        } else {
 616            None
 617        }
 618    }
 619
 620    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 621        if let Worktree::Remote(worktree) = self {
 622            Some(worktree)
 623        } else {
 624            None
 625        }
 626    }
 627
 628    pub fn is_local(&self) -> bool {
 629        matches!(self, Worktree::Local(_))
 630    }
 631
 632    pub fn is_remote(&self) -> bool {
 633        !self.is_local()
 634    }
 635
 636    pub fn snapshot(&self) -> Snapshot {
 637        match self {
 638            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 639            Worktree::Remote(worktree) => worktree.snapshot(),
 640        }
 641    }
 642
 643    pub fn scan_id(&self) -> usize {
 644        match self {
 645            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 646            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 647        }
 648    }
 649
 650    pub fn completed_scan_id(&self) -> usize {
 651        match self {
 652            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 653            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 654        }
 655    }
 656
 657    pub fn is_visible(&self) -> bool {
 658        match self {
 659            Worktree::Local(worktree) => worktree.visible,
 660            Worktree::Remote(worktree) => worktree.visible,
 661        }
 662    }
 663
 664    pub fn replica_id(&self) -> ReplicaId {
 665        match self {
 666            Worktree::Local(_) => 0,
 667            Worktree::Remote(worktree) => worktree.replica_id,
 668        }
 669    }
 670
 671    pub fn diagnostic_summaries(
 672        &self,
 673    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 674        match self {
 675            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 676            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 677        }
 678        .iter()
 679        .flat_map(|(path, summaries)| {
 680            summaries
 681                .iter()
 682                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 683        })
 684    }
 685
 686    pub fn abs_path(&self) -> Arc<Path> {
 687        match self {
 688            Worktree::Local(worktree) => worktree.abs_path.clone(),
 689            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 690        }
 691    }
 692}
 693
 694impl LocalWorktree {
 695    pub fn contains_abs_path(&self, path: &Path) -> bool {
 696        path.starts_with(&self.abs_path)
 697    }
 698
 699    fn absolutize(&self, path: &Path) -> PathBuf {
 700        if path.file_name().is_some() {
 701            self.abs_path.join(path)
 702        } else {
 703            self.abs_path.to_path_buf()
 704        }
 705    }
 706
 707    pub(crate) fn load_buffer(
 708        &mut self,
 709        id: u64,
 710        path: &Path,
 711        cx: &mut ModelContext<Worktree>,
 712    ) -> Task<Result<ModelHandle<Buffer>>> {
 713        let path = Arc::from(path);
 714        cx.spawn(move |this, mut cx| async move {
 715            let (file, contents, diff_base) = this
 716                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
 717                .await?;
 718            let text_buffer = cx
 719                .background()
 720                .spawn(async move { text::Buffer::new(0, id, contents) })
 721                .await;
 722            Ok(cx.add_model(|cx| {
 723                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
 724                buffer.git_diff_recalc(cx);
 725                buffer
 726            }))
 727        })
 728    }
 729
 730    pub fn diagnostics_for_path(
 731        &self,
 732        path: &Path,
 733    ) -> Vec<(
 734        LanguageServerId,
 735        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 736    )> {
 737        self.diagnostics.get(path).cloned().unwrap_or_default()
 738    }
 739
 740    pub fn clear_diagnostics_for_language_server(
 741        &mut self,
 742        server_id: LanguageServerId,
 743        _: &mut ModelContext<Worktree>,
 744    ) {
 745        let worktree_id = self.id().to_proto();
 746        self.diagnostic_summaries
 747            .retain(|path, summaries_by_server_id| {
 748                if summaries_by_server_id.remove(&server_id).is_some() {
 749                    if let Some(share) = self.share.as_ref() {
 750                        self.client
 751                            .send(proto::UpdateDiagnosticSummary {
 752                                project_id: share.project_id,
 753                                worktree_id,
 754                                summary: Some(proto::DiagnosticSummary {
 755                                    path: path.to_string_lossy().to_string(),
 756                                    language_server_id: server_id.0 as u64,
 757                                    error_count: 0,
 758                                    warning_count: 0,
 759                                }),
 760                            })
 761                            .log_err();
 762                    }
 763                    !summaries_by_server_id.is_empty()
 764                } else {
 765                    true
 766                }
 767            });
 768
 769        self.diagnostics.retain(|_, diagnostics_by_server_id| {
 770            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 771                diagnostics_by_server_id.remove(ix);
 772                !diagnostics_by_server_id.is_empty()
 773            } else {
 774                true
 775            }
 776        });
 777    }
 778
 779    pub fn update_diagnostics(
 780        &mut self,
 781        server_id: LanguageServerId,
 782        worktree_path: Arc<Path>,
 783        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 784        _: &mut ModelContext<Worktree>,
 785    ) -> Result<bool> {
 786        let summaries_by_server_id = self
 787            .diagnostic_summaries
 788            .entry(worktree_path.clone())
 789            .or_default();
 790
 791        let old_summary = summaries_by_server_id
 792            .remove(&server_id)
 793            .unwrap_or_default();
 794
 795        let new_summary = DiagnosticSummary::new(&diagnostics);
 796        if new_summary.is_empty() {
 797            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
 798                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 799                    diagnostics_by_server_id.remove(ix);
 800                }
 801                if diagnostics_by_server_id.is_empty() {
 802                    self.diagnostics.remove(&worktree_path);
 803                }
 804            }
 805        } else {
 806            summaries_by_server_id.insert(server_id, new_summary);
 807            let diagnostics_by_server_id =
 808                self.diagnostics.entry(worktree_path.clone()).or_default();
 809            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 810                Ok(ix) => {
 811                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
 812                }
 813                Err(ix) => {
 814                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
 815                }
 816            }
 817        }
 818
 819        if !old_summary.is_empty() || !new_summary.is_empty() {
 820            if let Some(share) = self.share.as_ref() {
 821                self.client
 822                    .send(proto::UpdateDiagnosticSummary {
 823                        project_id: share.project_id,
 824                        worktree_id: self.id().to_proto(),
 825                        summary: Some(proto::DiagnosticSummary {
 826                            path: worktree_path.to_string_lossy().to_string(),
 827                            language_server_id: server_id.0 as u64,
 828                            error_count: new_summary.error_count as u32,
 829                            warning_count: new_summary.warning_count as u32,
 830                        }),
 831                    })
 832                    .log_err();
 833            }
 834        }
 835
 836        Ok(!old_summary.is_empty() || !new_summary.is_empty())
 837    }
 838
 839    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
 840        let updated_repos =
 841            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
 842
 843        self.snapshot = new_snapshot;
 844
 845        if let Some(share) = self.share.as_mut() {
 846            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
 847        }
 848
 849        if !updated_repos.is_empty() {
 850            cx.emit(Event::UpdatedGitRepositories(updated_repos));
 851        }
 852    }
 853
 854    fn changed_repos(
 855        &self,
 856        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 857        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 858    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
 859        let mut diff = HashMap::default();
 860        let mut old_repos = old_repos.iter().peekable();
 861        let mut new_repos = new_repos.iter().peekable();
 862        loop {
 863            match (old_repos.peek(), new_repos.peek()) {
 864                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
 865                    match Ord::cmp(old_entry_id, new_entry_id) {
 866                        Ordering::Less => {
 867                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
 868                                diff.insert(entry.path.clone(), (*old_repo).clone());
 869                            }
 870                            old_repos.next();
 871                        }
 872                        Ordering::Equal => {
 873                            if old_repo.git_dir_scan_id != new_repo.git_dir_scan_id {
 874                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
 875                                    diff.insert(entry.path.clone(), (*new_repo).clone());
 876                                }
 877                            }
 878
 879                            old_repos.next();
 880                            new_repos.next();
 881                        }
 882                        Ordering::Greater => {
 883                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
 884                                diff.insert(entry.path.clone(), (*new_repo).clone());
 885                            }
 886                            new_repos.next();
 887                        }
 888                    }
 889                }
 890                (Some((old_entry_id, old_repo)), None) => {
 891                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
 892                        diff.insert(entry.path.clone(), (*old_repo).clone());
 893                    }
 894                    old_repos.next();
 895                }
 896                (None, Some((new_entry_id, new_repo))) => {
 897                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
 898                        diff.insert(entry.path.clone(), (*new_repo).clone());
 899                    }
 900                    new_repos.next();
 901                }
 902                (None, None) => break,
 903            }
 904        }
 905        diff
 906    }
 907
 908    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 909        let mut is_scanning_rx = self.is_scanning.1.clone();
 910        async move {
  911            let mut is_scanning = *is_scanning_rx.borrow();
 912            while is_scanning {
 913                if let Some(value) = is_scanning_rx.recv().await {
 914                    is_scanning = value;
 915                } else {
 916                    break;
 917                }
 918            }
 919        }
 920    }
 921
 922    pub fn snapshot(&self) -> LocalSnapshot {
 923        self.snapshot.clone()
 924    }
 925
 926    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 927        proto::WorktreeMetadata {
 928            id: self.id().to_proto(),
 929            root_name: self.root_name().to_string(),
 930            visible: self.visible,
 931            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 932        }
 933    }
 934
 935    fn load(
 936        &self,
 937        path: &Path,
 938        cx: &mut ModelContext<Worktree>,
 939    ) -> Task<Result<(File, String, Option<String>)>> {
 940        let handle = cx.handle();
 941        let path = Arc::from(path);
 942        let abs_path = self.absolutize(&path);
 943        let fs = self.fs.clone();
 944        let snapshot = self.snapshot();
 945
 946        let mut index_task = None;
 947
 948        if let Some(repo) = snapshot.repository_for_path(&path) {
 949            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
 950            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
 951                let repo = repo.repo_ptr.to_owned();
 952                index_task = Some(
 953                    cx.background()
 954                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
 955                );
 956            }
 957        }
 958
 959        cx.spawn(|this, mut cx| async move {
 960            let text = fs.load(&abs_path).await?;
 961
 962            let diff_base = if let Some(index_task) = index_task {
 963                index_task.await
 964            } else {
 965                None
 966            };
 967
 968            // Eagerly populate the snapshot with an updated entry for the loaded file
 969            let entry = this
 970                .update(&mut cx, |this, cx| {
 971                    this.as_local().unwrap().refresh_entry(path, None, cx)
 972                })
 973                .await?;
 974
 975            Ok((
 976                File {
 977                    entry_id: entry.id,
 978                    worktree: handle,
 979                    path: entry.path,
 980                    mtime: entry.mtime,
 981                    is_local: true,
 982                    is_deleted: false,
 983                },
 984                text,
 985                diff_base,
 986            ))
 987        })
 988    }
 989
 990    pub fn save_buffer(
 991        &self,
 992        buffer_handle: ModelHandle<Buffer>,
 993        path: Arc<Path>,
 994        has_changed_file: bool,
 995        cx: &mut ModelContext<Worktree>,
 996    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
 997        let handle = cx.handle();
 998        let buffer = buffer_handle.read(cx);
 999
1000        let rpc = self.client.clone();
1001        let buffer_id = buffer.remote_id();
1002        let project_id = self.share.as_ref().map(|share| share.project_id);
1003
1004        let text = buffer.as_rope().clone();
1005        let fingerprint = text.fingerprint();
1006        let version = buffer.version();
1007        let save = self.write_file(path, text, buffer.line_ending(), cx);
1008
1009        cx.as_mut().spawn(|mut cx| async move {
1010            let entry = save.await?;
1011
1012            if has_changed_file {
1013                let new_file = Arc::new(File {
1014                    entry_id: entry.id,
1015                    worktree: handle,
1016                    path: entry.path,
1017                    mtime: entry.mtime,
1018                    is_local: true,
1019                    is_deleted: false,
1020                });
1021
1022                if let Some(project_id) = project_id {
1023                    rpc.send(proto::UpdateBufferFile {
1024                        project_id,
1025                        buffer_id,
1026                        file: Some(new_file.to_proto()),
1027                    })
1028                    .log_err();
1029                }
1030
 1031                // `has_changed_file` is already true in this branch, so the
 1032                // buffer's file can be updated unconditionally.
 1033                buffer_handle.update(&mut cx, |buffer, cx| {
 1034                    buffer.file_updated(new_file, cx).detach();
 1035                });
1036            }
1037
1038            if let Some(project_id) = project_id {
1039                rpc.send(proto::BufferSaved {
1040                    project_id,
1041                    buffer_id,
1042                    version: serialize_version(&version),
1043                    mtime: Some(entry.mtime.into()),
1044                    fingerprint: serialize_fingerprint(fingerprint),
1045                })?;
1046            }
1047
1048            buffer_handle.update(&mut cx, |buffer, cx| {
1049                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1050            });
1051
1052            Ok((version, fingerprint, entry.mtime))
1053        })
1054    }
1055
1056    pub fn create_entry(
1057        &self,
1058        path: impl Into<Arc<Path>>,
1059        is_dir: bool,
1060        cx: &mut ModelContext<Worktree>,
1061    ) -> Task<Result<Entry>> {
1062        let path = path.into();
1063        let abs_path = self.absolutize(&path);
1064        let fs = self.fs.clone();
1065        let write = cx.background().spawn(async move {
1066            if is_dir {
1067                fs.create_dir(&abs_path).await
1068            } else {
1069                fs.save(&abs_path, &Default::default(), Default::default())
1070                    .await
1071            }
1072        });
1073
1074        cx.spawn(|this, mut cx| async move {
1075            write.await?;
1076            this.update(&mut cx, |this, cx| {
1077                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1078            })
1079            .await
1080        })
1081    }
1082
1083    pub fn write_file(
1084        &self,
1085        path: impl Into<Arc<Path>>,
1086        text: Rope,
1087        line_ending: LineEnding,
1088        cx: &mut ModelContext<Worktree>,
1089    ) -> Task<Result<Entry>> {
1090        let path = path.into();
1091        let abs_path = self.absolutize(&path);
1092        let fs = self.fs.clone();
1093        let write = cx
1094            .background()
1095            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1096
1097        cx.spawn(|this, mut cx| async move {
1098            write.await?;
1099            this.update(&mut cx, |this, cx| {
1100                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1101            })
1102            .await
1103        })
1104    }
1105
1106    pub fn delete_entry(
1107        &self,
1108        entry_id: ProjectEntryId,
1109        cx: &mut ModelContext<Worktree>,
1110    ) -> Option<Task<Result<()>>> {
1111        let entry = self.entry_for_id(entry_id)?.clone();
1112        let abs_path = self.abs_path.clone();
1113        let fs = self.fs.clone();
1114
1115        let delete = cx.background().spawn(async move {
1116            let mut abs_path = fs.canonicalize(&abs_path).await?;
1117            if entry.path.file_name().is_some() {
1118                abs_path = abs_path.join(&entry.path);
1119            }
1120            if entry.is_file() {
1121                fs.remove_file(&abs_path, Default::default()).await?;
1122            } else {
1123                fs.remove_dir(
1124                    &abs_path,
1125                    RemoveOptions {
1126                        recursive: true,
1127                        ignore_if_not_exists: false,
1128                    },
1129                )
1130                .await?;
1131            }
1132            anyhow::Ok(abs_path)
1133        });
1134
1135        Some(cx.spawn(|this, mut cx| async move {
1136            let abs_path = delete.await?;
1137            let (tx, mut rx) = barrier::channel();
1138            this.update(&mut cx, |this, _| {
1139                this.as_local_mut()
1140                    .unwrap()
1141                    .path_changes_tx
1142                    .try_send((vec![abs_path], tx))
1143            })?;
1144            rx.recv().await;
1145            Ok(())
1146        }))
1147    }
1148
1149    pub fn rename_entry(
1150        &self,
1151        entry_id: ProjectEntryId,
1152        new_path: impl Into<Arc<Path>>,
1153        cx: &mut ModelContext<Worktree>,
1154    ) -> Option<Task<Result<Entry>>> {
1155        let old_path = self.entry_for_id(entry_id)?.path.clone();
1156        let new_path = new_path.into();
1157        let abs_old_path = self.absolutize(&old_path);
1158        let abs_new_path = self.absolutize(&new_path);
1159        let fs = self.fs.clone();
1160        let rename = cx.background().spawn(async move {
1161            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1162                .await
1163        });
1164
1165        Some(cx.spawn(|this, mut cx| async move {
1166            rename.await?;
1167            this.update(&mut cx, |this, cx| {
1168                this.as_local_mut()
1169                    .unwrap()
1170                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1171            })
1172            .await
1173        }))
1174    }
1175
1176    pub fn copy_entry(
1177        &self,
1178        entry_id: ProjectEntryId,
1179        new_path: impl Into<Arc<Path>>,
1180        cx: &mut ModelContext<Worktree>,
1181    ) -> Option<Task<Result<Entry>>> {
1182        let old_path = self.entry_for_id(entry_id)?.path.clone();
1183        let new_path = new_path.into();
1184        let abs_old_path = self.absolutize(&old_path);
1185        let abs_new_path = self.absolutize(&new_path);
1186        let fs = self.fs.clone();
1187        let copy = cx.background().spawn(async move {
1188            copy_recursive(
1189                fs.as_ref(),
1190                &abs_old_path,
1191                &abs_new_path,
1192                Default::default(),
1193            )
1194            .await
1195        });
1196
1197        Some(cx.spawn(|this, mut cx| async move {
1198            copy.await?;
1199            this.update(&mut cx, |this, cx| {
1200                this.as_local_mut()
1201                    .unwrap()
1202                    .refresh_entry(new_path.clone(), None, cx)
1203            })
1204            .await
1205        }))
1206    }
1207
1208    fn refresh_entry(
1209        &self,
1210        path: Arc<Path>,
1211        old_path: Option<Arc<Path>>,
1212        cx: &mut ModelContext<Worktree>,
1213    ) -> Task<Result<Entry>> {
1214        let fs = self.fs.clone();
1215        let abs_root_path = self.abs_path.clone();
1216        let path_changes_tx = self.path_changes_tx.clone();
1217        cx.spawn_weak(move |this, mut cx| async move {
1218            let abs_path = fs.canonicalize(&abs_root_path).await?;
1219            let mut paths = Vec::with_capacity(2);
1220            paths.push(if path.file_name().is_some() {
1221                abs_path.join(&path)
1222            } else {
1223                abs_path.clone()
1224            });
1225            if let Some(old_path) = old_path {
1226                paths.push(if old_path.file_name().is_some() {
1227                    abs_path.join(&old_path)
1228                } else {
1229                    abs_path.clone()
1230                });
1231            }
1232
1233            let (tx, mut rx) = barrier::channel();
1234            path_changes_tx.try_send((paths, tx))?;
1235            rx.recv().await;
1236            this.upgrade(&cx)
1237                .ok_or_else(|| anyhow!("worktree was dropped"))?
1238                .update(&mut cx, |this, _| {
1239                    this.entry_for_path(path)
1240                        .cloned()
1241                        .ok_or_else(|| anyhow!("failed to read path after update"))
1242                })
1243        })
1244    }
1245
1246    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1247        let (share_tx, share_rx) = oneshot::channel();
1248
1249        if let Some(share) = self.share.as_mut() {
1250            let _ = share_tx.send(());
1251            *share.resume_updates.borrow_mut() = ();
1252        } else {
1253            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1254            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1255            let worktree_id = cx.model_id() as u64;
1256
1257            for (path, summaries) in &self.diagnostic_summaries {
1258                for (&server_id, summary) in summaries {
1259                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1260                        project_id,
1261                        worktree_id,
1262                        summary: Some(summary.to_proto(server_id, &path)),
1263                    }) {
1264                        return Task::ready(Err(e));
1265                    }
1266                }
1267            }
1268
1269            let _maintain_remote_snapshot = cx.background().spawn({
1270                let client = self.client.clone();
1271                async move {
1272                    let mut share_tx = Some(share_tx);
1273                    let mut prev_snapshot = LocalSnapshot {
1274                        ignores_by_parent_abs_path: Default::default(),
1275                        git_repositories: Default::default(),
1276                        snapshot: Snapshot {
1277                            id: WorktreeId(worktree_id as usize),
1278                            abs_path: Path::new("").into(),
1279                            root_name: Default::default(),
1280                            root_char_bag: Default::default(),
1281                            entries_by_path: Default::default(),
1282                            entries_by_id: Default::default(),
1283                            repository_entries: Default::default(),
1284                            scan_id: 0,
1285                            completed_scan_id: 0,
1286                        },
1287                    };
1288                    while let Some(snapshot) = snapshots_rx.recv().await {
1289                        #[cfg(any(test, feature = "test-support"))]
1290                        const MAX_CHUNK_SIZE: usize = 2;
1291                        #[cfg(not(any(test, feature = "test-support")))]
1292                        const MAX_CHUNK_SIZE: usize = 256;
1293
1294                        let update =
1295                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1296                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1297                            let _ = resume_updates_rx.try_recv();
1298                            while let Err(error) = client.request(update.clone()).await {
1299                                log::error!("failed to send worktree update: {}", error);
1300                                log::info!("waiting to resume updates");
1301                                if resume_updates_rx.next().await.is_none() {
1302                                    return Ok(());
1303                                }
1304                            }
1305                        }
1306
1307                        if let Some(share_tx) = share_tx.take() {
1308                            let _ = share_tx.send(());
1309                        }
1310
1311                        prev_snapshot = snapshot;
1312                    }
1313
1314                    Ok::<_, anyhow::Error>(())
1315                }
1316                .log_err()
1317            });
1318
1319            self.share = Some(ShareState {
1320                project_id,
1321                snapshots_tx,
1322                resume_updates: resume_updates_tx,
1323                _maintain_remote_snapshot,
1324            });
1325        }
1326
1327        cx.foreground()
1328            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1329    }
1330
1331    pub fn unshare(&mut self) {
1332        self.share.take();
1333    }
1334
1335    pub fn is_shared(&self) -> bool {
1336        self.share.is_some()
1337    }
1338}
1339
1340impl RemoteWorktree {
1341    fn snapshot(&self) -> Snapshot {
1342        self.snapshot.clone()
1343    }
1344
1345    pub fn disconnected_from_host(&mut self) {
1346        self.updates_tx.take();
1347        self.snapshot_subscriptions.clear();
1348        self.disconnected = true;
1349    }
1350
1351    pub fn save_buffer(
1352        &self,
1353        buffer_handle: ModelHandle<Buffer>,
1354        cx: &mut ModelContext<Worktree>,
1355    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1356        let buffer = buffer_handle.read(cx);
1357        let buffer_id = buffer.remote_id();
1358        let version = buffer.version();
1359        let rpc = self.client.clone();
1360        let project_id = self.project_id;
1361        cx.as_mut().spawn(|mut cx| async move {
1362            let response = rpc
1363                .request(proto::SaveBuffer {
1364                    project_id,
1365                    buffer_id,
1366                    version: serialize_version(&version),
1367                })
1368                .await?;
1369            let version = deserialize_version(&response.version);
1370            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1371            let mtime = response
1372                .mtime
1373                .ok_or_else(|| anyhow!("missing mtime"))?
1374                .into();
1375
1376            buffer_handle.update(&mut cx, |buffer, cx| {
1377                buffer.did_save(version.clone(), fingerprint, mtime, cx);
1378            });
1379
1380            Ok((version, fingerprint, mtime))
1381        })
1382    }
1383
1384    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1385        if let Some(updates_tx) = &self.updates_tx {
1386            updates_tx
1387                .unbounded_send(update)
1388                .expect("consumer runs to completion");
1389        }
1390    }
1391
1392    fn observed_snapshot(&self, scan_id: usize) -> bool {
1393        self.completed_scan_id >= scan_id
1394    }
1395
1396    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1397        let (tx, rx) = oneshot::channel();
1398        if self.observed_snapshot(scan_id) {
1399            let _ = tx.send(());
1400        } else if self.disconnected {
1401            drop(tx);
1402        } else {
1403            match self
1404                .snapshot_subscriptions
1405                .binary_search_by_key(&scan_id, |probe| probe.0)
1406            {
1407                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1408            }
1409        }
1410
1411        async move {
1412            rx.await?;
1413            Ok(())
1414        }
1415    }
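
    // An illustrative sketch, not part of the original code, of the
    // ordered-insert pattern used in `wait_for_snapshot` above (there via
    // `binary_search_by_key` over `(scan_id, sender)` pairs): the search
    // reports either the position of an equal scan id or the position where it
    // belongs, so matching `Ok(ix) | Err(ix)` keeps the queue sorted. The
    // drain loop in `Worktree::remote` can then pop waiters from the front as
    // soon as their scan id has been observed.
    #[allow(dead_code)]
    fn insert_sorted_scan_id_sketch(queue: &mut VecDeque<usize>, scan_id: usize) {
        match queue.binary_search(&scan_id) {
            Ok(ix) | Err(ix) => queue.insert(ix, scan_id),
        }
    }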
1416
1417    pub fn update_diagnostic_summary(
1418        &mut self,
1419        path: Arc<Path>,
1420        summary: &proto::DiagnosticSummary,
1421    ) {
1422        let server_id = LanguageServerId(summary.language_server_id as usize);
1423        let summary = DiagnosticSummary {
1424            error_count: summary.error_count as usize,
1425            warning_count: summary.warning_count as usize,
1426        };
1427
1428        if summary.is_empty() {
1429            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1430                summaries.remove(&server_id);
1431                if summaries.is_empty() {
1432                    self.diagnostic_summaries.remove(&path);
1433                }
1434            }
1435        } else {
1436            self.diagnostic_summaries
1437                .entry(path)
1438                .or_default()
1439                .insert(server_id, summary);
1440        }
1441    }
1442
1443    pub fn insert_entry(
1444        &mut self,
1445        entry: proto::Entry,
1446        scan_id: usize,
1447        cx: &mut ModelContext<Worktree>,
1448    ) -> Task<Result<Entry>> {
1449        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1450        cx.spawn(|this, mut cx| async move {
1451            wait_for_snapshot.await?;
1452            this.update(&mut cx, |worktree, _| {
1453                let worktree = worktree.as_remote_mut().unwrap();
1454                let mut snapshot = worktree.background_snapshot.lock();
1455                let entry = snapshot.insert_entry(entry);
1456                worktree.snapshot = snapshot.clone();
1457                entry
1458            })
1459        })
1460    }
1461
1462    pub(crate) fn delete_entry(
1463        &mut self,
1464        id: ProjectEntryId,
1465        scan_id: usize,
1466        cx: &mut ModelContext<Worktree>,
1467    ) -> Task<Result<()>> {
1468        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1469        cx.spawn(|this, mut cx| async move {
1470            wait_for_snapshot.await?;
1471            this.update(&mut cx, |worktree, _| {
1472                let worktree = worktree.as_remote_mut().unwrap();
1473                let mut snapshot = worktree.background_snapshot.lock();
1474                snapshot.delete_entry(id);
1475                worktree.snapshot = snapshot.clone();
1476            });
1477            Ok(())
1478        })
1479    }
1480}
1481
1482impl Snapshot {
1483    pub fn id(&self) -> WorktreeId {
1484        self.id
1485    }
1486
1487    pub fn abs_path(&self) -> &Arc<Path> {
1488        &self.abs_path
1489    }
1490
1491    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1492        self.entries_by_id.get(&entry_id, &()).is_some()
1493    }
1494
1495    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1496        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1497        let old_entry = self.entries_by_id.insert_or_replace(
1498            PathEntry {
1499                id: entry.id,
1500                path: entry.path.clone(),
1501                is_ignored: entry.is_ignored,
1502                scan_id: 0,
1503            },
1504            &(),
1505        );
1506        if let Some(old_entry) = old_entry {
1507            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1508        }
1509        self.entries_by_path.insert_or_replace(entry.clone(), &());
1510        Ok(entry)
1511    }
1512
1513    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1514        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1515        self.entries_by_path = {
1516            let mut cursor = self.entries_by_path.cursor();
1517            let mut new_entries_by_path =
1518                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1519            while let Some(entry) = cursor.item() {
1520                if entry.path.starts_with(&removed_entry.path) {
1521                    self.entries_by_id.remove(&entry.id, &());
1522                    cursor.next(&());
1523                } else {
1524                    break;
1525                }
1526            }
1527            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1528            new_entries_by_path
1529        };
1530
1531        Some(removed_entry.path)
1532    }
1533
1534    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1535        let mut entries_by_path_edits = Vec::new();
1536        let mut entries_by_id_edits = Vec::new();
1537        for entry_id in update.removed_entries {
1538            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1539                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1540                entries_by_id_edits.push(Edit::Remove(entry.id));
1541            }
1542        }
1543
1544        for entry in update.updated_entries {
1545            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1546            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1547                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1548            }
1549            entries_by_id_edits.push(Edit::Insert(PathEntry {
1550                id: entry.id,
1551                path: entry.path.clone(),
1552                is_ignored: entry.is_ignored,
1553                scan_id: 0,
1554            }));
1555            entries_by_path_edits.push(Edit::Insert(entry));
1556        }
1557
1558        self.entries_by_path.edit(entries_by_path_edits, &());
1559        self.entries_by_id.edit(entries_by_id_edits, &());
1560
1561        update.removed_repositories.sort_unstable();
1562        self.repository_entries.retain(|_, entry| {
1563            update
1564                .removed_repositories
1565                .binary_search(&entry.work_directory.to_proto())
1566                .is_err()
1571        });
1572
1573        for repository in update.updated_repositories {
1574            let work_directory_entry: WorkDirectoryEntry =
1575                ProjectEntryId::from_proto(repository.work_directory_id).into();
1576
1577            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1578                let mut statuses = TreeMap::default();
1579                for status_entry in repository.updated_statuses {
1580                    let Some(git_file_status) = read_git_status(status_entry.status) else {
1581                        continue;
1582                    };
1583
1584                    let repo_path = RepoPath::new(status_entry.repo_path.into());
1585                    statuses.insert(repo_path, git_file_status);
1586                }
1587
1588                let work_directory = RepositoryWorkDirectory(entry.path.clone());
1589                if self.repository_entries.get(&work_directory).is_some() {
1590                    self.repository_entries.update(&work_directory, |repo| {
1591                        repo.branch = repository.branch.map(Into::into);
1592                        repo.statuses.insert_tree(statuses);
1593
1594                        for repo_path in repository.removed_repo_paths {
1595                            let repo_path = RepoPath::new(repo_path.into());
1596                            repo.statuses.remove(&repo_path);
1597                        }
1598                    });
1599                } else {
1600                    self.repository_entries.insert(
1601                        work_directory,
1602                        RepositoryEntry {
1603                            work_directory: work_directory_entry,
1604                            branch: repository.branch.map(Into::into),
1605                            statuses,
1606                        },
1607                    )
1608                }
1609            } else {
1610                log::error!("no work directory entry for repository {:?}", repository)
1611            }
1612        }
1613
1614        self.scan_id = update.scan_id as usize;
1615        if update.is_last_update {
1616            self.completed_scan_id = update.scan_id as usize;
1617        }
1618
1619        Ok(())
1620    }
1621
1622    pub fn file_count(&self) -> usize {
1623        self.entries_by_path.summary().file_count
1624    }
1625
1626    pub fn visible_file_count(&self) -> usize {
1627        self.entries_by_path.summary().visible_file_count
1628    }
1629
1630    fn traverse_from_offset(
1631        &self,
1632        include_dirs: bool,
1633        include_ignored: bool,
1634        start_offset: usize,
1635    ) -> Traversal {
1636        let mut cursor = self.entries_by_path.cursor();
1637        cursor.seek(
1638            &TraversalTarget::Count {
1639                count: start_offset,
1640                include_dirs,
1641                include_ignored,
1642            },
1643            Bias::Right,
1644            &(),
1645        );
1646        Traversal {
1647            cursor,
1648            include_dirs,
1649            include_ignored,
1650        }
1651    }
1652
1653    fn traverse_from_path(
1654        &self,
1655        include_dirs: bool,
1656        include_ignored: bool,
1657        path: &Path,
1658    ) -> Traversal {
1659        let mut cursor = self.entries_by_path.cursor();
1660        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1661        Traversal {
1662            cursor,
1663            include_dirs,
1664            include_ignored,
1665        }
1666    }
1667
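        /// Iterate over the worktree's file entries (directories are skipped),
        /// starting at the given file index.
        ///
        /// A minimal usage sketch, not taken from the surrounding code (the
        /// `snapshot` binding is hypothetical):
        ///
        /// ```ignore
        /// // Counting the non-ignored files should agree with the summary.
        /// let visible = snapshot.files(false, 0).count();
        /// assert_eq!(visible, snapshot.visible_file_count());
        /// ```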
1668    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1669        self.traverse_from_offset(false, include_ignored, start)
1670    }
1671
1672    pub fn entries(&self, include_ignored: bool) -> Traversal {
1673        self.traverse_from_offset(true, include_ignored, 0)
1674    }
1675
1676    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1677        self.repository_entries
1678            .iter()
1679            .map(|(path, entry)| (&path.0, entry))
1680    }
1681
1682    /// Get the repository whose work directory is exactly the given path.
1683    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1684        self.repository_entries
1685            .get(&RepositoryWorkDirectory(path.into()))
1686            .cloned()
1687    }
1688
1689    /// Get the repository whose work directory contains the given path.
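        ///
        /// When work directories are nested, the longest matching work directory
        /// wins. A rough usage sketch, not from the original source (`snapshot`
        /// and the paths are hypothetical):
        ///
        /// ```ignore
        /// // A file under `deps/vendored` resolves to that nested repository
        /// // rather than to the worktree's root repository.
        /// let repo = snapshot.repository_for_path(Path::new("deps/vendored/src/lib.rs"));
        /// ```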
1690    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1691        let mut max_len = 0;
1692        let mut current_candidate = None;
1693        for (work_directory, repo) in self.repository_entries.iter() {
1694            if path.starts_with(&work_directory.0) {
1695                if work_directory.0.as_os_str().len() >= max_len {
1696                    current_candidate = Some(repo);
1697                    max_len = work_directory.0.as_os_str().len();
1698                } else {
1699                    break;
1700                }
1701            }
1702        }
1703
1704        current_candidate.cloned()
1705    }
1706
1707    /// Given an ordered iterator of entries, returns an iterator of those entries,
1708    /// along with their containing git repository.
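        ///
        /// A rough usage sketch, not from the original source (the `snapshot`
        /// binding is hypothetical):
        ///
        /// ```ignore
        /// for (entry, repo) in snapshot.entries_with_repositories(snapshot.files(false, 0)) {
        ///     // `repo` is `Some(..)` only when the entry lies within a repository's
        ///     // work directory.
        ///     println!("{:?} in repo: {}", entry.path, repo.is_some());
        /// }
        /// ```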
1709    pub fn entries_with_repositories<'a>(
1710        &'a self,
1711        entries: impl 'a + Iterator<Item = &'a Entry>,
1712    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1713        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1714        let mut repositories = self.repositories().peekable();
1715        entries.map(move |entry| {
1716            while let Some((repo_path, _)) = containing_repos.last() {
1717                if !entry.path.starts_with(repo_path) {
1718                    containing_repos.pop();
1719                } else {
1720                    break;
1721                }
1722            }
1723            while let Some((repo_path, _)) = repositories.peek() {
1724                if entry.path.starts_with(repo_path) {
1725                    containing_repos.push(repositories.next().unwrap());
1726                } else {
1727                    break;
1728                }
1729            }
1730            let repo = containing_repos.last().map(|(_, repo)| *repo);
1731            (entry, repo)
1732        })
1733    }
1734
1735    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1736        let empty_path = Path::new("");
1737        self.entries_by_path
1738            .cursor::<()>()
1739            .filter(move |entry| entry.path.as_ref() != empty_path)
1740            .map(|entry| &entry.path)
1741    }
1742
1743    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1744        let mut cursor = self.entries_by_path.cursor();
1745        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1746        let traversal = Traversal {
1747            cursor,
1748            include_dirs: true,
1749            include_ignored: true,
1750        };
1751        ChildEntriesIter {
1752            traversal,
1753            parent_path,
1754        }
1755    }
1756
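        // Iterate over the entries whose paths are descendants of `parent_path`,
        // optionally including directories and ignored entries.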
1757    fn descendent_entries<'a>(
1758        &'a self,
1759        include_dirs: bool,
1760        include_ignored: bool,
1761        parent_path: &'a Path,
1762    ) -> DescendentEntriesIter<'a> {
1763        let mut cursor = self.entries_by_path.cursor();
1764        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1765        let mut traversal = Traversal {
1766            cursor,
1767            include_dirs,
1768            include_ignored,
1769        };
1770
1771        if traversal.end_offset() == traversal.start_offset() {
1772            traversal.advance();
1773        }
1774
1775        DescendentEntriesIter {
1776            traversal,
1777            parent_path,
1778        }
1779    }
1780
1781    pub fn root_entry(&self) -> Option<&Entry> {
1782        self.entry_for_path("")
1783    }
1784
1785    pub fn root_name(&self) -> &str {
1786        &self.root_name
1787    }
1788
1789    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1790        self.repository_entries
1791            .get(&RepositoryWorkDirectory(Path::new("").into()))
1792            .map(|entry| entry.to_owned())
1793    }
1794
1795    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1796        self.repository_entries.values()
1797    }
1798
1799    pub fn scan_id(&self) -> usize {
1800        self.scan_id
1801    }
1802
1803    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1804        let path = path.as_ref();
1805        self.traverse_from_path(true, true, path)
1806            .entry()
1807            .and_then(|entry| {
1808                if entry.path.as_ref() == path {
1809                    Some(entry)
1810                } else {
1811                    None
1812                }
1813            })
1814    }
1815
1816    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1817        let entry = self.entries_by_id.get(&id, &())?;
1818        self.entry_for_path(&entry.path)
1819    }
1820
1821    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1822        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1823    }
1824}
1825
1826impl LocalSnapshot {
1827    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1828        self.git_repositories.get(&repo.work_directory.0)
1829    }
1830
1831    pub(crate) fn repo_for_metadata(
1832        &self,
1833        path: &Path,
1834    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1835        self.git_repositories
1836            .iter()
1837            .find(|(_, repo)| repo.in_dot_git(path))
1838    }
1839
1840    #[cfg(test)]
1841    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1842        let root_name = self.root_name.clone();
1843        proto::UpdateWorktree {
1844            project_id,
1845            worktree_id: self.id().to_proto(),
1846            abs_path: self.abs_path().to_string_lossy().into(),
1847            root_name,
1848            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1849            removed_entries: Default::default(),
1850            scan_id: self.scan_id as u64,
1851            is_last_update: true,
1852            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1853            removed_repositories: Default::default(),
1854        }
1855    }
1856
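        // Compute the delta between this snapshot and `other`, producing an
        // `UpdateWorktree` message that lists the entries and repositories added,
        // changed, or removed since `other` was captured.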
1857    pub(crate) fn build_update(
1858        &self,
1859        other: &Self,
1860        project_id: u64,
1861        worktree_id: u64,
1862        include_ignored: bool,
1863    ) -> proto::UpdateWorktree {
1864        let mut updated_entries = Vec::new();
1865        let mut removed_entries = Vec::new();
1866        let mut self_entries = self
1867            .entries_by_id
1868            .cursor::<()>()
1869            .filter(|e| include_ignored || !e.is_ignored)
1870            .peekable();
1871        let mut other_entries = other
1872            .entries_by_id
1873            .cursor::<()>()
1874            .filter(|e| include_ignored || !e.is_ignored)
1875            .peekable();
1876        loop {
1877            match (self_entries.peek(), other_entries.peek()) {
1878                (Some(self_entry), Some(other_entry)) => {
1879                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1880                        Ordering::Less => {
1881                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1882                            updated_entries.push(entry);
1883                            self_entries.next();
1884                        }
1885                        Ordering::Equal => {
1886                            if self_entry.scan_id != other_entry.scan_id {
1887                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1888                                updated_entries.push(entry);
1889                            }
1890
1891                            self_entries.next();
1892                            other_entries.next();
1893                        }
1894                        Ordering::Greater => {
1895                            removed_entries.push(other_entry.id.to_proto());
1896                            other_entries.next();
1897                        }
1898                    }
1899                }
1900                (Some(self_entry), None) => {
1901                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1902                    updated_entries.push(entry);
1903                    self_entries.next();
1904                }
1905                (None, Some(other_entry)) => {
1906                    removed_entries.push(other_entry.id.to_proto());
1907                    other_entries.next();
1908                }
1909                (None, None) => break,
1910            }
1911        }
1912
1913        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1914        let mut removed_repositories = Vec::new();
1915        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1916        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1917        loop {
1918            match (self_repos.peek(), other_repos.peek()) {
1919                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1920                    match Ord::cmp(self_work_dir, other_work_dir) {
1921                        Ordering::Less => {
1922                            updated_repositories.push((*self_repo).into());
1923                            self_repos.next();
1924                        }
1925                        Ordering::Equal => {
1926                            if self_repo != other_repo {
1927                                updated_repositories.push(self_repo.build_update(other_repo));
1928                            }
1929
1930                            self_repos.next();
1931                            other_repos.next();
1932                        }
1933                        Ordering::Greater => {
1934                            removed_repositories.push(other_repo.work_directory.to_proto());
1935                            other_repos.next();
1936                        }
1937                    }
1938                }
1939                (Some((_, self_repo)), None) => {
1940                    updated_repositories.push((*self_repo).into());
1941                    self_repos.next();
1942                }
1943                (None, Some((_, other_repo))) => {
1944                    removed_repositories.push(other_repo.work_directory.to_proto());
1945                    other_repos.next();
1946                }
1947                (None, None) => break,
1948            }
1949        }
1950
1951        proto::UpdateWorktree {
1952            project_id,
1953            worktree_id,
1954            abs_path: self.abs_path().to_string_lossy().into(),
1955            root_name: self.root_name().to_string(),
1956            updated_entries,
1957            removed_entries,
1958            scan_id: self.scan_id as u64,
1959            is_last_update: self.completed_scan_id == self.scan_id,
1960            updated_repositories,
1961            removed_repositories,
1962        }
1963    }
1964
1965    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1966        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1967            let abs_path = self.abs_path.join(&entry.path);
1968            match smol::block_on(build_gitignore(&abs_path, fs)) {
1969                Ok(ignore) => {
1970                    self.ignores_by_parent_abs_path
1971                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
1972                }
1973                Err(error) => {
1974                    log::error!(
1975                        "error loading .gitignore file {:?} - {:?}",
1976                        &entry.path,
1977                        error
1978                    );
1979                }
1980            }
1981        }
1982
1983        if entry.kind == EntryKind::PendingDir {
1984            if let Some(existing_entry) =
1985                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1986            {
1987                entry.kind = existing_entry.kind;
1988            }
1989        }
1990
1991        let scan_id = self.scan_id;
1992        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1993        if let Some(removed) = removed {
1994            if removed.id != entry.id {
1995                self.entries_by_id.remove(&removed.id, &());
1996            }
1997        }
1998        self.entries_by_id.insert_or_replace(
1999            PathEntry {
2000                id: entry.id,
2001                path: entry.path.clone(),
2002                is_ignored: entry.is_ignored,
2003                scan_id,
2004            },
2005            &(),
2006        );
2007
2008        entry
2009    }
2010
2011    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
2012        let abs_path = self.abs_path.join(&parent_path);
2013        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
2014
2015        // Guard against git repositories whose work directory lies inside another
2016        // repository's `.git` metadata directory.
2017        if work_dir
2018            .components()
2019            .any(|component| component.as_os_str() == *DOT_GIT)
2020        {
2021            return None;
2022        }
2023
2024        let work_dir_id = self
2025            .entry_for_path(work_dir.clone())
2026            .map(|entry| entry.id)?;
2027
2028        if self.git_repositories.get(&work_dir_id).is_none() {
2029            let repo = fs.open_repo(abs_path.as_path())?;
2030            let work_directory = RepositoryWorkDirectory(work_dir.clone());
2031            let scan_id = self.scan_id;
2032
2033            let repo_lock = repo.lock();
2034
2035            self.repository_entries.insert(
2036                work_directory,
2037                RepositoryEntry {
2038                    work_directory: work_dir_id.into(),
2039                    branch: repo_lock.branch_name().map(Into::into),
2040                    statuses: repo_lock.statuses().unwrap_or_default(),
2041                },
2042            );
2043            drop(repo_lock);
2044
2045            self.git_repositories.insert(
2046                work_dir_id,
2047                LocalRepositoryEntry {
2048                    scan_id,
2049                    git_dir_scan_id: scan_id,
2050                    repo_ptr: repo,
2051                    git_dir_path: parent_path.clone(),
2052                },
2053            )
2054        }
2055
2056        Some(())
2057    }
2058
2059    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2060        let mut inodes = TreeSet::default();
2061        for ancestor in path.ancestors().skip(1) {
2062            if let Some(entry) = self.entry_for_path(ancestor) {
2063                inodes.insert(entry.inode);
2064            }
2065        }
2066        inodes
2067    }
2068
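        // Build the stack of `.gitignore` files that apply to `abs_path` by walking
        // its ancestors from the outermost directory inward. If any ancestor is
        // itself ignored, the entire subtree is treated as ignored.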
2069    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2070        let mut new_ignores = Vec::new();
2071        for ancestor in abs_path.ancestors().skip(1) {
2072            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2073                new_ignores.push((ancestor, Some(ignore.clone())));
2074            } else {
2075                new_ignores.push((ancestor, None));
2076            }
2077        }
2078
2079        let mut ignore_stack = IgnoreStack::none();
2080        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2081            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2082                ignore_stack = IgnoreStack::all();
2083                break;
2084            } else if let Some(ignore) = ignore {
2085                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2086            }
2087        }
2088
2089        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2090            ignore_stack = IgnoreStack::all();
2091        }
2092
2093        ignore_stack
2094    }
2095}
2096
2097impl LocalMutableSnapshot {
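        // When a path reappears (for example, it was removed and then recreated),
        // try to keep a stable `ProjectEntryId` by reusing the id recorded for the
        // same inode, or the id of an existing entry at the same path.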
2098    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2099        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2100            entry.id = removed_entry_id;
2101        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2102            entry.id = existing_entry.id;
2103        }
2104    }
2105
2106    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2107        self.reuse_entry_id(&mut entry);
2108        self.snapshot.insert_entry(entry, fs)
2109    }
2110
2111    fn populate_dir(
2112        &mut self,
2113        parent_path: Arc<Path>,
2114        entries: impl IntoIterator<Item = Entry>,
2115        ignore: Option<Arc<Gitignore>>,
2116        fs: &dyn Fs,
2117    ) {
2118        let mut parent_entry = if let Some(parent_entry) =
2119            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
2120        {
2121            parent_entry.clone()
2122        } else {
2123            log::warn!(
2124                "populating a directory {:?} that has been removed",
2125                parent_path
2126            );
2127            return;
2128        };
2129
2130        match parent_entry.kind {
2131            EntryKind::PendingDir => {
2132                parent_entry.kind = EntryKind::Dir;
2133            }
2134            EntryKind::Dir => {}
2135            _ => return,
2136        }
2137
2138        if let Some(ignore) = ignore {
2139            let abs_parent_path = self.abs_path.join(&parent_path).into();
2140            self.ignores_by_parent_abs_path
2141                .insert(abs_parent_path, (ignore, false));
2142        }
2143
2144        if parent_path.file_name() == Some(&DOT_GIT) {
2145            self.build_repo(parent_path, fs);
2146        }
2147
2148        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2149        let mut entries_by_id_edits = Vec::new();
2150
2151        for mut entry in entries {
2152            self.reuse_entry_id(&mut entry);
2153            entries_by_id_edits.push(Edit::Insert(PathEntry {
2154                id: entry.id,
2155                path: entry.path.clone(),
2156                is_ignored: entry.is_ignored,
2157                scan_id: self.scan_id,
2158            }));
2159            entries_by_path_edits.push(Edit::Insert(entry));
2160        }
2161
2162        self.entries_by_path.edit(entries_by_path_edits, &());
2163        self.entries_by_id.edit(entries_by_id_edits, &());
2164    }
2165
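        // Remove `path` and all of its descendants from the snapshot, remembering
        // the removed entry ids by inode so they can be reused if the paths come
        // back. If a `.gitignore` was removed, mark its parent directory's ignore
        // data as needing an update.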
2166    fn remove_path(&mut self, path: &Path) {
2167        let mut new_entries;
2168        let removed_entries;
2169        {
2170            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2171            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2172            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2173            new_entries.push_tree(cursor.suffix(&()), &());
2174        }
2175        self.entries_by_path = new_entries;
2176
2177        let mut entries_by_id_edits = Vec::new();
2178        for entry in removed_entries.cursor::<()>() {
2179            let removed_entry_id = self
2180                .removed_entry_ids
2181                .entry(entry.inode)
2182                .or_insert(entry.id);
2183            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2184            entries_by_id_edits.push(Edit::Remove(entry.id));
2185        }
2186        self.entries_by_id.edit(entries_by_id_edits, &());
2187
2188        if path.file_name() == Some(&GITIGNORE) {
2189            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2190            if let Some((_, needs_update)) = self
2191                .ignores_by_parent_abs_path
2192                .get_mut(abs_parent_path.as_path())
2193            {
2194                *needs_update = true;
2195            }
2196        }
2197    }
2198}
2199
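    // Parse a `.gitignore` file into a `Gitignore` matcher rooted at the file's
    // parent directory. A minimal usage sketch, assuming a hypothetical `fs`
    // handle and a `.gitignore` that lists `target/`:
    //
    // ```ignore
    // let ignore = build_gitignore(Path::new("/project/.gitignore"), fs.as_ref()).await?;
    // assert!(ignore.matched("/project/target", true).is_ignore());
    // ```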
2200async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2201    let contents = fs.load(abs_path).await?;
2202    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2203    let mut builder = GitignoreBuilder::new(parent);
2204    for line in contents.lines() {
2205        builder.add_line(Some(abs_path.into()), line)?;
2206    }
2207    Ok(builder.build()?)
2208}
2209
2210impl WorktreeId {
2211    pub fn from_usize(handle_id: usize) -> Self {
2212        Self(handle_id)
2213    }
2214
2215    pub(crate) fn from_proto(id: u64) -> Self {
2216        Self(id as usize)
2217    }
2218
2219    pub fn to_proto(&self) -> u64 {
2220        self.0 as u64
2221    }
2222
2223    pub fn to_usize(&self) -> usize {
2224        self.0
2225    }
2226}
2227
2228impl fmt::Display for WorktreeId {
2229    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2230        self.0.fmt(f)
2231    }
2232}
2233
2234impl Deref for Worktree {
2235    type Target = Snapshot;
2236
2237    fn deref(&self) -> &Self::Target {
2238        match self {
2239            Worktree::Local(worktree) => &worktree.snapshot,
2240            Worktree::Remote(worktree) => &worktree.snapshot,
2241        }
2242    }
2243}
2244
2245impl Deref for LocalWorktree {
2246    type Target = LocalSnapshot;
2247
2248    fn deref(&self) -> &Self::Target {
2249        &self.snapshot
2250    }
2251}
2252
2253impl Deref for RemoteWorktree {
2254    type Target = Snapshot;
2255
2256    fn deref(&self) -> &Self::Target {
2257        &self.snapshot
2258    }
2259}
2260
2261impl fmt::Debug for LocalWorktree {
2262    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2263        self.snapshot.fmt(f)
2264    }
2265}
2266
2267impl fmt::Debug for Snapshot {
2268    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2269        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2270        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2271
2272        impl<'a> fmt::Debug for EntriesByPath<'a> {
2273            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2274                f.debug_map()
2275                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2276                    .finish()
2277            }
2278        }
2279
2280        impl<'a> fmt::Debug for EntriesById<'a> {
2281            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2282                f.debug_list().entries(self.0.iter()).finish()
2283            }
2284        }
2285
2286        f.debug_struct("Snapshot")
2287            .field("id", &self.id)
2288            .field("root_name", &self.root_name)
2289            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2290            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2291            .finish()
2292    }
2293}
2294
2295#[derive(Clone, PartialEq)]
2296pub struct File {
2297    pub worktree: ModelHandle<Worktree>,
2298    pub path: Arc<Path>,
2299    pub mtime: SystemTime,
2300    pub(crate) entry_id: ProjectEntryId,
2301    pub(crate) is_local: bool,
2302    pub(crate) is_deleted: bool,
2303}
2304
2305impl language::File for File {
2306    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2307        if self.is_local {
2308            Some(self)
2309        } else {
2310            None
2311        }
2312    }
2313
2314    fn mtime(&self) -> SystemTime {
2315        self.mtime
2316    }
2317
2318    fn path(&self) -> &Arc<Path> {
2319        &self.path
2320    }
2321
2322    fn full_path(&self, cx: &AppContext) -> PathBuf {
2323        let mut full_path = PathBuf::new();
2324        let worktree = self.worktree.read(cx);
2325
2326        if worktree.is_visible() {
2327            full_path.push(worktree.root_name());
2328        } else {
2329            let path = worktree.abs_path();
2330
2331            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2332                full_path.push("~");
2333                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2334            } else {
2335                full_path.push(path)
2336            }
2337        }
2338
2339        if self.path.components().next().is_some() {
2340            full_path.push(&self.path);
2341        }
2342
2343        full_path
2344    }
2345
2346    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2347    /// of its worktree, then this method will return the name of the worktree itself.
2348    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2349        self.path
2350            .file_name()
2351            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2352    }
2353
2354    fn is_deleted(&self) -> bool {
2355        self.is_deleted
2356    }
2357
2358    fn as_any(&self) -> &dyn Any {
2359        self
2360    }
2361
2362    fn to_proto(&self) -> rpc::proto::File {
2363        rpc::proto::File {
2364            worktree_id: self.worktree.id() as u64,
2365            entry_id: self.entry_id.to_proto(),
2366            path: self.path.to_string_lossy().into(),
2367            mtime: Some(self.mtime.into()),
2368            is_deleted: self.is_deleted,
2369        }
2370    }
2371}
2372
2373impl language::LocalFile for File {
2374    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2375        self.worktree
2376            .read(cx)
2377            .as_local()
2378            .unwrap()
2379            .abs_path
2380            .join(&self.path)
2381    }
2382
2383    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2384        let worktree = self.worktree.read(cx).as_local().unwrap();
2385        let abs_path = worktree.absolutize(&self.path);
2386        let fs = worktree.fs.clone();
2387        cx.background()
2388            .spawn(async move { fs.load(&abs_path).await })
2389    }
2390
2391    fn buffer_reloaded(
2392        &self,
2393        buffer_id: u64,
2394        version: &clock::Global,
2395        fingerprint: RopeFingerprint,
2396        line_ending: LineEnding,
2397        mtime: SystemTime,
2398        cx: &mut AppContext,
2399    ) {
2400        let worktree = self.worktree.read(cx).as_local().unwrap();
2401        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2402            worktree
2403                .client
2404                .send(proto::BufferReloaded {
2405                    project_id,
2406                    buffer_id,
2407                    version: serialize_version(version),
2408                    mtime: Some(mtime.into()),
2409                    fingerprint: serialize_fingerprint(fingerprint),
2410                    line_ending: serialize_line_ending(line_ending) as i32,
2411                })
2412                .log_err();
2413        }
2414    }
2415}
2416
2417impl File {
2418    pub fn from_proto(
2419        proto: rpc::proto::File,
2420        worktree: ModelHandle<Worktree>,
2421        cx: &AppContext,
2422    ) -> Result<Self> {
2423        let worktree_id = worktree
2424            .read(cx)
2425            .as_remote()
2426            .ok_or_else(|| anyhow!("not remote"))?
2427            .id();
2428
2429        if worktree_id.to_proto() != proto.worktree_id {
2430            return Err(anyhow!("worktree id does not match file"));
2431        }
2432
2433        Ok(Self {
2434            worktree,
2435            path: Path::new(&proto.path).into(),
2436            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2437            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2438            is_local: false,
2439            is_deleted: proto.is_deleted,
2440        })
2441    }
2442
2443    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2444        file.and_then(|f| f.as_any().downcast_ref())
2445    }
2446
2447    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2448        self.worktree.read(cx).id()
2449    }
2450
2451    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2452        if self.is_deleted {
2453            None
2454        } else {
2455            Some(self.entry_id)
2456        }
2457    }
2458}
2459
2460#[derive(Clone, Debug, PartialEq, Eq)]
2461pub struct Entry {
2462    pub id: ProjectEntryId,
2463    pub kind: EntryKind,
2464    pub path: Arc<Path>,
2465    pub inode: u64,
2466    pub mtime: SystemTime,
2467    pub is_symlink: bool,
2468    pub is_ignored: bool,
2469}
2470
2471#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2472pub enum EntryKind {
2473    PendingDir,
2474    Dir,
2475    File(CharBag),
2476}
2477
2478#[derive(Clone, Copy, Debug)]
2479pub enum PathChange {
2480    Added,
2481    Removed,
2482    Updated,
2483    AddedOrUpdated,
2484}
2485
2486impl Entry {
2487    fn new(
2488        path: Arc<Path>,
2489        metadata: &fs::Metadata,
2490        next_entry_id: &AtomicUsize,
2491        root_char_bag: CharBag,
2492    ) -> Self {
2493        Self {
2494            id: ProjectEntryId::new(next_entry_id),
2495            kind: if metadata.is_dir {
2496                EntryKind::PendingDir
2497            } else {
2498                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2499            },
2500            path,
2501            inode: metadata.inode,
2502            mtime: metadata.mtime,
2503            is_symlink: metadata.is_symlink,
2504            is_ignored: false,
2505        }
2506    }
2507
2508    pub fn is_dir(&self) -> bool {
2509        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2510    }
2511
2512    pub fn is_file(&self) -> bool {
2513        matches!(self.kind, EntryKind::File(_))
2514    }
2515}
2516
2517impl sum_tree::Item for Entry {
2518    type Summary = EntrySummary;
2519
2520    fn summary(&self) -> Self::Summary {
2521        let visible_count = if self.is_ignored { 0 } else { 1 };
2522        let file_count;
2523        let visible_file_count;
2524        if self.is_file() {
2525            file_count = 1;
2526            visible_file_count = visible_count;
2527        } else {
2528            file_count = 0;
2529            visible_file_count = 0;
2530        }
2531
2532        EntrySummary {
2533            max_path: self.path.clone(),
2534            count: 1,
2535            visible_count,
2536            file_count,
2537            visible_file_count,
2538        }
2539    }
2540}
2541
2542impl sum_tree::KeyedItem for Entry {
2543    type Key = PathKey;
2544
2545    fn key(&self) -> Self::Key {
2546        PathKey(self.path.clone())
2547    }
2548}
2549
2550#[derive(Clone, Debug)]
2551pub struct EntrySummary {
2552    max_path: Arc<Path>,
2553    count: usize,
2554    visible_count: usize,
2555    file_count: usize,
2556    visible_file_count: usize,
2557}
2558
2559impl Default for EntrySummary {
2560    fn default() -> Self {
2561        Self {
2562            max_path: Arc::from(Path::new("")),
2563            count: 0,
2564            visible_count: 0,
2565            file_count: 0,
2566            visible_file_count: 0,
2567        }
2568    }
2569}
2570
2571impl sum_tree::Summary for EntrySummary {
2572    type Context = ();
2573
2574    fn add_summary(&mut self, rhs: &Self, _: &()) {
2575        self.max_path = rhs.max_path.clone();
2576        self.count += rhs.count;
2577        self.visible_count += rhs.visible_count;
2578        self.file_count += rhs.file_count;
2579        self.visible_file_count += rhs.visible_file_count;
2580    }
2581}
2582
2583#[derive(Clone, Debug)]
2584struct PathEntry {
2585    id: ProjectEntryId,
2586    path: Arc<Path>,
2587    is_ignored: bool,
2588    scan_id: usize,
2589}
2590
2591impl sum_tree::Item for PathEntry {
2592    type Summary = PathEntrySummary;
2593
2594    fn summary(&self) -> Self::Summary {
2595        PathEntrySummary { max_id: self.id }
2596    }
2597}
2598
2599impl sum_tree::KeyedItem for PathEntry {
2600    type Key = ProjectEntryId;
2601
2602    fn key(&self) -> Self::Key {
2603        self.id
2604    }
2605}
2606
2607#[derive(Clone, Debug, Default)]
2608struct PathEntrySummary {
2609    max_id: ProjectEntryId,
2610}
2611
2612impl sum_tree::Summary for PathEntrySummary {
2613    type Context = ();
2614
2615    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2616        self.max_id = summary.max_id;
2617    }
2618}
2619
2620impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2621    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2622        *self = summary.max_id;
2623    }
2624}
2625
2626#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2627pub struct PathKey(Arc<Path>);
2628
2629impl Default for PathKey {
2630    fn default() -> Self {
2631        Self(Path::new("").into())
2632    }
2633}
2634
2635impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2636    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2637        self.0 = summary.max_path.clone();
2638    }
2639}
2640
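    // Scans the worktree's directory in the background, emitting `ScanState`
    // updates as entries are discovered and as file system events arrive.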
2641struct BackgroundScanner {
2642    snapshot: Mutex<LocalMutableSnapshot>,
2643    fs: Arc<dyn Fs>,
2644    status_updates_tx: UnboundedSender<ScanState>,
2645    executor: Arc<executor::Background>,
2646    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2647    prev_state: Mutex<BackgroundScannerState>,
2648    next_entry_id: Arc<AtomicUsize>,
2649    finished_initial_scan: bool,
2650}
2651
2652struct BackgroundScannerState {
2653    snapshot: Snapshot,
2654    event_paths: Vec<Arc<Path>>,
2655}
2656
2657impl BackgroundScanner {
2658    fn new(
2659        snapshot: LocalSnapshot,
2660        next_entry_id: Arc<AtomicUsize>,
2661        fs: Arc<dyn Fs>,
2662        status_updates_tx: UnboundedSender<ScanState>,
2663        executor: Arc<executor::Background>,
2664        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2665    ) -> Self {
2666        Self {
2667            fs,
2668            status_updates_tx,
2669            executor,
2670            refresh_requests_rx,
2671            next_entry_id,
2672            prev_state: Mutex::new(BackgroundScannerState {
2673                snapshot: snapshot.snapshot.clone(),
2674                event_paths: Default::default(),
2675            }),
2676            snapshot: Mutex::new(LocalMutableSnapshot {
2677                snapshot,
2678                removed_entry_ids: Default::default(),
2679            }),
2680            finished_initial_scan: false,
2681        }
2682    }
2683
2684    async fn run(
2685        &mut self,
2686        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2687    ) {
2688        use futures::FutureExt as _;
2689
2690        let (root_abs_path, root_inode) = {
2691            let snapshot = self.snapshot.lock();
2692            (
2693                snapshot.abs_path.clone(),
2694                snapshot.root_entry().map(|e| e.inode),
2695            )
2696        };
2697
2698        // Populate ignores above the root.
2699        let ignore_stack;
2700        for ancestor in root_abs_path.ancestors().skip(1) {
2701            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2702            {
2703                self.snapshot
2704                    .lock()
2705                    .ignores_by_parent_abs_path
2706                    .insert(ancestor.into(), (ignore.into(), false));
2707            }
2708        }
2709        {
2710            let mut snapshot = self.snapshot.lock();
2711            snapshot.scan_id += 1;
2712            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2713            if ignore_stack.is_all() {
2714                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2715                    root_entry.is_ignored = true;
2716                    snapshot.insert_entry(root_entry, self.fs.as_ref());
2717                }
2718            }
2719        };
2720
2721        // Perform an initial scan of the directory.
2722        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2723        smol::block_on(scan_job_tx.send(ScanJob {
2724            abs_path: root_abs_path,
2725            path: Arc::from(Path::new("")),
2726            ignore_stack,
2727            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2728            scan_queue: scan_job_tx.clone(),
2729        }))
2730        .unwrap();
2731        drop(scan_job_tx);
2732        self.scan_dirs(true, scan_job_rx).await;
2733        {
2734            let mut snapshot = self.snapshot.lock();
2735            snapshot.completed_scan_id = snapshot.scan_id;
2736        }
2737        self.send_status_update(false, None);
2738
2739        // Process any FS events that occurred while performing the initial scan.
2740        // For these events, the reported changes cannot be as precise, because we
2741        // didn't have the previous state loaded yet.
2742        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2743            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2744            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2745                paths.extend(more_events.into_iter().map(|e| e.path));
2746            }
2747            self.process_events(paths).await;
2748        }
2749
2750        self.finished_initial_scan = true;
2751
2752        // Continue processing events until the worktree is dropped.
2753        loop {
2754            select_biased! {
2755                // Process any path refresh requests from the worktree. Prioritize
2756                // these before handling changes reported by the filesystem.
2757                request = self.refresh_requests_rx.recv().fuse() => {
2758                    let Ok((paths, barrier)) = request else { break };
2759                    if !self.process_refresh_request(paths.clone(), barrier).await {
2760                        return;
2761                    }
2762                }
2763
2764                events = events_rx.next().fuse() => {
2765                    let Some(events) = events else { break };
2766                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2767                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2768                        paths.extend(more_events.into_iter().map(|e| e.path));
2769                    }
2770                    self.process_events(paths.clone()).await;
2771                }
2772            }
2773        }
2774    }
2775
2776    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2777        if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
2778            paths.sort_unstable();
2779            util::extend_sorted(
2780                &mut self.prev_state.lock().event_paths,
2781                paths,
2782                usize::MAX,
2783                Ord::cmp,
2784            );
2785        }
2786        self.send_status_update(false, Some(barrier))
2787    }
2788
2789    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2790        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2791        let paths = self
2792            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2793            .await;
2794        if let Some(paths) = &paths {
2795            util::extend_sorted(
2796                &mut self.prev_state.lock().event_paths,
2797                paths.iter().cloned(),
2798                usize::MAX,
2799                Ord::cmp,
2800            );
2801        }
2802        drop(scan_job_tx);
2803        self.scan_dirs(false, scan_job_rx).await;
2804
2805        self.update_ignore_statuses().await;
2806
2807        let mut snapshot = self.snapshot.lock();
2808
2809        if let Some(paths) = paths {
2810            for path in paths {
2811                self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
2812            }
2813        }
2814
2815        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2816        git_repositories.retain(|work_directory_id, _| {
2817            snapshot
2818                .entry_for_id(*work_directory_id)
2819                .map_or(false, |entry| {
2820                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2821                })
2822        });
2823        snapshot.git_repositories = git_repositories;
2824
2825        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2826        git_repository_entries.retain(|_, entry| {
2827            snapshot
2828                .git_repositories
2829                .get(&entry.work_directory.0)
2830                .is_some()
2831        });
2832        snapshot.snapshot.repository_entries = git_repository_entries;
2833        snapshot.completed_scan_id = snapshot.scan_id;
2834        drop(snapshot);
2835
2836        self.send_status_update(false, None);
2837        self.prev_state.lock().event_paths.clear();
2838    }
2839
2840    async fn scan_dirs(
2841        &self,
2842        enable_progress_updates: bool,
2843        scan_jobs_rx: channel::Receiver<ScanJob>,
2844    ) {
2845        use futures::FutureExt as _;
2846
2847        if self
2848            .status_updates_tx
2849            .unbounded_send(ScanState::Started)
2850            .is_err()
2851        {
2852            return;
2853        }
2854
2855        let progress_update_count = AtomicUsize::new(0);
2856        self.executor
2857            .scoped(|scope| {
2858                for _ in 0..self.executor.num_cpus() {
2859                    scope.spawn(async {
2860                        let mut last_progress_update_count = 0;
2861                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2862                        futures::pin_mut!(progress_update_timer);
2863
2864                        loop {
2865                            select_biased! {
2866                                // Process any path refresh requests before moving on to process
2867                                // the scan queue, so that user operations are prioritized.
2868                                request = self.refresh_requests_rx.recv().fuse() => {
2869                                    let Ok((paths, barrier)) = request else { break };
2870                                    if !self.process_refresh_request(paths, barrier).await {
2871                                        return;
2872                                    }
2873                                }
2874
2875                                // Send periodic progress updates to the worktree. Use an atomic counter
2876                                // to ensure that only one of the workers sends a progress update after
2877                                // the update interval elapses.
2878                                _ = progress_update_timer => {
2879                                    match progress_update_count.compare_exchange(
2880                                        last_progress_update_count,
2881                                        last_progress_update_count + 1,
2882                                        SeqCst,
2883                                        SeqCst
2884                                    ) {
2885                                        Ok(_) => {
2886                                            last_progress_update_count += 1;
2887                                            self.send_status_update(true, None);
2888                                        }
2889                                        Err(count) => {
2890                                            last_progress_update_count = count;
2891                                        }
2892                                    }
2893                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2894                                }
2895
2896                                // Recursively load directories from the file system.
2897                                job = scan_jobs_rx.recv().fuse() => {
2898                                    let Ok(job) = job else { break };
2899                                    if let Err(err) = self.scan_dir(&job).await {
2900                                        if job.path.as_ref() != Path::new("") {
2901                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2902                                        }
2903                                    }
2904                                }
2905                            }
2906                        }
2907                    })
2908                }
2909            })
2910            .await;
2911    }
2912
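        // Compare the current snapshot against the one from the previous update,
        // compute the resulting change set, and forward it on `status_updates_tx`.
        // Returns `false` if the receiving end of the channel has been dropped.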
2913    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2914        let mut prev_state = self.prev_state.lock();
2915        let new_snapshot = self.snapshot.lock().clone();
2916        let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());
2917
2918        let changes = self.build_change_set(
2919            &old_snapshot,
2920            &new_snapshot.snapshot,
2921            &prev_state.event_paths,
2922        );
2923
2924        self.status_updates_tx
2925            .unbounded_send(ScanState::Updated {
2926                snapshot: new_snapshot,
2927                changes,
2928                scanning,
2929                barrier,
2930            })
2931            .is_ok()
2932    }
2933
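        // Read the children of `job.abs_path`, build entries for them, and enqueue
        // scan jobs for subdirectories that are not recursive symlinks. A
        // `.gitignore` found in this directory is applied to its siblings and to
        // the newly enqueued jobs.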
2934    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2935        let mut new_entries: Vec<Entry> = Vec::new();
2936        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2937        let mut ignore_stack = job.ignore_stack.clone();
2938        let mut new_ignore = None;
2939        let (root_abs_path, root_char_bag, next_entry_id) = {
2940            let snapshot = self.snapshot.lock();
2941            (
2942                snapshot.abs_path().clone(),
2943                snapshot.root_char_bag,
2944                self.next_entry_id.clone(),
2945            )
2946        };
2947        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2948        while let Some(child_abs_path) = child_paths.next().await {
2949            let child_abs_path: Arc<Path> = match child_abs_path {
2950                Ok(child_abs_path) => child_abs_path.into(),
2951                Err(error) => {
2952                    log::error!("error processing entry {:?}", error);
2953                    continue;
2954                }
2955            };
2956
2957            let child_name = child_abs_path.file_name().unwrap();
2958            let child_path: Arc<Path> = job.path.join(child_name).into();
2959            let child_metadata = match self.fs.metadata(&child_abs_path).await {
2960                Ok(Some(metadata)) => metadata,
2961                Ok(None) => continue,
2962                Err(err) => {
2963                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
2964                    continue;
2965                }
2966            };
2967
2968            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2969            if child_name == *GITIGNORE {
2970                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2971                    Ok(ignore) => {
2972                        let ignore = Arc::new(ignore);
2973                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2974                        new_ignore = Some(ignore);
2975                    }
2976                    Err(error) => {
2977                        log::error!(
2978                            "error loading .gitignore file {:?} - {:?}",
2979                            child_name,
2980                            error
2981                        );
2982                    }
2983                }
2984
2985                // Update the ignore status of any child entries we've already processed to reflect
2986                // the ignore file in the current directory. Because `.gitignore` starts with a `.`,
2987                // it is typically encountered early in the directory listing, so there should rarely
2988                // be many entries to fix up. Update the ignore stack associated with any new jobs as well.
2989                let mut new_jobs = new_jobs.iter_mut();
2990                for entry in &mut new_entries {
2991                    let entry_abs_path = root_abs_path.join(&entry.path);
2992                    entry.is_ignored =
2993                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2994
2995                    if entry.is_dir() {
2996                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2997                            job.ignore_stack = if entry.is_ignored {
2998                                IgnoreStack::all()
2999                            } else {
3000                                ignore_stack.clone()
3001                            };
3002                        }
3003                    }
3004                }
3005            }
3006
3007            let mut child_entry = Entry::new(
3008                child_path.clone(),
3009                &child_metadata,
3010                &next_entry_id,
3011                root_char_bag,
3012            );
3013
3014            if child_entry.is_dir() {
3015                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
3016                child_entry.is_ignored = is_ignored;
3017
3018                // Avoid recursing indefinitely (and eventually crashing) when a symlink cycle leads back into an ancestor directory
3019                if !job.ancestor_inodes.contains(&child_entry.inode) {
3020                    let mut ancestor_inodes = job.ancestor_inodes.clone();
3021                    ancestor_inodes.insert(child_entry.inode);
3022
3023                    new_jobs.push(Some(ScanJob {
3024                        abs_path: child_abs_path,
3025                        path: child_path,
3026                        ignore_stack: if is_ignored {
3027                            IgnoreStack::all()
3028                        } else {
3029                            ignore_stack.clone()
3030                        },
3031                        ancestor_inodes,
3032                        scan_queue: job.scan_queue.clone(),
3033                    }));
3034                } else {
3035                    new_jobs.push(None);
3036                }
3037            } else {
3038                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
3039            }
3040
3041            new_entries.push(child_entry);
3042        }
3043
3044        self.snapshot.lock().populate_dir(
3045            job.path.clone(),
3046            new_entries,
3047            new_ignore,
3048            self.fs.as_ref(),
3049        );
3050
3051        for new_job in new_jobs {
3052            if let Some(new_job) = new_job {
3053                job.scan_queue.send(new_job).await.unwrap();
3054            }
3055        }
3056
3057        Ok(())
3058    }
3059
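    // Refresh the snapshot entries for a specific set of absolute paths reported by the FS event
    // stream. Stale entries are removed first so that renames are detected regardless of event
    // order, then fresh metadata is inserted; when `scan_queue_tx` is provided, rediscovered
    // directories are enqueued for a recursive rescan. Returns the affected worktree-relative
    // paths, or `None` if the worktree root could not be canonicalized.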
3060    async fn reload_entries_for_paths(
3061        &self,
3062        mut abs_paths: Vec<PathBuf>,
3063        scan_queue_tx: Option<Sender<ScanJob>>,
3064    ) -> Option<Vec<Arc<Path>>> {
3065        let doing_recursive_update = scan_queue_tx.is_some();
3066
3067        abs_paths.sort_unstable();
3068        abs_paths.dedup_by(|a, b| a.starts_with(&b));
3069
3070        let root_abs_path = self.snapshot.lock().abs_path.clone();
3071        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
3072        let metadata = futures::future::join_all(
3073            abs_paths
3074                .iter()
3075                .map(|abs_path| self.fs.metadata(&abs_path))
3076                .collect::<Vec<_>>(),
3077        )
3078        .await;
3079
3080        let mut snapshot = self.snapshot.lock();
3081        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
3082        snapshot.scan_id += 1;
3083        if is_idle && !doing_recursive_update {
3084            snapshot.completed_scan_id = snapshot.scan_id;
3085        }
3086
3087        // Remove any entries for paths that no longer exist or are being recursively
3088        // refreshed. Do this before adding any new entries, so that renames can be
3089        // detected regardless of the order of the paths.
3090        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
3091        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
3092            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3093                if matches!(metadata, Ok(None)) || doing_recursive_update {
3094                    snapshot.remove_path(path);
3095                }
3096                event_paths.push(path.into());
3097            } else {
3098                log::error!(
3099                    "unexpected event {:?} for root path {:?}",
3100                    abs_path,
3101                    root_canonical_path
3102                );
3103            }
3104        }
3105
3106        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
3107            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
3108
3109            match metadata {
3110                Ok(Some(metadata)) => {
3111                    let ignore_stack =
3112                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
3113                    let mut fs_entry = Entry::new(
3114                        path.clone(),
3115                        &metadata,
3116                        self.next_entry_id.as_ref(),
3117                        snapshot.root_char_bag,
3118                    );
3119                    fs_entry.is_ignored = ignore_stack.is_all();
3120                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
3121
3122                    if let Some(scan_queue_tx) = &scan_queue_tx {
3123                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
3124                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
3125                            ancestor_inodes.insert(metadata.inode);
3126                            smol::block_on(scan_queue_tx.send(ScanJob {
3127                                abs_path,
3128                                path,
3129                                ignore_stack,
3130                                ancestor_inodes,
3131                                scan_queue: scan_queue_tx.clone(),
3132                            }))
3133                            .unwrap();
3134                        }
3135                    }
3136                }
3137                Ok(None) => {
3138                    self.remove_repo_path(&path, &mut snapshot);
3139                }
3140                Err(err) => {
3141                    // TODO - create a special 'error' entry in the entries tree to mark this
3142                    log::error!("error reading file on event {:?}", err);
3143                }
3144            }
3145        }
3146
3147        Some(event_paths)
3148    }
3149
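    // Handle the deletion of a path with respect to git state. If the path was a repository's
    // work directory, drop that repository from the snapshot entirely; otherwise, clear the
    // recorded git statuses for the path and its descendants and record the current scan id on
    // the containing repository. Paths inside a `.git` directory are left untouched here.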
3150    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3151        if !path
3152            .components()
3153            .any(|component| component.as_os_str() == *DOT_GIT)
3154        {
3155            let scan_id = snapshot.scan_id;
3156
3157            if let Some(repository) = snapshot.repository_for_work_directory(path) {
3158                let entry = repository.work_directory.0;
3159                snapshot.git_repositories.remove(&entry);
3160                snapshot
3161                    .snapshot
3162                    .repository_entries
3163                    .remove(&RepositoryWorkDirectory(path.into()));
3164                return Some(());
3165            }
3166
3167            let repo = snapshot.repository_for_path(&path)?;
3168
3169            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
3170
3171            let work_dir = repo.work_directory(snapshot)?;
3172            let work_dir_id = repo.work_directory;
3173
3174            snapshot
3175                .git_repositories
3176                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3177
3178            snapshot.repository_entries.update(&work_dir, |entry| {
3179                entry
3180                    .statuses
3181                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
3182            });
3183        }
3184
3185        Some(())
3186    }
3187
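    // React to a change under `path` that may affect git state. For changes inside a `.git`
    // directory, reload the repository's index, branch name, and statuses (building the
    // repository first if it isn't known yet). For changes to working-copy files, refresh the
    // stored status of the affected entries, short-circuiting if this repository was already
    // rescanned during the current scan.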
3188    fn reload_repo_for_file_path(
3189        &self,
3190        path: &Path,
3191        snapshot: &mut LocalSnapshot,
3192        fs: &dyn Fs,
3193    ) -> Option<()> {
3194        let scan_id = snapshot.scan_id;
3195
3196        if path
3197            .components()
3198            .any(|component| component.as_os_str() == *DOT_GIT)
3199        {
3200            let (entry_id, repo_ptr) = {
3201                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
3202                    let dot_git_dir = path
3203                        .ancestors()
3204                        .find(|ancestor| ancestor.file_name() == Some(&*DOT_GIT))?;
3205
3206                    snapshot.build_repo(dot_git_dir.into(), fs);
3207                    return None;
3208                };
3209                if repo.git_dir_scan_id == scan_id {
3210                    return None;
3211                }
3212                (*entry_id, repo.repo_ptr.to_owned())
3213            };
3214
3215            let work_dir = snapshot
3216                .entry_for_id(entry_id)
3217                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
3218
3219            let repo = repo_ptr.lock();
3220            repo.reload_index();
3221            let branch = repo.branch_name();
3222            let statuses = repo.statuses().unwrap_or_default();
3223
3224            snapshot.git_repositories.update(&entry_id, |entry| {
3225                entry.scan_id = scan_id;
3226                entry.git_dir_scan_id = scan_id;
3227            });
3228
3229            snapshot.repository_entries.update(&work_dir, |entry| {
3230                entry.branch = branch.map(Into::into);
3231                entry.statuses = statuses;
3232            });
3233        } else {
3234            if snapshot
3235                .entry_for_path(&path)
3236                .map(|entry| entry.is_ignored)
3237                .unwrap_or(false)
3238            {
3239                self.remove_repo_path(&path, snapshot);
3240                return None;
3241            }
3242
3243            let repo = snapshot.repository_for_path(&path)?;
3244
3245            let work_dir = repo.work_directory(snapshot)?;
3246            let work_dir_id = repo.work_directory.clone();
3247
3248            snapshot
3249                .git_repositories
3250                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3251
3252            let local_repo = snapshot.get_local_repo(&repo)?.to_owned();
3253
3254            // Short circuit if we've already scanned everything
3255            if local_repo.git_dir_scan_id == scan_id {
3256                return None;
3257            }
3258
3259            let mut repository = snapshot.repository_entries.remove(&work_dir)?;
3260
3261            for entry in snapshot.descendent_entries(false, false, path) {
3262                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
3263                    continue;
3264                };
3265
3266                let status = local_repo.repo_ptr.lock().status(&repo_path);
3267                if let Some(status) = status {
3268                    repository.statuses.insert(repo_path.clone(), status);
3269                } else {
3270                    repository.statuses.remove(&repo_path);
3271                }
3272            }
3273
3274            snapshot.repository_entries.insert(work_dir, repository)
3275        }
3276
3277        Some(())
3278    }
3279
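    // Re-evaluate ignore state after `.gitignore` files change. Gitignore entries whose files have
    // disappeared are dropped, and a queue of `UpdateIgnoreStatusJob`s is processed in parallel
    // (one worker per CPU), interleaved with any incoming path refresh requests.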
3280    async fn update_ignore_statuses(&self) {
3281        use futures::FutureExt as _;
3282
3283        let mut snapshot = self.snapshot.lock().clone();
3284        let mut ignores_to_update = Vec::new();
3285        let mut ignores_to_delete = Vec::new();
3286        let abs_path = snapshot.abs_path.clone();
3287        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
3288            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
3289                if *needs_update {
3290                    *needs_update = false;
3291                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
3292                        ignores_to_update.push(parent_abs_path.clone());
3293                    }
3294                }
3295
3296                let ignore_path = parent_path.join(&*GITIGNORE);
3297                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
3298                    ignores_to_delete.push(parent_abs_path.clone());
3299                }
3300            }
3301        }
3302
3303        for parent_abs_path in ignores_to_delete {
3304            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
3305            self.snapshot
3306                .lock()
3307                .ignores_by_parent_abs_path
3308                .remove(&parent_abs_path);
3309        }
3310
3311        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
3312        ignores_to_update.sort_unstable();
3313        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
3314        while let Some(parent_abs_path) = ignores_to_update.next() {
3315            while ignores_to_update
3316                .peek()
3317                .map_or(false, |p| p.starts_with(&parent_abs_path))
3318            {
3319                ignores_to_update.next().unwrap();
3320            }
3321
3322            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
3323            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
3324                abs_path: parent_abs_path,
3325                ignore_stack,
3326                ignore_queue: ignore_queue_tx.clone(),
3327            }))
3328            .unwrap();
3329        }
3330        drop(ignore_queue_tx);
3331
3332        self.executor
3333            .scoped(|scope| {
3334                for _ in 0..self.executor.num_cpus() {
3335                    scope.spawn(async {
3336                        loop {
3337                            select_biased! {
3338                                // Process any path refresh requests before moving on to process
3339                                // the queue of ignore statuses.
3340                                request = self.refresh_requests_rx.recv().fuse() => {
3341                                    let Ok((paths, barrier)) = request else { break };
3342                                    if !self.process_refresh_request(paths, barrier).await {
3343                                        return;
3344                                    }
3345                                }
3346
3347                                // Recursively process directories whose ignores have changed.
3348                                job = ignore_queue_rx.recv().fuse() => {
3349                                    let Ok(job) = job else { break };
3350                                    self.update_ignore_status(job, &snapshot).await;
3351                                }
3352                            }
3353                        }
3354                    });
3355                }
3356            })
3357            .await;
3358    }
3359
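    // Process one `UpdateIgnoreStatusJob`: recompute `is_ignored` for the direct children of
    // `job.abs_path`, enqueue follow-up jobs for child directories, and write any entries whose
    // ignore status changed back into the snapshot.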
3360    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
3361        let mut ignore_stack = job.ignore_stack;
3362        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
3363            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3364        }
3365
3366        let mut entries_by_id_edits = Vec::new();
3367        let mut entries_by_path_edits = Vec::new();
3368        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
3369        for mut entry in snapshot.child_entries(path).cloned() {
3370            let was_ignored = entry.is_ignored;
3371            let abs_path = snapshot.abs_path().join(&entry.path);
3372            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
3373            if entry.is_dir() {
3374                let child_ignore_stack = if entry.is_ignored {
3375                    IgnoreStack::all()
3376                } else {
3377                    ignore_stack.clone()
3378                };
3379                job.ignore_queue
3380                    .send(UpdateIgnoreStatusJob {
3381                        abs_path: abs_path.into(),
3382                        ignore_stack: child_ignore_stack,
3383                        ignore_queue: job.ignore_queue.clone(),
3384                    })
3385                    .await
3386                    .unwrap();
3387            }
3388
3389            if entry.is_ignored != was_ignored {
3390                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
3391                path_entry.scan_id = snapshot.scan_id;
3392                path_entry.is_ignored = entry.is_ignored;
3393                entries_by_id_edits.push(Edit::Insert(path_entry));
3394                entries_by_path_edits.push(Edit::Insert(entry));
3395            }
3396        }
3397
3398        let mut snapshot = self.snapshot.lock();
3399        snapshot.entries_by_path.edit(entries_by_path_edits, &());
3400        snapshot.entries_by_id.edit(entries_by_id_edits, &());
3401    }
3402
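    // Diff the old and new snapshots around the given event paths, classifying each affected
    // entry as `Added`, `Removed`, `Updated`, or `AddedOrUpdated` (the latter when the event was
    // received before the initial scan finished and the distinction cannot be made).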
3403    fn build_change_set(
3404        &self,
3405        old_snapshot: &Snapshot,
3406        new_snapshot: &Snapshot,
3407        event_paths: &[Arc<Path>],
3408    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
3409        use PathChange::{Added, AddedOrUpdated, Removed, Updated};
3410
3411        let mut changes = HashMap::default();
3412        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3413        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3414        let received_before_initialized = !self.finished_initial_scan;
3415
3416        for path in event_paths {
3417            let path = PathKey(path.clone());
3418            old_paths.seek(&path, Bias::Left, &());
3419            new_paths.seek(&path, Bias::Left, &());
3420
3421            loop {
3422                match (old_paths.item(), new_paths.item()) {
3423                    (Some(old_entry), Some(new_entry)) => {
3424                        if old_entry.path > path.0
3425                            && new_entry.path > path.0
3426                            && !old_entry.path.starts_with(&path.0)
3427                            && !new_entry.path.starts_with(&path.0)
3428                        {
3429                            break;
3430                        }
3431
3432                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3433                            Ordering::Less => {
3434                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3435                                old_paths.next(&());
3436                            }
3437                            Ordering::Equal => {
3438                                if received_before_initialized {
3439                                    // If the worktree was not fully initialized when this event was generated,
3440                                    // we can't know whether this entry was added during the scan or whether
3441                                    // it was merely updated.
3442                                    changes.insert(
3443                                        (new_entry.path.clone(), new_entry.id),
3444                                        AddedOrUpdated,
3445                                    );
3446                                } else if old_entry.mtime != new_entry.mtime {
3447                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
3448                                }
3449                                old_paths.next(&());
3450                                new_paths.next(&());
3451                            }
3452                            Ordering::Greater => {
3453                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
3454                                new_paths.next(&());
3455                            }
3456                        }
3457                    }
3458                    (Some(old_entry), None) => {
3459                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3460                        old_paths.next(&());
3461                    }
3462                    (None, Some(new_entry)) => {
3463                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
3464                        new_paths.next(&());
3465                    }
3466                    (None, None) => break,
3467                }
3468            }
3469        }
3470
3471        changes
3472    }
3473
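    // Completes after a short delay when `running` (or after a simulated random delay under the
    // fake FS in tests); never completes when `running` is false.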
3474    async fn progress_timer(&self, running: bool) {
3475        if !running {
3476            return futures::future::pending().await;
3477        }
3478
3479        #[cfg(any(test, feature = "test-support"))]
3480        if self.fs.is_fake() {
3481            return self.executor.simulate_random_delay().await;
3482        }
3483
3484        smol::Timer::after(Duration::from_millis(100)).await;
3485    }
3486}
3487
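// Build the fuzzy-matching `CharBag` for a path by extending the worktree root's bag with the
// ASCII-lowercased characters of the relative path. Illustrative example: for the path
// "src/Main.rs", the characters of "src/main.rs" are added to the root's bag.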
3488fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3489    let mut result = root_char_bag;
3490    result.extend(
3491        path.to_string_lossy()
3492            .chars()
3493            .map(|c| c.to_ascii_lowercase()),
3494    );
3495    result
3496}
3497
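// A unit of work for the background scanner: one directory to scan, the ignore stack in effect
// at that directory, the inodes of its ancestors (used to detect symlink cycles), and the queue
// used to schedule child directories.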
3498struct ScanJob {
3499    abs_path: Arc<Path>,
3500    path: Arc<Path>,
3501    ignore_stack: Arc<IgnoreStack>,
3502    scan_queue: Sender<ScanJob>,
3503    ancestor_inodes: TreeSet<u64>,
3504}
3505
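// A unit of work for re-evaluating ignore status: one directory whose children need their
// `is_ignored` flags recomputed against the given ignore stack.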
3506struct UpdateIgnoreStatusJob {
3507    abs_path: Arc<Path>,
3508    ignore_stack: Arc<IgnoreStack>,
3509    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3510}
3511
3512pub trait WorktreeHandle {
3513    #[cfg(any(test, feature = "test-support"))]
3514    fn flush_fs_events<'a>(
3515        &self,
3516        cx: &'a gpui::TestAppContext,
3517    ) -> futures::future::LocalBoxFuture<'a, ()>;
3518}
3519
3520impl WorktreeHandle for ModelHandle<Worktree> {
3521    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
3522    // occurred before the worktree was constructed. These events can cause the worktree to perform
3523    // extra directory scans and emit extra scan-state notifications.
3524    //
3525    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3526    // to ensure that all redundant FS events have already been processed.
3527    #[cfg(any(test, feature = "test-support"))]
3528    fn flush_fs_events<'a>(
3529        &self,
3530        cx: &'a gpui::TestAppContext,
3531    ) -> futures::future::LocalBoxFuture<'a, ()> {
3532        use smol::future::FutureExt;
3533
3534        let filename = "fs-event-sentinel";
3535        let tree = self.clone();
3536        let (fs, root_path) = self.read_with(cx, |tree, _| {
3537            let tree = tree.as_local().unwrap();
3538            (tree.fs.clone(), tree.abs_path().clone())
3539        });
3540
3541        async move {
3542            fs.create_file(&root_path.join(filename), Default::default())
3543                .await
3544                .unwrap();
3545            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3546                .await;
3547
3548            fs.remove_file(&root_path.join(filename), Default::default())
3549                .await
3550                .unwrap();
3551            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3552                .await;
3553
3554            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3555                .await;
3556        }
3557        .boxed_local()
3558    }
3559}
3560
3561#[derive(Clone, Debug)]
3562struct TraversalProgress<'a> {
3563    max_path: &'a Path,
3564    count: usize,
3565    visible_count: usize,
3566    file_count: usize,
3567    visible_file_count: usize,
3568}
3569
3570impl<'a> TraversalProgress<'a> {
3571    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3572        match (include_ignored, include_dirs) {
3573            (true, true) => self.count,
3574            (true, false) => self.file_count,
3575            (false, true) => self.visible_count,
3576            (false, false) => self.visible_file_count,
3577        }
3578    }
3579}
3580
3581impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3582    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3583        self.max_path = summary.max_path.as_ref();
3584        self.count += summary.count;
3585        self.visible_count += summary.visible_count;
3586        self.file_count += summary.file_count;
3587        self.visible_file_count += summary.visible_file_count;
3588    }
3589}
3590
3591impl<'a> Default for TraversalProgress<'a> {
3592    fn default() -> Self {
3593        Self {
3594            max_path: Path::new(""),
3595            count: 0,
3596            visible_count: 0,
3597            file_count: 0,
3598            visible_file_count: 0,
3599        }
3600    }
3601}
3602
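// An ordered walk over the worktree's entries, backed by a cursor into `entries_by_path`. The
// `include_dirs` and `include_ignored` flags control which entries are yielded. Because
// `Traversal` implements `Iterator`, ordinary iterator adapters apply, e.g.
// `traversal.map(|entry| entry.path.clone()).collect::<Vec<_>>()` (illustrative usage only).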
3603pub struct Traversal<'a> {
3604    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3605    include_ignored: bool,
3606    include_dirs: bool,
3607}
3608
3609impl<'a> Traversal<'a> {
3610    pub fn advance(&mut self) -> bool {
3611        self.cursor.seek_forward(
3612            &TraversalTarget::Count {
3613                count: self.end_offset() + 1,
3614                include_dirs: self.include_dirs,
3615                include_ignored: self.include_ignored,
3616            },
3617            Bias::Left,
3618            &(),
3619        )
3620    }
3621
3622    pub fn advance_to_sibling(&mut self) -> bool {
3623        while let Some(entry) = self.cursor.item() {
3624            self.cursor.seek_forward(
3625                &TraversalTarget::PathSuccessor(&entry.path),
3626                Bias::Left,
3627                &(),
3628            );
3629            if let Some(entry) = self.cursor.item() {
3630                if (self.include_dirs || !entry.is_dir())
3631                    && (self.include_ignored || !entry.is_ignored)
3632                {
3633                    return true;
3634                }
3635            }
3636        }
3637        false
3638    }
3639
3640    pub fn entry(&self) -> Option<&'a Entry> {
3641        self.cursor.item()
3642    }
3643
3644    pub fn start_offset(&self) -> usize {
3645        self.cursor
3646            .start()
3647            .count(self.include_dirs, self.include_ignored)
3648    }
3649
3650    pub fn end_offset(&self) -> usize {
3651        self.cursor
3652            .end(&())
3653            .count(self.include_dirs, self.include_ignored)
3654    }
3655}
3656
3657impl<'a> Iterator for Traversal<'a> {
3658    type Item = &'a Entry;
3659
3660    fn next(&mut self) -> Option<Self::Item> {
3661        if let Some(item) = self.entry() {
3662            self.advance();
3663            Some(item)
3664        } else {
3665            None
3666        }
3667    }
3668}
3669
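// A seek target for `Traversal`'s cursor: an exact path, the successor of a path (the first
// entry past the path and its descendants), or a target entry count (counted with the given
// `include_dirs` / `include_ignored` flags).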
3670#[derive(Debug)]
3671enum TraversalTarget<'a> {
3672    Path(&'a Path),
3673    PathSuccessor(&'a Path),
3674    Count {
3675        count: usize,
3676        include_ignored: bool,
3677        include_dirs: bool,
3678    },
3679}
3680
3681impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3682    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3683        match self {
3684            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3685            TraversalTarget::PathSuccessor(path) => {
3686                if !cursor_location.max_path.starts_with(path) {
3687                    Ordering::Equal
3688                } else {
3689                    Ordering::Greater
3690                }
3691            }
3692            TraversalTarget::Count {
3693                count,
3694                include_dirs,
3695                include_ignored,
3696            } => Ord::cmp(
3697                count,
3698                &cursor_location.count(*include_dirs, *include_ignored),
3699            ),
3700        }
3701    }
3702}
3703
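// Yields entries under `parent_path`, skipping each yielded entry's descendants by advancing
// the traversal to the next sibling, so it effectively iterates the direct children.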
3704struct ChildEntriesIter<'a> {
3705    parent_path: &'a Path,
3706    traversal: Traversal<'a>,
3707}
3708
3709impl<'a> Iterator for ChildEntriesIter<'a> {
3710    type Item = &'a Entry;
3711
3712    fn next(&mut self) -> Option<Self::Item> {
3713        if let Some(item) = self.traversal.entry() {
3714            if item.path.starts_with(&self.parent_path) {
3715                self.traversal.advance_to_sibling();
3716                return Some(item);
3717            }
3718        }
3719        None
3720    }
3721}
3722
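// Yields all entries whose paths start with `parent_path`, descendants included, by advancing
// the traversal one entry at a time.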
3723struct DescendentEntriesIter<'a> {
3724    parent_path: &'a Path,
3725    traversal: Traversal<'a>,
3726}
3727
3728impl<'a> Iterator for DescendentEntriesIter<'a> {
3729    type Item = &'a Entry;
3730
3731    fn next(&mut self) -> Option<Self::Item> {
3732        if let Some(item) = self.traversal.entry() {
3733            if item.path.starts_with(&self.parent_path) {
3734                self.traversal.advance();
3735                return Some(item);
3736            }
3737        }
3738        None
3739    }
3740}
3741
3742impl<'a> From<&'a Entry> for proto::Entry {
3743    fn from(entry: &'a Entry) -> Self {
3744        Self {
3745            id: entry.id.to_proto(),
3746            is_dir: entry.is_dir(),
3747            path: entry.path.to_string_lossy().into(),
3748            inode: entry.inode,
3749            mtime: Some(entry.mtime.into()),
3750            is_symlink: entry.is_symlink,
3751            is_ignored: entry.is_ignored,
3752        }
3753    }
3754}
3755
3756impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3757    type Error = anyhow::Error;
3758
3759    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3760        if let Some(mtime) = entry.mtime {
3761            let kind = if entry.is_dir {
3762                EntryKind::Dir
3763            } else {
3764                let mut char_bag = *root_char_bag;
3765                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3766                EntryKind::File(char_bag)
3767            };
3768            let path: Arc<Path> = PathBuf::from(entry.path).into();
3769            Ok(Entry {
3770                id: ProjectEntryId::from_proto(entry.id),
3771                kind,
3772                path,
3773                inode: entry.inode,
3774                mtime: mtime.into(),
3775                is_symlink: entry.is_symlink,
3776                is_ignored: entry.is_ignored,
3777            })
3778        } else {
3779            Err(anyhow!(
3780                "missing mtime in remote worktree entry {:?}",
3781                entry.path
3782            ))
3783        }
3784    }
3785}
3786
3787#[cfg(test)]
3788mod tests {
3789    use super::*;
3790    use fs::{FakeFs, RealFs};
3791    use gpui::{executor::Deterministic, TestAppContext};
3792    use pretty_assertions::assert_eq;
3793    use rand::prelude::*;
3794    use serde_json::json;
3795    use std::{env, fmt::Write};
3796    use util::{http::FakeHttpClient, test::temp_tree};
3797
3798    #[gpui::test]
3799    async fn test_traversal(cx: &mut TestAppContext) {
3800        let fs = FakeFs::new(cx.background());
3801        fs.insert_tree(
3802            "/root",
3803            json!({
3804               ".gitignore": "a/b\n",
3805               "a": {
3806                   "b": "",
3807                   "c": "",
3808               }
3809            }),
3810        )
3811        .await;
3812
3813        let http_client = FakeHttpClient::with_404_response();
3814        let client = cx.read(|cx| Client::new(http_client, cx));
3815
3816        let tree = Worktree::local(
3817            client,
3818            Path::new("/root"),
3819            true,
3820            fs,
3821            Default::default(),
3822            &mut cx.to_async(),
3823        )
3824        .await
3825        .unwrap();
3826        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3827            .await;
3828
3829        tree.read_with(cx, |tree, _| {
3830            assert_eq!(
3831                tree.entries(false)
3832                    .map(|entry| entry.path.as_ref())
3833                    .collect::<Vec<_>>(),
3834                vec![
3835                    Path::new(""),
3836                    Path::new(".gitignore"),
3837                    Path::new("a"),
3838                    Path::new("a/c"),
3839                ]
3840            );
3841            assert_eq!(
3842                tree.entries(true)
3843                    .map(|entry| entry.path.as_ref())
3844                    .collect::<Vec<_>>(),
3845                vec![
3846                    Path::new(""),
3847                    Path::new(".gitignore"),
3848                    Path::new("a"),
3849                    Path::new("a/b"),
3850                    Path::new("a/c"),
3851                ]
3852            );
3853        })
3854    }
3855
3856    #[gpui::test]
3857    async fn test_descendent_entries(cx: &mut TestAppContext) {
3858        let fs = FakeFs::new(cx.background());
3859        fs.insert_tree(
3860            "/root",
3861            json!({
3862                "a": "",
3863                "b": {
3864                   "c": {
3865                       "d": ""
3866                   },
3867                   "e": {}
3868                },
3869                "f": "",
3870                "g": {
3871                    "h": {}
3872                },
3873                "i": {
3874                    "j": {
3875                        "k": ""
3876                    },
3877                    "l": {
3878
3879                    }
3880                },
3881                ".gitignore": "i/j\n",
3882            }),
3883        )
3884        .await;
3885
3886        let http_client = FakeHttpClient::with_404_response();
3887        let client = cx.read(|cx| Client::new(http_client, cx));
3888
3889        let tree = Worktree::local(
3890            client,
3891            Path::new("/root"),
3892            true,
3893            fs,
3894            Default::default(),
3895            &mut cx.to_async(),
3896        )
3897        .await
3898        .unwrap();
3899        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3900            .await;
3901
3902        tree.read_with(cx, |tree, _| {
3903            assert_eq!(
3904                tree.descendent_entries(false, false, Path::new("b"))
3905                    .map(|entry| entry.path.as_ref())
3906                    .collect::<Vec<_>>(),
3907                vec![Path::new("b/c/d"),]
3908            );
3909            assert_eq!(
3910                tree.descendent_entries(true, false, Path::new("b"))
3911                    .map(|entry| entry.path.as_ref())
3912                    .collect::<Vec<_>>(),
3913                vec![
3914                    Path::new("b"),
3915                    Path::new("b/c"),
3916                    Path::new("b/c/d"),
3917                    Path::new("b/e"),
3918                ]
3919            );
3920
3921            assert_eq!(
3922                tree.descendent_entries(false, false, Path::new("g"))
3923                    .map(|entry| entry.path.as_ref())
3924                    .collect::<Vec<_>>(),
3925                Vec::<PathBuf>::new()
3926            );
3927            assert_eq!(
3928                tree.descendent_entries(true, false, Path::new("g"))
3929                    .map(|entry| entry.path.as_ref())
3930                    .collect::<Vec<_>>(),
3931                vec![Path::new("g"), Path::new("g/h"),]
3932            );
3933
3934            assert_eq!(
3935                tree.descendent_entries(false, false, Path::new("i"))
3936                    .map(|entry| entry.path.as_ref())
3937                    .collect::<Vec<_>>(),
3938                Vec::<PathBuf>::new()
3939            );
3940            assert_eq!(
3941                tree.descendent_entries(false, true, Path::new("i"))
3942                    .map(|entry| entry.path.as_ref())
3943                    .collect::<Vec<_>>(),
3944                vec![Path::new("i/j/k")]
3945            );
3946            assert_eq!(
3947                tree.descendent_entries(true, false, Path::new("i"))
3948                    .map(|entry| entry.path.as_ref())
3949                    .collect::<Vec<_>>(),
3950                vec![Path::new("i"), Path::new("i/l"),]
3951            );
3952        })
3953    }
3954
3955    #[gpui::test(iterations = 10)]
3956    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3957        let fs = FakeFs::new(cx.background());
3958        fs.insert_tree(
3959            "/root",
3960            json!({
3961                "lib": {
3962                    "a": {
3963                        "a.txt": ""
3964                    },
3965                    "b": {
3966                        "b.txt": ""
3967                    }
3968                }
3969            }),
3970        )
3971        .await;
3972        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3973        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3974
3975        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3976        let tree = Worktree::local(
3977            client,
3978            Path::new("/root"),
3979            true,
3980            fs.clone(),
3981            Default::default(),
3982            &mut cx.to_async(),
3983        )
3984        .await
3985        .unwrap();
3986
3987        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3988            .await;
3989
3990        tree.read_with(cx, |tree, _| {
3991            assert_eq!(
3992                tree.entries(false)
3993                    .map(|entry| entry.path.as_ref())
3994                    .collect::<Vec<_>>(),
3995                vec![
3996                    Path::new(""),
3997                    Path::new("lib"),
3998                    Path::new("lib/a"),
3999                    Path::new("lib/a/a.txt"),
4000                    Path::new("lib/a/lib"),
4001                    Path::new("lib/b"),
4002                    Path::new("lib/b/b.txt"),
4003                    Path::new("lib/b/lib"),
4004                ]
4005            );
4006        });
4007
4008        fs.rename(
4009            Path::new("/root/lib/a/lib"),
4010            Path::new("/root/lib/a/lib-2"),
4011            Default::default(),
4012        )
4013        .await
4014        .unwrap();
4015        executor.run_until_parked();
4016        tree.read_with(cx, |tree, _| {
4017            assert_eq!(
4018                tree.entries(false)
4019                    .map(|entry| entry.path.as_ref())
4020                    .collect::<Vec<_>>(),
4021                vec![
4022                    Path::new(""),
4023                    Path::new("lib"),
4024                    Path::new("lib/a"),
4025                    Path::new("lib/a/a.txt"),
4026                    Path::new("lib/a/lib-2"),
4027                    Path::new("lib/b"),
4028                    Path::new("lib/b/b.txt"),
4029                    Path::new("lib/b/lib"),
4030                ]
4031            );
4032        });
4033    }
4034
4035    #[gpui::test]
4036    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
4037        // .gitignores are handled explicitly by Zed and do not use the git
4038        // machinery that the git_tests module checks
4039        let parent_dir = temp_tree(json!({
4040            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
4041            "tree": {
4042                ".git": {},
4043                ".gitignore": "ignored-dir\n",
4044                "tracked-dir": {
4045                    "tracked-file1": "",
4046                    "ancestor-ignored-file1": "",
4047                },
4048                "ignored-dir": {
4049                    "ignored-file1": ""
4050                }
4051            }
4052        }));
4053        let dir = parent_dir.path().join("tree");
4054
4055        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4056
4057        let tree = Worktree::local(
4058            client,
4059            dir.as_path(),
4060            true,
4061            Arc::new(RealFs),
4062            Default::default(),
4063            &mut cx.to_async(),
4064        )
4065        .await
4066        .unwrap();
4067        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4068            .await;
4069        tree.flush_fs_events(cx).await;
4070        cx.read(|cx| {
4071            let tree = tree.read(cx);
4072            assert!(
4073                !tree
4074                    .entry_for_path("tracked-dir/tracked-file1")
4075                    .unwrap()
4076                    .is_ignored
4077            );
4078            assert!(
4079                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
4080                    .unwrap()
4081                    .is_ignored
4082            );
4083            assert!(
4084                tree.entry_for_path("ignored-dir/ignored-file1")
4085                    .unwrap()
4086                    .is_ignored
4087            );
4088        });
4089
4090        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
4091        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
4092        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
4093        tree.flush_fs_events(cx).await;
4094        cx.read(|cx| {
4095            let tree = tree.read(cx);
4096            assert!(
4097                !tree
4098                    .entry_for_path("tracked-dir/tracked-file2")
4099                    .unwrap()
4100                    .is_ignored
4101            );
4102            assert!(
4103                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
4104                    .unwrap()
4105                    .is_ignored
4106            );
4107            assert!(
4108                tree.entry_for_path("ignored-dir/ignored-file2")
4109                    .unwrap()
4110                    .is_ignored
4111            );
4112            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
4113        });
4114    }
4115
4116    #[gpui::test]
4117    async fn test_write_file(cx: &mut TestAppContext) {
4118        let dir = temp_tree(json!({
4119            ".git": {},
4120            ".gitignore": "ignored-dir\n",
4121            "tracked-dir": {},
4122            "ignored-dir": {}
4123        }));
4124
4125        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4126
4127        let tree = Worktree::local(
4128            client,
4129            dir.path(),
4130            true,
4131            Arc::new(RealFs),
4132            Default::default(),
4133            &mut cx.to_async(),
4134        )
4135        .await
4136        .unwrap();
4137        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4138            .await;
4139        tree.flush_fs_events(cx).await;
4140
4141        tree.update(cx, |tree, cx| {
4142            tree.as_local().unwrap().write_file(
4143                Path::new("tracked-dir/file.txt"),
4144                "hello".into(),
4145                Default::default(),
4146                cx,
4147            )
4148        })
4149        .await
4150        .unwrap();
4151        tree.update(cx, |tree, cx| {
4152            tree.as_local().unwrap().write_file(
4153                Path::new("ignored-dir/file.txt"),
4154                "world".into(),
4155                Default::default(),
4156                cx,
4157            )
4158        })
4159        .await
4160        .unwrap();
4161
4162        tree.read_with(cx, |tree, _| {
4163            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
4164            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
4165            assert!(!tracked.is_ignored);
4166            assert!(ignored.is_ignored);
4167        });
4168    }
4169
4170    #[gpui::test(iterations = 30)]
4171    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
4172        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4173
4174        let fs = FakeFs::new(cx.background());
4175        fs.insert_tree(
4176            "/root",
4177            json!({
4178                "b": {},
4179                "c": {},
4180                "d": {},
4181            }),
4182        )
4183        .await;
4184
4185        let tree = Worktree::local(
4186            client,
4187            "/root".as_ref(),
4188            true,
4189            fs,
4190            Default::default(),
4191            &mut cx.to_async(),
4192        )
4193        .await
4194        .unwrap();
4195
4196        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4197
4198        let entry = tree
4199            .update(cx, |tree, cx| {
4200                tree.as_local_mut()
4201                    .unwrap()
4202                    .create_entry("a/e".as_ref(), true, cx)
4203            })
4204            .await
4205            .unwrap();
4206        assert!(entry.is_dir());
4207
4208        cx.foreground().run_until_parked();
4209        tree.read_with(cx, |tree, _| {
4210            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
4211        });
4212
4213        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4214        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
4215        snapshot1.apply_remote_update(update).unwrap();
4216        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
4217    }
4218
4219    #[gpui::test(iterations = 100)]
4220    async fn test_random_worktree_operations_during_initial_scan(
4221        cx: &mut TestAppContext,
4222        mut rng: StdRng,
4223    ) {
4224        let operations = env::var("OPERATIONS")
4225            .map(|o| o.parse().unwrap())
4226            .unwrap_or(5);
4227        let initial_entries = env::var("INITIAL_ENTRIES")
4228            .map(|o| o.parse().unwrap())
4229            .unwrap_or(20);
4230
4231        let root_dir = Path::new("/test");
4232        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4233        fs.as_fake().insert_tree(root_dir, json!({})).await;
4234        for _ in 0..initial_entries {
4235            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4236        }
4237        log::info!("generated initial tree");
4238
4239        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4240        let worktree = Worktree::local(
4241            client.clone(),
4242            root_dir,
4243            true,
4244            fs.clone(),
4245            Default::default(),
4246            &mut cx.to_async(),
4247        )
4248        .await
4249        .unwrap();
4250
4251        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4252
4253        for _ in 0..operations {
4254            worktree
4255                .update(cx, |worktree, cx| {
4256                    randomly_mutate_worktree(worktree, &mut rng, cx)
4257                })
4258                .await
4259                .log_err();
4260            worktree.read_with(cx, |tree, _| {
4261                tree.as_local().unwrap().snapshot.check_invariants()
4262            });
4263
4264            if rng.gen_bool(0.6) {
4265                let new_snapshot =
4266                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4267                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4268                snapshot.apply_remote_update(update.clone()).unwrap();
4269                assert_eq!(
4270                    snapshot.to_vec(true),
4271                    new_snapshot.to_vec(true),
4272                    "incorrect snapshot after update {:?}",
4273                    update
4274                );
4275            }
4276        }
4277
4278        worktree
4279            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4280            .await;
4281        worktree.read_with(cx, |tree, _| {
4282            tree.as_local().unwrap().snapshot.check_invariants()
4283        });
4284
4285        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4286        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4287        snapshot.apply_remote_update(update.clone()).unwrap();
4288        assert_eq!(
4289            snapshot.to_vec(true),
4290            new_snapshot.to_vec(true),
4291            "incorrect snapshot after update {:?}",
4292            update
4293        );
4294    }
4295
4296    #[gpui::test(iterations = 100)]
4297    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4298        let operations = env::var("OPERATIONS")
4299            .map(|o| o.parse().unwrap())
4300            .unwrap_or(40);
4301        let initial_entries = env::var("INITIAL_ENTRIES")
4302            .map(|o| o.parse().unwrap())
4303            .unwrap_or(20);
4304
4305        let root_dir = Path::new("/test");
4306        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4307        fs.as_fake().insert_tree(root_dir, json!({})).await;
4308        for _ in 0..initial_entries {
4309            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4310        }
4311        log::info!("generated initial tree");
4312
4313        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4314        let worktree = Worktree::local(
4315            client.clone(),
4316            root_dir,
4317            true,
4318            fs.clone(),
4319            Default::default(),
4320            &mut cx.to_async(),
4321        )
4322        .await
4323        .unwrap();
4324
4325        worktree
4326            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4327            .await;
4328
4329        // After the initial scan is complete, the `UpdatedEntries` event can
4330        // be used to follow along with all changes to the worktree's snapshot.
4331        worktree.update(cx, |tree, cx| {
4332            let mut paths = tree
4333                .as_local()
4334                .unwrap()
4335                .paths()
4336                .cloned()
4337                .collect::<Vec<_>>();
4338
4339            cx.subscribe(&worktree, move |tree, _, event, _| {
4340                if let Event::UpdatedEntries(changes) = event {
4341                    for ((path, _), change_type) in changes.iter() {
4342                        let path = path.clone();
4343                        let ix = match paths.binary_search(&path) {
4344                            Ok(ix) | Err(ix) => ix,
4345                        };
4346                        match change_type {
4347                            PathChange::Added => {
4348                                assert_ne!(paths.get(ix), Some(&path));
4349                                paths.insert(ix, path);
4350                            }
4351
4352                            PathChange::Removed => {
4353                                assert_eq!(paths.get(ix), Some(&path));
4354                                paths.remove(ix);
4355                            }
4356
4357                            PathChange::Updated => {
4358                                assert_eq!(paths.get(ix), Some(&path));
4359                            }
4360
4361                            PathChange::AddedOrUpdated => {
4362                                if paths[ix] != path {
4363                                    paths.insert(ix, path);
4364                                }
4365                            }
4366                        }
4367                    }
4368
4369                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
4370                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
4371                }
4372            })
4373            .detach();
4374        });
4375
4376        fs.as_fake().pause_events();
4377        let mut snapshots = Vec::new();
4378        let mut mutations_len = operations;
4379        while mutations_len > 1 {
4380            if rng.gen_bool(0.2) {
4381                worktree
4382                    .update(cx, |worktree, cx| {
4383                        randomly_mutate_worktree(worktree, &mut rng, cx)
4384                    })
4385                    .await
4386                    .log_err();
4387            } else {
4388                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4389            }
4390
4391            let buffered_event_count = fs.as_fake().buffered_event_count();
4392            if buffered_event_count > 0 && rng.gen_bool(0.3) {
4393                let len = rng.gen_range(0..=buffered_event_count);
4394                log::info!("flushing {} events", len);
4395                fs.as_fake().flush_events(len);
4396            } else {
4397                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4398                mutations_len -= 1;
4399            }
4400
4401            cx.foreground().run_until_parked();
4402            if rng.gen_bool(0.2) {
4403                log::info!("storing snapshot {}", snapshots.len());
4404                let snapshot =
4405                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4406                snapshots.push(snapshot);
4407            }
4408        }
4409
4410        log::info!("quiescing");
4411        fs.as_fake().flush_events(usize::MAX);
4412        cx.foreground().run_until_parked();
4413        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4414        snapshot.check_invariants();
4415
4416        {
4417            let new_worktree = Worktree::local(
4418                client.clone(),
4419                root_dir,
4420                true,
4421                fs.clone(),
4422                Default::default(),
4423                &mut cx.to_async(),
4424            )
4425            .await
4426            .unwrap();
4427            new_worktree
4428                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4429                .await;
4430            let new_snapshot =
4431                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4432            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4433        }
4434
4435        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4436            let include_ignored = rng.gen::<bool>();
4437            if !include_ignored {
4438                let mut entries_by_path_edits = Vec::new();
4439                let mut entries_by_id_edits = Vec::new();
4440                for entry in prev_snapshot
4441                    .entries_by_id
4442                    .cursor::<()>()
4443                    .filter(|e| e.is_ignored)
4444                {
4445                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4446                    entries_by_id_edits.push(Edit::Remove(entry.id));
4447                }
4448
4449                prev_snapshot
4450                    .entries_by_path
4451                    .edit(entries_by_path_edits, &());
4452                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4453            }
4454
4455            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4456            prev_snapshot.apply_remote_update(update.clone()).unwrap();
4457            assert_eq!(
4458                prev_snapshot.to_vec(include_ignored),
4459                snapshot.to_vec(include_ignored),
4460                "wrong update for snapshot {i}. update: {:?}",
4461                update
4462            );
4463        }
4464    }
4465
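        // Randomly deletes an entry, renames it, creates a new child, or overwrites a file, all through the worktree's public API.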
4466    fn randomly_mutate_worktree(
4467        worktree: &mut Worktree,
4468        rng: &mut impl Rng,
4469        cx: &mut ModelContext<Worktree>,
4470    ) -> Task<Result<()>> {
4471        log::info!("mutating worktree");
4472        let worktree = worktree.as_local_mut().unwrap();
4473        let snapshot = worktree.snapshot();
4474        let entry = snapshot.entries(false).choose(rng).unwrap();
4475
4476        match rng.gen_range(0_u32..100) {
4477            0..=33 if entry.path.as_ref() != Path::new("") => {
4478                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4479                worktree.delete_entry(entry.id, cx).unwrap()
4480            }
4481            ..=66 if entry.path.as_ref() != Path::new("") => {
4482                let other_entry = snapshot.entries(false).choose(rng).unwrap();
4483                let new_parent_path = if other_entry.is_dir() {
4484                    other_entry.path.clone()
4485                } else {
4486                    other_entry.path.parent().unwrap().into()
4487                };
4488                let mut new_path = new_parent_path.join(gen_name(rng));
4489                if new_path.starts_with(&entry.path) {
4490                    new_path = gen_name(rng).into();
4491                }
4492
4493                log::info!(
4494                    "renaming entry {:?} ({}) to {:?}",
4495                    entry.path,
4496                    entry.id.0,
4497                    new_path
4498                );
4499                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4500                cx.foreground().spawn(async move {
4501                    task.await?;
4502                    Ok(())
4503                })
4504            }
4505            _ => {
4506                let task = if entry.is_dir() {
4507                    let child_path = entry.path.join(gen_name(rng));
4508                    let is_dir = rng.gen_bool(0.3);
4509                    log::info!(
4510                        "creating {} at {:?}",
4511                        if is_dir { "dir" } else { "file" },
4512                        child_path,
4513                    );
4514                    worktree.create_entry(child_path, is_dir, cx)
4515                } else {
4516                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4517                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4518                };
4519                cx.foreground().spawn(async move {
4520                    task.await?;
4521                    Ok(())
4522                })
4523            }
4524        }
4525    }
4526
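        // Mutates the fake filesystem directly: inserts files and directories, writes .gitignore files, and renames or removes existing paths.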
4527    async fn randomly_mutate_fs(
4528        fs: &Arc<dyn Fs>,
4529        root_path: &Path,
4530        insertion_probability: f64,
4531        rng: &mut impl Rng,
4532    ) {
4533        log::info!("mutating fs");
4534        let mut files = Vec::new();
4535        let mut dirs = Vec::new();
4536        for path in fs.as_fake().paths() {
4537            if path.starts_with(root_path) {
4538                if fs.is_file(&path).await {
4539                    files.push(path);
4540                } else {
4541                    dirs.push(path);
4542                }
4543            }
4544        }
4545
4546        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4547            let path = dirs.choose(rng).unwrap();
4548            let new_path = path.join(gen_name(rng));
4549
4550            if rng.gen() {
4551                log::info!(
4552                    "creating dir {:?}",
4553                    new_path.strip_prefix(root_path).unwrap()
4554                );
4555                fs.create_dir(&new_path).await.unwrap();
4556            } else {
4557                log::info!(
4558                    "creating file {:?}",
4559                    new_path.strip_prefix(root_path).unwrap()
4560                );
4561                fs.create_file(&new_path, Default::default()).await.unwrap();
4562            }
4563        } else if rng.gen_bool(0.05) {
4564            let ignore_dir_path = dirs.choose(rng).unwrap();
4565            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4566
4567            let subdirs = dirs
4568                .iter()
4569                .filter(|d| d.starts_with(&ignore_dir_path))
4570                .cloned()
4571                .collect::<Vec<_>>();
4572            let subfiles = files
4573                .iter()
4574                .filter(|d| d.starts_with(&ignore_dir_path))
4575                .cloned()
4576                .collect::<Vec<_>>();
4577            let files_to_ignore = {
4578                let len = rng.gen_range(0..=subfiles.len());
4579                subfiles.choose_multiple(rng, len)
4580            };
4581            let dirs_to_ignore = {
4582                let len = rng.gen_range(0..subdirs.len());
4583                subdirs.choose_multiple(rng, len)
4584            };
4585
4586            let mut ignore_contents = String::new();
4587            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4588                writeln!(
4589                    ignore_contents,
4590                    "{}",
4591                    path_to_ignore
4592                        .strip_prefix(&ignore_dir_path)
4593                        .unwrap()
4594                        .to_str()
4595                        .unwrap()
4596                )
4597                .unwrap();
4598            }
4599            log::info!(
4600                "creating gitignore {:?} with contents:\n{}",
4601                ignore_path.strip_prefix(&root_path).unwrap(),
4602                ignore_contents
4603            );
4604            fs.save(
4605                &ignore_path,
4606                &ignore_contents.as_str().into(),
4607                Default::default(),
4608            )
4609            .await
4610            .unwrap();
4611        } else {
4612            let old_path = {
4613                let file_path = files.choose(rng);
4614                let dir_path = dirs[1..].choose(rng);
4615                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4616            };
4617
4618            let is_rename = rng.gen();
4619            if is_rename {
4620                let new_path_parent = dirs
4621                    .iter()
4622                    .filter(|d| !d.starts_with(old_path))
4623                    .choose(rng)
4624                    .unwrap();
4625
4626                let overwrite_existing_dir =
4627                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4628                let new_path = if overwrite_existing_dir {
4629                    fs.remove_dir(
4630                        &new_path_parent,
4631                        RemoveOptions {
4632                            recursive: true,
4633                            ignore_if_not_exists: true,
4634                        },
4635                    )
4636                    .await
4637                    .unwrap();
4638                    new_path_parent.to_path_buf()
4639                } else {
4640                    new_path_parent.join(gen_name(rng))
4641                };
4642
4643                log::info!(
4644                    "renaming {:?} to {}{:?}",
4645                    old_path.strip_prefix(&root_path).unwrap(),
4646                    if overwrite_existing_dir {
4647                        "overwrite "
4648                    } else {
4649                        ""
4650                    },
4651                    new_path.strip_prefix(&root_path).unwrap()
4652                );
4653                fs.rename(
4654                    &old_path,
4655                    &new_path,
4656                    fs::RenameOptions {
4657                        overwrite: true,
4658                        ignore_if_exists: true,
4659                    },
4660                )
4661                .await
4662                .unwrap();
4663            } else if fs.is_file(&old_path).await {
4664                log::info!(
4665                    "deleting file {:?}",
4666                    old_path.strip_prefix(&root_path).unwrap()
4667                );
4668                fs.remove_file(old_path, Default::default()).await.unwrap();
4669            } else {
4670                log::info!(
4671                    "deleting dir {:?}",
4672                    old_path.strip_prefix(&root_path).unwrap()
4673                );
4674                fs.remove_dir(
4675                    &old_path,
4676                    RemoveOptions {
4677                        recursive: true,
4678                        ignore_if_not_exists: true,
4679                    },
4680                )
4681                .await
4682                .unwrap();
4683            }
4684        }
4685    }
4686
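        // Produces a random six-character alphanumeric name.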
4687    fn gen_name(rng: &mut impl Rng) -> String {
4688        (0..6)
4689            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4690            .map(char::from)
4691            .collect()
4692    }
4693
4694    impl LocalSnapshot {
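            // Asserts that entries_by_path and entries_by_id agree, that file iterators and traversals visit entries consistently, and that every tracked .gitignore has a corresponding entry.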
4695        fn check_invariants(&self) {
4696            assert_eq!(
4697                self.entries_by_path
4698                    .cursor::<()>()
4699                    .map(|e| (&e.path, e.id))
4700                    .collect::<Vec<_>>(),
4701                self.entries_by_id
4702                    .cursor::<()>()
4703                    .map(|e| (&e.path, e.id))
4704                    .collect::<collections::BTreeSet<_>>()
4705                    .into_iter()
4706                    .collect::<Vec<_>>(),
4707                "entries_by_path and entries_by_id are inconsistent"
4708            );
4709
4710            let mut files = self.files(true, 0);
4711            let mut visible_files = self.files(false, 0);
4712            for entry in self.entries_by_path.cursor::<()>() {
4713                if entry.is_file() {
4714                    assert_eq!(files.next().unwrap().inode, entry.inode);
4715                    if !entry.is_ignored {
4716                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4717                    }
4718                }
4719            }
4720
4721            assert!(files.next().is_none());
4722            assert!(visible_files.next().is_none());
4723
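                // Despite the name, this reconstructs a depth-first ordering: children are inserted so that pop() visits a child's subtree before its siblings, so the result should match entries_by_path.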
4724            let mut bfs_paths = Vec::new();
4725            let mut stack = vec![Path::new("")];
4726            while let Some(path) = stack.pop() {
4727                bfs_paths.push(path);
4728                let ix = stack.len();
4729                for child_entry in self.child_entries(path) {
4730                    stack.insert(ix, &child_entry.path);
4731                }
4732            }
4733
4734            let dfs_paths_via_iter = self
4735                .entries_by_path
4736                .cursor::<()>()
4737                .map(|e| e.path.as_ref())
4738                .collect::<Vec<_>>();
4739            assert_eq!(bfs_paths, dfs_paths_via_iter);
4740
4741            let dfs_paths_via_traversal = self
4742                .entries(true)
4743                .map(|e| e.path.as_ref())
4744                .collect::<Vec<_>>();
4745            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4746
4747            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4748                let ignore_parent_path =
4749                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4750                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4751                assert!(self
4752                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4753                    .is_some());
4754            }
4755        }
4756
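            // Flattens the snapshot into sorted (path, inode, is_ignored) tuples for comparison in assertions.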
4757        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4758            let mut paths = Vec::new();
4759            for entry in self.entries_by_path.cursor::<()>() {
4760                if include_ignored || !entry.is_ignored {
4761                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4762                }
4763            }
4764            paths.sort_by(|a, b| a.0.cmp(b.0));
4765            paths
4766        }
4767    }
4768
4769    mod git_tests {
4770        use super::*;
4771        use pretty_assertions::assert_eq;
4772
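            // Renaming a repository's work directory should keep the repository entry and its file statuses, now under the new path.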
4773        #[gpui::test]
4774        async fn test_rename_work_directory(cx: &mut TestAppContext) {
4775            let root = temp_tree(json!({
4776                "projects": {
4777                    "project1": {
4778                        "a": "",
4779                        "b": "",
4780                    }
4781                },
4782
4783            }));
4784            let root_path = root.path();
4785
4786            let http_client = FakeHttpClient::with_404_response();
4787            let client = cx.read(|cx| Client::new(http_client, cx));
4788            let tree = Worktree::local(
4789                client,
4790                root_path,
4791                true,
4792                Arc::new(RealFs),
4793                Default::default(),
4794                &mut cx.to_async(),
4795            )
4796            .await
4797            .unwrap();
4798
4799            let repo = git_init(&root_path.join("projects/project1"));
4800            git_add("a", &repo);
4801            git_commit("init", &repo);
4802            std::fs::write(root_path.join("projects/project1/a"), "aa").ok();
4803
4804            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4805                .await;
4806
4807            tree.flush_fs_events(cx).await;
4808
4809            cx.read(|cx| {
4810                let tree = tree.read(cx);
4811                let (work_dir, repo) = tree.repositories().next().unwrap();
4812                assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
4813                assert_eq!(
4814                    repo.status_for_file(tree, Path::new("projects/project1/a")),
4815                    Some(GitFileStatus::Modified)
4816                );
4817                assert_eq!(
4818                    repo.status_for_file(tree, Path::new("projects/project1/b")),
4819                    Some(GitFileStatus::Added)
4820                );
4821            });
4822
4823            std::fs::rename(
4824                root_path.join("projects/project1"),
4825                root_path.join("projects/project2"),
4826            )
4827            .ok();
4828            tree.flush_fs_events(cx).await;
4829
4830            cx.read(|cx| {
4831                let tree = tree.read(cx);
4832                let (work_dir, repo) = tree.repositories().next().unwrap();
4833                assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
4834                assert_eq!(
4835                    repo.status_for_file(tree, Path::new("projects/project2/a")),
4836                    Some(GitFileStatus::Modified)
4837                );
4838                assert_eq!(
4839                    repo.status_for_file(tree, Path::new("projects/project2/b")),
4840                    Some(GitFileStatus::Added)
4841                );
4842            });
4843        }
4844
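            // Paths should resolve to their innermost enclosing repository; changes inside .git emit UpdatedGitRepositories events, and deleting .git drops the repository.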
4845        #[gpui::test]
4846        async fn test_git_repository_for_path(cx: &mut TestAppContext) {
4847            let root = temp_tree(json!({
4848                "c.txt": "",
4849                "dir1": {
4850                    ".git": {},
4851                    "deps": {
4852                        "dep1": {
4853                            ".git": {},
4854                            "src": {
4855                                "a.txt": ""
4856                            }
4857                        }
4858                    },
4859                    "src": {
4860                        "b.txt": ""
4861                    }
4862                },
4863            }));
4864
4865            let http_client = FakeHttpClient::with_404_response();
4866            let client = cx.read(|cx| Client::new(http_client, cx));
4867            let tree = Worktree::local(
4868                client,
4869                root.path(),
4870                true,
4871                Arc::new(RealFs),
4872                Default::default(),
4873                &mut cx.to_async(),
4874            )
4875            .await
4876            .unwrap();
4877
4878            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4879                .await;
4880            tree.flush_fs_events(cx).await;
4881
4882            tree.read_with(cx, |tree, _cx| {
4883                let tree = tree.as_local().unwrap();
4884
4885                assert!(tree.repository_for_path("c.txt".as_ref()).is_none());
4886
4887                let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
4888                assert_eq!(
4889                    entry
4890                        .work_directory(tree)
4891                        .map(|directory| directory.as_ref().to_owned()),
4892                    Some(Path::new("dir1").to_owned())
4893                );
4894
4895                let entry = tree
4896                    .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
4897                    .unwrap();
4898                assert_eq!(
4899                    entry
4900                        .work_directory(tree)
4901                        .map(|directory| directory.as_ref().to_owned()),
4902                    Some(Path::new("dir1/deps/dep1").to_owned())
4903                );
4904
4905                let entries = tree.files(false, 0);
4906
4907                let paths_with_repos = tree
4908                    .entries_with_repositories(entries)
4909                    .map(|(entry, repo)| {
4910                        (
4911                            entry.path.as_ref(),
4912                            repo.and_then(|repo| {
4913                                repo.work_directory(&tree)
4914                                    .map(|work_directory| work_directory.0.to_path_buf())
4915                            }),
4916                        )
4917                    })
4918                    .collect::<Vec<_>>();
4919
4920                assert_eq!(
4921                    paths_with_repos,
4922                    &[
4923                        (Path::new("c.txt"), None),
4924                        (
4925                            Path::new("dir1/deps/dep1/src/a.txt"),
4926                            Some(Path::new("dir1/deps/dep1").into())
4927                        ),
4928                        (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
4929                    ]
4930                );
4931            });
4932
4933            let repo_update_events = Arc::new(Mutex::new(vec![]));
4934            tree.update(cx, |_, cx| {
4935                let repo_update_events = repo_update_events.clone();
4936                cx.subscribe(&tree, move |_, _, event, _| {
4937                    if let Event::UpdatedGitRepositories(update) = event {
4938                        repo_update_events.lock().push(update.clone());
4939                    }
4940                })
4941                .detach();
4942            });
4943
4944            std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
4945            tree.flush_fs_events(cx).await;
4946
4947            assert_eq!(
4948                repo_update_events.lock()[0]
4949                    .keys()
4950                    .cloned()
4951                    .collect::<Vec<Arc<Path>>>(),
4952                vec![Path::new("dir1").into()]
4953            );
4954
4955            std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
4956            tree.flush_fs_events(cx).await;
4957
4958            tree.read_with(cx, |tree, _cx| {
4959                let tree = tree.as_local().unwrap();
4960
4961                assert!(tree
4962                    .repository_for_path("dir1/src/b.txt".as_ref())
4963                    .is_none());
4964            });
4965        }
4966
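            // Exercises status tracking across file edits, commits, resets, stashes, gitignore changes, file deletions, and directory renames.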
4967        #[gpui::test]
4968        async fn test_git_status(cx: &mut TestAppContext) {
4969            const IGNORE_RULE: &'static str = "**/target";
4970
4971            let root = temp_tree(json!({
4972                "project": {
4973                    "a.txt": "a",
4974                    "b.txt": "bb",
4975                    "c": {
4976                        "d": {
4977                            "e.txt": "eee"
4978                        }
4979                    },
4980                    "f.txt": "ffff",
4981                    "target": {
4982                        "build_file": "???"
4983                    },
4984                    ".gitignore": IGNORE_RULE
4985                },
4986
4987            }));
4988
4989            let http_client = FakeHttpClient::with_404_response();
4990            let client = cx.read(|cx| Client::new(http_client, cx));
4991            let tree = Worktree::local(
4992                client,
4993                root.path(),
4994                true,
4995                Arc::new(RealFs),
4996                Default::default(),
4997                &mut cx.to_async(),
4998            )
4999            .await
5000            .unwrap();
5001
5002            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
5003                .await;
5004
5005            const A_TXT: &'static str = "a.txt";
5006            const B_TXT: &'static str = "b.txt";
5007            const E_TXT: &'static str = "c/d/e.txt";
5008            const F_TXT: &'static str = "f.txt";
5009            const DOTGITIGNORE: &'static str = ".gitignore";
5010            const BUILD_FILE: &'static str = "target/build_file";
5011
5012            let work_dir = root.path().join("project");
5013            let mut repo = git_init(work_dir.as_path());
5014            repo.add_ignore_rule(IGNORE_RULE).unwrap();
5015            git_add(Path::new(A_TXT), &repo);
5016            git_add(Path::new(E_TXT), &repo);
5017            git_add(Path::new(DOTGITIGNORE), &repo);
5018            git_commit("Initial commit", &repo);
5019
5020            std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
5021
5022            tree.flush_fs_events(cx).await;
5023
5024            // Check that the right git state is observed on startup
5025            tree.read_with(cx, |tree, _cx| {
5026                let snapshot = tree.snapshot();
5027                assert_eq!(snapshot.repository_entries.iter().count(), 1);
5028                let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
5029                assert_eq!(dir.0.as_ref(), Path::new("project"));
5030
5031                assert_eq!(repo.statuses.iter().count(), 3);
5032                assert_eq!(
5033                    repo.statuses.get(&Path::new(A_TXT).into()),
5034                    Some(&GitFileStatus::Modified)
5035                );
5036                assert_eq!(
5037                    repo.statuses.get(&Path::new(B_TXT).into()),
5038                    Some(&GitFileStatus::Added)
5039                );
5040                assert_eq!(
5041                    repo.statuses.get(&Path::new(F_TXT).into()),
5042                    Some(&GitFileStatus::Added)
5043                );
5044            });
5045
5046            git_add(Path::new(A_TXT), &repo);
5047            git_add(Path::new(B_TXT), &repo);
5048            git_commit("Committing modified and added", &repo);
5049            tree.flush_fs_events(cx).await;
5050
5051            // Check that repo-only changes (index and HEAD updates, with no working-tree edits) are tracked
5052            tree.read_with(cx, |tree, _cx| {
5053                let snapshot = tree.snapshot();
5054                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5055
5056                assert_eq!(repo.statuses.iter().count(), 1);
5057                assert_eq!(
5058                    repo.statuses.get(&Path::new(F_TXT).into()),
5059                    Some(&GitFileStatus::Added)
5060                );
5061            });
5062
5063            git_reset(0, &repo);
5064            git_remove_index(Path::new(B_TXT), &repo);
5065            git_stash(&mut repo);
5066            std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
5067            std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
5068            tree.flush_fs_events(cx).await;
5069
5070            // Check that more complex repo changes are tracked
5071            tree.read_with(cx, |tree, _cx| {
5072                let snapshot = tree.snapshot();
5073                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5074
5075                assert_eq!(repo.statuses.iter().count(), 3);
5076                assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
5077                assert_eq!(
5078                    repo.statuses.get(&Path::new(B_TXT).into()),
5079                    Some(&GitFileStatus::Added)
5080                );
5081                assert_eq!(
5082                    repo.statuses.get(&Path::new(E_TXT).into()),
5083                    Some(&GitFileStatus::Modified)
5084                );
5085                assert_eq!(
5086                    repo.statuses.get(&Path::new(F_TXT).into()),
5087                    Some(&GitFileStatus::Added)
5088                );
5089            });
5090
5091            std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
5092            std::fs::remove_dir_all(work_dir.join("c")).unwrap();
5093            std::fs::write(
5094                work_dir.join(DOTGITIGNORE),
5095                [IGNORE_RULE, "f.txt"].join("\n"),
5096            )
5097            .unwrap();
5098
5099            git_add(Path::new(DOTGITIGNORE), &repo);
5100            git_commit("Committing modified git ignore", &repo);
5101
5102            tree.flush_fs_events(cx).await;
5103
5104            // Check that working-tree changes (deletions and gitignore updates) are reflected in the statuses
5105            tree.read_with(cx, |tree, _cx| {
5106                let snapshot = tree.snapshot();
5107                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5108
5109                assert_eq!(repo.statuses.iter().count(), 0);
5110            });
5111
5112            let mut renamed_dir_name = "first_directory/second_directory";
5113            const RENAMED_FILE: &'static str = "rf.txt";
5114
5115            std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
5116            std::fs::write(
5117                work_dir.join(renamed_dir_name).join(RENAMED_FILE),
5118                "new-contents",
5119            )
5120            .unwrap();
5121
5122            tree.flush_fs_events(cx).await;
5123
5124            tree.read_with(cx, |tree, _cx| {
5125                let snapshot = tree.snapshot();
5126                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5127
5128                assert_eq!(repo.statuses.iter().count(), 1);
5129                assert_eq!(
5130                    repo.statuses
5131                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5132                    Some(&GitFileStatus::Added)
5133                );
5134            });
5135
5136            renamed_dir_name = "new_first_directory/second_directory";
5137
5138            std::fs::rename(
5139                work_dir.join("first_directory"),
5140                work_dir.join("new_first_directory"),
5141            )
5142            .unwrap();
5143
5144            tree.flush_fs_events(cx).await;
5145
5146            tree.read_with(cx, |tree, _cx| {
5147                let snapshot = tree.snapshot();
5148                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5149
5150                assert_eq!(repo.statuses.iter().count(), 1);
5151                assert_eq!(
5152                    repo.statuses
5153                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5154                    Some(&GitFileStatus::Added)
5155                );
5156            });
5157        }
5158
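            // Helpers that drive git2 directly to set up repository state for the tests above.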
5159        #[track_caller]
5160        fn git_init(path: &Path) -> git2::Repository {
5161            git2::Repository::init(path).expect("Failed to initialize git repository")
5162        }
5163
5164        #[track_caller]
5165        fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5166            let path = path.as_ref();
5167            let mut index = repo.index().expect("Failed to get index");
5168            index.add_path(path).expect("Failed to add path");
5169            index.write().expect("Failed to write index");
5170        }
5171
5172        #[track_caller]
5173        fn git_remove_index(path: &Path, repo: &git2::Repository) {
5174            let mut index = repo.index().expect("Failed to get index");
5175            index.remove_path(path).expect("Failed to remove path");
5176            index.write().expect("Failed to write index");
5177        }
5178
5179        #[track_caller]
5180        fn git_commit(msg: &'static str, repo: &git2::Repository) {
5181            use git2::Signature;
5182
5183            let signature = Signature::now("test", "test@zed.dev").unwrap();
5184            let oid = repo.index().unwrap().write_tree().unwrap();
5185            let tree = repo.find_tree(oid).unwrap();
5186            if let Ok(head) = repo.head() {
5187                let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5188
5189                let parent_commit = parent_obj.as_commit().unwrap();
5190
5191                repo.commit(
5192                    Some("HEAD"),
5193                    &signature,
5194                    &signature,
5195                    msg,
5196                    &tree,
5197                    &[parent_commit],
5198                )
5199                .expect("Failed to commit with parent");
5200            } else {
5201                repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5202                    .expect("Failed to commit");
5203            }
5204        }
5205
5206        #[track_caller]
5207        fn git_stash(repo: &mut git2::Repository) {
5208            use git2::Signature;
5209
5210            let signature = Signature::now("test", "test@zed.dev").unwrap();
5211            repo.stash_save(&signature, "N/A", None)
5212                .expect("Failed to stash");
5213        }
5214
5215        #[track_caller]
5216        fn git_reset(offset: usize, repo: &git2::Repository) {
5217            let head = repo.head().expect("Couldn't get repo head");
5218            let object = head.peel(git2::ObjectType::Commit).unwrap();
5219            let commit = object.as_commit().unwrap();
5220            let new_head = commit
5221                .parents()
5222                .inspect(|parent| {
5223                    parent.message();
5224                })
5225                .nth(offset)
5227                .expect("Not enough history");
5228            repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5229                .expect("Could not reset");
5230        }
5231
5232        #[allow(dead_code)]
5233        #[track_caller]
5234        fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5235            repo.statuses(None)
5236                .unwrap()
5237                .iter()
5238                .map(|status| (status.path().unwrap().to_string(), status.status()))
5239                .collect()
5240        }
5241    }
5242}