worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TakeUntilExt, TryFutureExt};
  59
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  61pub struct WorktreeId(usize);
  62
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
  68pub struct LocalWorktree {
  69    snapshot: LocalSnapshot,
  70    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
  71    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
  72    _background_scanner_task: Task<()>,
  73    share: Option<ShareState>,
  74    diagnostics: HashMap<
  75        Arc<Path>,
  76        Vec<(
  77            LanguageServerId,
  78            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
  79        )>,
  80    >,
  81    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  82    client: Arc<Client>,
  83    fs: Arc<dyn Fs>,
  84    visible: bool,
  85}
  86
  87pub struct RemoteWorktree {
  88    snapshot: Snapshot,
  89    background_snapshot: Arc<Mutex<Snapshot>>,
  90    project_id: u64,
  91    client: Arc<Client>,
  92    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
  93    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
  94    replica_id: ReplicaId,
  95    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  96    visible: bool,
  97    disconnected: bool,
  98}
  99
 100#[derive(Clone)]
 101pub struct Snapshot {
 102    id: WorktreeId,
 103    abs_path: Arc<Path>,
 104    root_name: String,
 105    root_char_bag: CharBag,
 106    entries_by_path: SumTree<Entry>,
 107    entries_by_id: SumTree<PathEntry>,
 108    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 109
 110    /// A number that increases every time the worktree begins scanning
 111    /// a set of paths from the filesystem. This scanning could be caused
 112    /// by some operation performed on the worktree, such as reading or
 113    /// writing a file, or by an event reported by the filesystem.
 114    scan_id: usize,
 115
 116    /// The latest scan id that has completed, and whose preceding scans
 117    /// have all completed. The current `scan_id` could be more than one
 118    /// greater than the `completed_scan_id` if operations are performed
 119    /// on the worktree while it is processing a file-system event.
 120    completed_scan_id: usize,
 121}
 122
 123#[derive(Clone, Debug, PartialEq, Eq)]
 124pub struct RepositoryEntry {
 125    pub(crate) work_directory: WorkDirectoryEntry,
 126    pub(crate) branch: Option<Arc<str>>,
 127    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
 128}
 129
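/// Maps a protobuf `GitStatus` code to a `GitFileStatus`, returning `None`
/// for values that don't correspond to a known status.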
 130fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
 131    proto::GitStatus::from_i32(git_status).map(|status| match status {
 132        proto::GitStatus::Added => GitFileStatus::Added,
 133        proto::GitStatus::Modified => GitFileStatus::Modified,
 134        proto::GitStatus::Conflict => GitFileStatus::Conflict,
 135    })
 136}
 137
 138impl RepositoryEntry {
 139    pub fn branch(&self) -> Option<Arc<str>> {
 140        self.branch.clone()
 141    }
 142
 143    pub fn work_directory_id(&self) -> ProjectEntryId {
 144        *self.work_directory
 145    }
 146
 147    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 148        snapshot
 149            .entry_for_id(self.work_directory_id())
 150            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 151    }
 152
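    /// Returns the combined git status of all entries under `path`, where a
    /// conflict is considered the most severe status, followed by modified and
    /// then added. Returns `None` if `path` is not part of this repository.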
 153    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 154        self.work_directory
 155            .relativize(snapshot, path)
 156            .and_then(|repo_path| {
 157                self.statuses
 158                    .iter_from(&repo_path)
 159                    .take_while(|(key, _)| key.starts_with(&repo_path))
                    // Short-circuit once we've found the most severe status (a conflict).
 161                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
 162                    .map(|(_, status)| status)
 163                    .reduce(
 164                        |status_first, status_second| match (status_first, status_second) {
 165                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
 166                                &GitFileStatus::Conflict
 167                            }
 168                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
 169                                &GitFileStatus::Modified
 170                            }
 171                            _ => &GitFileStatus::Added,
 172                        },
 173                    )
 174                    .copied()
 175            })
 176    }
 177
 178    #[cfg(any(test, feature = "test-support"))]
 179    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 180        self.work_directory
 181            .relativize(snapshot, path)
            .and_then(|repo_path| self.statuses.get(&repo_path))
 183            .cloned()
 184    }
 185
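    /// Builds a protobuf update that describes how to turn `other`'s statuses
    /// into `self`'s: statuses that are new or changed in `self` are reported
    /// as updated, and statuses present only in `other` are reported as removed.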
 186    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
 187        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
 188        let mut removed_statuses: Vec<String> = Vec::new();
 189
 190        let mut self_statuses = self.statuses.iter().peekable();
 191        let mut other_statuses = other.statuses.iter().peekable();
 192        loop {
 193            match (self_statuses.peek(), other_statuses.peek()) {
 194                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
 195                    match Ord::cmp(self_repo_path, other_repo_path) {
 196                        Ordering::Less => {
 197                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
 198                            self_statuses.next();
 199                        }
 200                        Ordering::Equal => {
 201                            if self_status != other_status {
 202                                updated_statuses
 203                                    .push(make_status_entry(self_repo_path, self_status));
 204                            }
 205
 206                            self_statuses.next();
 207                            other_statuses.next();
 208                        }
 209                        Ordering::Greater => {
 210                            removed_statuses.push(make_repo_path(other_repo_path));
 211                            other_statuses.next();
 212                        }
 213                    }
 214                }
 215                (Some((self_repo_path, self_status)), None) => {
 216                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
 217                    self_statuses.next();
 218                }
 219                (None, Some((other_repo_path, _))) => {
 220                    removed_statuses.push(make_repo_path(other_repo_path));
 221                    other_statuses.next();
 222                }
 223                (None, None) => break,
 224            }
 225        }
 226
 227        proto::RepositoryEntry {
 228            work_directory_id: self.work_directory_id().to_proto(),
 229            branch: self.branch.as_ref().map(|str| str.to_string()),
 230            removed_repo_paths: removed_statuses,
 231            updated_statuses,
 232        }
 233    }
 234}
 235
 236fn make_repo_path(path: &RepoPath) -> String {
 237    path.as_os_str().to_string_lossy().to_string()
 238}
 239
 240fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
 241    proto::StatusEntry {
 242        repo_path: make_repo_path(path),
 243        status: match status {
 244            GitFileStatus::Added => proto::GitStatus::Added.into(),
 245            GitFileStatus::Modified => proto::GitStatus::Modified.into(),
 246            GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
 247        },
 248    }
 249}
 250
 251impl From<&RepositoryEntry> for proto::RepositoryEntry {
 252    fn from(value: &RepositoryEntry) -> Self {
 253        proto::RepositoryEntry {
 254            work_directory_id: value.work_directory.to_proto(),
 255            branch: value.branch.as_ref().map(|str| str.to_string()),
 256            updated_statuses: value
 257                .statuses
 258                .iter()
 259                .map(|(repo_path, status)| make_status_entry(repo_path, status))
 260                .collect(),
 261            removed_repo_paths: Default::default(),
 262        }
 263    }
 264}
 265
/// This path corresponds to the repository's 'content path': the folder that contains the `.git` directory.
 267#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 268pub struct RepositoryWorkDirectory(Arc<Path>);
 269
 270impl Default for RepositoryWorkDirectory {
 271    fn default() -> Self {
 272        RepositoryWorkDirectory(Arc::from(Path::new("")))
 273    }
 274}
 275
 276impl AsRef<Path> for RepositoryWorkDirectory {
 277    fn as_ref(&self) -> &Path {
 278        self.0.as_ref()
 279    }
 280}
 281
 282#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 283pub struct WorkDirectoryEntry(ProjectEntryId);
 284
 285impl WorkDirectoryEntry {
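    /// Converts a worktree-relative `path` into a path relative to this work
    /// directory, returning `None` if the work directory's entry is missing or
    /// `path` does not lie inside it.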
 286    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 287        worktree.entry_for_id(self.0).and_then(|entry| {
 288            path.strip_prefix(&entry.path)
 289                .ok()
 290                .map(move |path| path.into())
 291        })
 292    }
 293}
 294
 295impl Deref for WorkDirectoryEntry {
 296    type Target = ProjectEntryId;
 297
 298    fn deref(&self) -> &Self::Target {
 299        &self.0
 300    }
 301}
 302
impl From<ProjectEntryId> for WorkDirectoryEntry {
 304    fn from(value: ProjectEntryId) -> Self {
 305        WorkDirectoryEntry(value)
 306    }
 307}
 308
 309#[derive(Debug, Clone)]
 310pub struct LocalSnapshot {
 311    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by the absolute path
    /// of their parent directory. The boolean indicates whether the gitignore
    /// needs to be updated.
 314    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their work directory (the folder containing the `.git`).
 317    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 318}
 319
 320pub struct LocalMutableSnapshot {
 321    snapshot: LocalSnapshot,
 322    /// The ids of all of the entries that were removed from the snapshot
 323    /// as part of the current update. These entry ids may be re-used
 324    /// if the same inode is discovered at a new path, or if the given
 325    /// path is re-created after being deleted.
 326    removed_entry_ids: HashMap<u64, ProjectEntryId>,
 327}
 328
 329#[derive(Debug, Clone)]
 330pub struct LocalRepositoryEntry {
 331    pub(crate) scan_id: usize,
 332    pub(crate) git_dir_scan_id: usize,
 333    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
 334    /// Path to the actual .git folder.
 335    /// Note: if .git is a file, this points to the folder indicated by the .git file
 336    pub(crate) git_dir_path: Arc<Path>,
 337}
 338
 339impl LocalRepositoryEntry {
 340    // Note that this path should be relative to the worktree root.
 341    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 342        path.starts_with(self.git_dir_path.as_ref())
 343    }
 344}
 345
 346impl Deref for LocalSnapshot {
 347    type Target = Snapshot;
 348
 349    fn deref(&self) -> &Self::Target {
 350        &self.snapshot
 351    }
 352}
 353
 354impl DerefMut for LocalSnapshot {
 355    fn deref_mut(&mut self) -> &mut Self::Target {
 356        &mut self.snapshot
 357    }
 358}
 359
 360impl Deref for LocalMutableSnapshot {
 361    type Target = LocalSnapshot;
 362
 363    fn deref(&self) -> &Self::Target {
 364        &self.snapshot
 365    }
 366}
 367
 368impl DerefMut for LocalMutableSnapshot {
 369    fn deref_mut(&mut self) -> &mut Self::Target {
 370        &mut self.snapshot
 371    }
 372}
 373
 374enum ScanState {
 375    Started,
 376    Updated {
 377        snapshot: LocalSnapshot,
 378        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
 379        barrier: Option<barrier::Sender>,
 380        scanning: bool,
 381    },
 382}
 383
 384struct ShareState {
 385    project_id: u64,
 386    snapshots_tx: watch::Sender<LocalSnapshot>,
 387    resume_updates: watch::Sender<()>,
 388    _maintain_remote_snapshot: Task<Option<()>>,
 389}
 390
 391pub enum Event {
 392    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
 393    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
 394}
 395
 396impl Entity for Worktree {
 397    type Event = Event;
 398}
 399
 400impl Worktree {
 401    pub async fn local(
 402        client: Arc<Client>,
 403        path: impl Into<Arc<Path>>,
 404        visible: bool,
 405        fs: Arc<dyn Fs>,
 406        next_entry_id: Arc<AtomicUsize>,
 407        cx: &mut AsyncAppContext,
 408    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for fuzzy matching.
 411        let abs_path = path.into();
 412        let metadata = fs
 413            .metadata(&abs_path)
 414            .await
 415            .context("failed to stat worktree path")?;
 416
 417        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
 418            let root_name = abs_path
 419                .file_name()
 420                .map_or(String::new(), |f| f.to_string_lossy().to_string());
 421
 422            let mut snapshot = LocalSnapshot {
 423                ignores_by_parent_abs_path: Default::default(),
 424                git_repositories: Default::default(),
 425                snapshot: Snapshot {
 426                    id: WorktreeId::from_usize(cx.model_id()),
 427                    abs_path: abs_path.clone(),
 428                    root_name: root_name.clone(),
 429                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
 430                    entries_by_path: Default::default(),
 431                    entries_by_id: Default::default(),
 432                    repository_entries: Default::default(),
 433                    scan_id: 1,
 434                    completed_scan_id: 0,
 435                },
 436            };
 437
 438            if let Some(metadata) = metadata {
 439                snapshot.insert_entry(
 440                    Entry::new(
 441                        Arc::from(Path::new("")),
 442                        &metadata,
 443                        &next_entry_id,
 444                        snapshot.root_char_bag,
 445                    ),
 446                    fs.as_ref(),
 447                );
 448            }
 449
 450            let (path_changes_tx, path_changes_rx) = channel::unbounded();
 451            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 452
 453            cx.spawn_weak(|this, mut cx| async move {
 454                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
 455                    this.update(&mut cx, |this, cx| {
 456                        let this = this.as_local_mut().unwrap();
 457                        match state {
 458                            ScanState::Started => {
 459                                *this.is_scanning.0.borrow_mut() = true;
 460                            }
 461                            ScanState::Updated {
 462                                snapshot,
 463                                changes,
 464                                barrier,
 465                                scanning,
 466                            } => {
 467                                *this.is_scanning.0.borrow_mut() = scanning;
 468                                this.set_snapshot(snapshot, cx);
 469                                cx.emit(Event::UpdatedEntries(changes));
 470                                drop(barrier);
 471                            }
 472                        }
 473                        cx.notify();
 474                    });
 475                }
 476            })
 477            .detach();
 478
 479            let background_scanner_task = cx.background().spawn({
 480                let fs = fs.clone();
 481                let snapshot = snapshot.clone();
 482                let background = cx.background().clone();
 483                async move {
 484                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 485                    BackgroundScanner::new(
 486                        snapshot,
 487                        next_entry_id,
 488                        fs,
 489                        scan_states_tx,
 490                        background,
 491                        path_changes_rx,
 492                    )
 493                    .run(events)
 494                    .await;
 495                }
 496            });
 497
 498            Worktree::Local(LocalWorktree {
 499                snapshot,
 500                is_scanning: watch::channel_with(true),
 501                share: None,
 502                path_changes_tx,
 503                _background_scanner_task: background_scanner_task,
 504                diagnostics: Default::default(),
 505                diagnostic_summaries: Default::default(),
 506                client,
 507                fs,
 508                visible,
 509            })
 510        }))
 511    }
 512
 513    pub fn remote(
 514        project_remote_id: u64,
 515        replica_id: ReplicaId,
 516        worktree: proto::WorktreeMetadata,
 517        client: Arc<Client>,
 518        cx: &mut AppContext,
 519    ) -> ModelHandle<Self> {
 520        cx.add_model(|cx: &mut ModelContext<Self>| {
 521            let snapshot = Snapshot {
 522                id: WorktreeId(worktree.id as usize),
 523                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
 524                root_name: worktree.root_name.clone(),
 525                root_char_bag: worktree
 526                    .root_name
 527                    .chars()
 528                    .map(|c| c.to_ascii_lowercase())
 529                    .collect(),
 530                entries_by_path: Default::default(),
 531                entries_by_id: Default::default(),
 532                repository_entries: Default::default(),
 533                scan_id: 1,
 534                completed_scan_id: 0,
 535            };
 536
 537            let (updates_tx, mut updates_rx) = mpsc::unbounded();
 538            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
 539            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 540
 541            cx.background()
 542                .spawn({
 543                    let background_snapshot = background_snapshot.clone();
 544                    async move {
 545                        while let Some(update) = updates_rx.next().await {
 546                            if let Err(error) =
 547                                background_snapshot.lock().apply_remote_update(update)
 548                            {
 549                                log::error!("error applying worktree update: {}", error);
 550                            }
 551                            snapshot_updated_tx.send(()).await.ok();
 552                        }
 553                    }
 554                })
 555                .detach();
 556
 557            cx.spawn_weak(|this, mut cx| async move {
 558                while (snapshot_updated_rx.recv().await).is_some() {
 559                    if let Some(this) = this.upgrade(&cx) {
 560                        this.update(&mut cx, |this, cx| {
 561                            let this = this.as_remote_mut().unwrap();
 562                            this.snapshot = this.background_snapshot.lock().clone();
 563                            cx.emit(Event::UpdatedEntries(Default::default()));
 564                            cx.notify();
 565                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 566                                if this.observed_snapshot(*scan_id) {
 567                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 568                                    let _ = tx.send(());
 569                                } else {
 570                                    break;
 571                                }
 572                            }
 573                        });
 574                    } else {
 575                        break;
 576                    }
 577                }
 578            })
 579            .detach();
 580
 581            Worktree::Remote(RemoteWorktree {
 582                project_id: project_remote_id,
 583                replica_id,
 584                snapshot: snapshot.clone(),
 585                background_snapshot,
 586                updates_tx: Some(updates_tx),
 587                snapshot_subscriptions: Default::default(),
 588                client: client.clone(),
 589                diagnostic_summaries: Default::default(),
 590                visible: worktree.visible,
 591                disconnected: false,
 592            })
 593        })
 594    }
 595
 596    pub fn as_local(&self) -> Option<&LocalWorktree> {
 597        if let Worktree::Local(worktree) = self {
 598            Some(worktree)
 599        } else {
 600            None
 601        }
 602    }
 603
 604    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 605        if let Worktree::Remote(worktree) = self {
 606            Some(worktree)
 607        } else {
 608            None
 609        }
 610    }
 611
 612    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 613        if let Worktree::Local(worktree) = self {
 614            Some(worktree)
 615        } else {
 616            None
 617        }
 618    }
 619
 620    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 621        if let Worktree::Remote(worktree) = self {
 622            Some(worktree)
 623        } else {
 624            None
 625        }
 626    }
 627
 628    pub fn is_local(&self) -> bool {
 629        matches!(self, Worktree::Local(_))
 630    }
 631
 632    pub fn is_remote(&self) -> bool {
 633        !self.is_local()
 634    }
 635
 636    pub fn snapshot(&self) -> Snapshot {
 637        match self {
 638            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 639            Worktree::Remote(worktree) => worktree.snapshot(),
 640        }
 641    }
 642
 643    pub fn scan_id(&self) -> usize {
 644        match self {
 645            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 646            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 647        }
 648    }
 649
 650    pub fn completed_scan_id(&self) -> usize {
 651        match self {
 652            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 653            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 654        }
 655    }
 656
 657    pub fn is_visible(&self) -> bool {
 658        match self {
 659            Worktree::Local(worktree) => worktree.visible,
 660            Worktree::Remote(worktree) => worktree.visible,
 661        }
 662    }
 663
 664    pub fn replica_id(&self) -> ReplicaId {
 665        match self {
 666            Worktree::Local(_) => 0,
 667            Worktree::Remote(worktree) => worktree.replica_id,
 668        }
 669    }
 670
 671    pub fn diagnostic_summaries(
 672        &self,
 673    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 674        match self {
 675            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 676            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 677        }
 678        .iter()
 679        .flat_map(|(path, summaries)| {
 680            summaries
 681                .iter()
 682                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 683        })
 684    }
 685
 686    pub fn abs_path(&self) -> Arc<Path> {
 687        match self {
 688            Worktree::Local(worktree) => worktree.abs_path.clone(),
 689            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 690        }
 691    }
 692}
 693
 694impl LocalWorktree {
 695    pub fn contains_abs_path(&self, path: &Path) -> bool {
 696        path.starts_with(&self.abs_path)
 697    }
 698
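    /// Converts a worktree-relative path into an absolute path. A path without
    /// a final component (such as the root entry's empty path) resolves to the
    /// worktree's own absolute path.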
 699    fn absolutize(&self, path: &Path) -> PathBuf {
 700        if path.file_name().is_some() {
 701            self.abs_path.join(path)
 702        } else {
 703            self.abs_path.to_path_buf()
 704        }
 705    }
 706
 707    pub(crate) fn load_buffer(
 708        &mut self,
 709        id: u64,
 710        path: &Path,
 711        cx: &mut ModelContext<Worktree>,
 712    ) -> Task<Result<ModelHandle<Buffer>>> {
 713        let path = Arc::from(path);
 714        cx.spawn(move |this, mut cx| async move {
 715            let (file, contents, diff_base) = this
 716                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
 717                .await?;
 718            let text_buffer = cx
 719                .background()
 720                .spawn(async move { text::Buffer::new(0, id, contents) })
 721                .await;
 722            Ok(cx.add_model(|cx| {
 723                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
 724                buffer.git_diff_recalc(cx);
 725                buffer
 726            }))
 727        })
 728    }
 729
 730    pub fn diagnostics_for_path(
 731        &self,
 732        path: &Path,
 733    ) -> Vec<(
 734        LanguageServerId,
 735        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 736    )> {
 737        self.diagnostics.get(path).cloned().unwrap_or_default()
 738    }
 739
 740    pub fn update_diagnostics(
 741        &mut self,
 742        server_id: LanguageServerId,
 743        worktree_path: Arc<Path>,
 744        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 745        _: &mut ModelContext<Worktree>,
 746    ) -> Result<bool> {
 747        let summaries_by_server_id = self
 748            .diagnostic_summaries
 749            .entry(worktree_path.clone())
 750            .or_default();
 751
 752        let old_summary = summaries_by_server_id
 753            .remove(&server_id)
 754            .unwrap_or_default();
 755
 756        let new_summary = DiagnosticSummary::new(&diagnostics);
 757        if new_summary.is_empty() {
 758            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
 759                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 760                    diagnostics_by_server_id.remove(ix);
 761                }
 762                if diagnostics_by_server_id.is_empty() {
 763                    self.diagnostics.remove(&worktree_path);
 764                }
 765            }
 766        } else {
 767            summaries_by_server_id.insert(server_id, new_summary);
 768            let diagnostics_by_server_id =
 769                self.diagnostics.entry(worktree_path.clone()).or_default();
 770            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 771                Ok(ix) => {
 772                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
 773                }
 774                Err(ix) => {
 775                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
 776                }
 777            }
 778        }
 779
 780        if !old_summary.is_empty() || !new_summary.is_empty() {
 781            if let Some(share) = self.share.as_ref() {
 782                self.client
 783                    .send(proto::UpdateDiagnosticSummary {
 784                        project_id: share.project_id,
 785                        worktree_id: self.id().to_proto(),
 786                        summary: Some(proto::DiagnosticSummary {
 787                            path: worktree_path.to_string_lossy().to_string(),
 788                            language_server_id: server_id.0 as u64,
 789                            error_count: new_summary.error_count as u32,
 790                            warning_count: new_summary.warning_count as u32,
 791                        }),
 792                    })
 793                    .log_err();
 794            }
 795        }
 796
 797        Ok(!old_summary.is_empty() || !new_summary.is_empty())
 798    }
 799
 800    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
 801        let updated_repos =
 802            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
 803
 804        self.snapshot = new_snapshot;
 805
 806        if let Some(share) = self.share.as_mut() {
 807            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
 808        }
 809
 810        if !updated_repos.is_empty() {
 811            cx.emit(Event::UpdatedGitRepositories(updated_repos));
 812        }
 813    }
 814
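    /// Compares two snapshots' git repositories and returns, keyed by work
    /// directory path, every repository that was added, removed, or whose git
    /// directory has been rescanned since the old snapshot.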
 815    fn changed_repos(
 816        &self,
 817        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 818        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 819    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
 820        let mut diff = HashMap::default();
 821        let mut old_repos = old_repos.iter().peekable();
 822        let mut new_repos = new_repos.iter().peekable();
 823        loop {
 824            match (old_repos.peek(), new_repos.peek()) {
 825                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
 826                    match Ord::cmp(old_entry_id, new_entry_id) {
 827                        Ordering::Less => {
 828                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
 829                                diff.insert(entry.path.clone(), (*old_repo).clone());
 830                            }
 831                            old_repos.next();
 832                        }
 833                        Ordering::Equal => {
 834                            if old_repo.git_dir_scan_id != new_repo.git_dir_scan_id {
 835                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
 836                                    diff.insert(entry.path.clone(), (*new_repo).clone());
 837                                }
 838                            }
 839
 840                            old_repos.next();
 841                            new_repos.next();
 842                        }
 843                        Ordering::Greater => {
 844                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
 845                                diff.insert(entry.path.clone(), (*new_repo).clone());
 846                            }
 847                            new_repos.next();
 848                        }
 849                    }
 850                }
 851                (Some((old_entry_id, old_repo)), None) => {
 852                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
 853                        diff.insert(entry.path.clone(), (*old_repo).clone());
 854                    }
 855                    old_repos.next();
 856                }
 857                (None, Some((new_entry_id, new_repo))) => {
 858                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
 859                        diff.insert(entry.path.clone(), (*new_repo).clone());
 860                    }
 861                    new_repos.next();
 862                }
 863                (None, None) => break,
 864            }
 865        }
 866        diff
 867    }
 868
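    /// Returns a future that completes once the background scanner reports
    /// that it is no longer scanning.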
 869    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 870        let mut is_scanning_rx = self.is_scanning.1.clone();
 871        async move {
            let mut is_scanning = *is_scanning_rx.borrow();
 873            while is_scanning {
 874                if let Some(value) = is_scanning_rx.recv().await {
 875                    is_scanning = value;
 876                } else {
 877                    break;
 878                }
 879            }
 880        }
 881    }
 882
 883    pub fn snapshot(&self) -> LocalSnapshot {
 884        self.snapshot.clone()
 885    }
 886
 887    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 888        proto::WorktreeMetadata {
 889            id: self.id().to_proto(),
 890            root_name: self.root_name().to_string(),
 891            visible: self.visible,
 892            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 893        }
 894    }
 895
 896    fn load(
 897        &self,
 898        path: &Path,
 899        cx: &mut ModelContext<Worktree>,
 900    ) -> Task<Result<(File, String, Option<String>)>> {
 901        let handle = cx.handle();
 902        let path = Arc::from(path);
 903        let abs_path = self.absolutize(&path);
 904        let fs = self.fs.clone();
 905        let snapshot = self.snapshot();
 906
 907        let mut index_task = None;
 908
 909        if let Some(repo) = snapshot.repository_for_path(&path) {
 910            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
 911            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
 912                let repo = repo.repo_ptr.to_owned();
 913                index_task = Some(
 914                    cx.background()
 915                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
 916                );
 917            }
 918        }
 919
 920        cx.spawn(|this, mut cx| async move {
 921            let text = fs.load(&abs_path).await?;
 922
 923            let diff_base = if let Some(index_task) = index_task {
 924                index_task.await
 925            } else {
 926                None
 927            };
 928
 929            // Eagerly populate the snapshot with an updated entry for the loaded file
 930            let entry = this
 931                .update(&mut cx, |this, cx| {
 932                    this.as_local().unwrap().refresh_entry(path, None, cx)
 933                })
 934                .await?;
 935
 936            Ok((
 937                File {
 938                    entry_id: entry.id,
 939                    worktree: handle,
 940                    path: entry.path,
 941                    mtime: entry.mtime,
 942                    is_local: true,
 943                    is_deleted: false,
 944                },
 945                text,
 946                diff_base,
 947            ))
 948        })
 949    }
 950
 951    pub fn save_buffer(
 952        &self,
 953        buffer_handle: ModelHandle<Buffer>,
 954        path: Arc<Path>,
 955        has_changed_file: bool,
 956        cx: &mut ModelContext<Worktree>,
 957    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
 958        let handle = cx.handle();
 959        let buffer = buffer_handle.read(cx);
 960
 961        let rpc = self.client.clone();
 962        let buffer_id = buffer.remote_id();
 963        let project_id = self.share.as_ref().map(|share| share.project_id);
 964
 965        let text = buffer.as_rope().clone();
 966        let fingerprint = text.fingerprint();
 967        let version = buffer.version();
 968        let save = self.write_file(path, text, buffer.line_ending(), cx);
 969
 970        cx.as_mut().spawn(|mut cx| async move {
 971            let entry = save.await?;
 972
 973            if has_changed_file {
 974                let new_file = Arc::new(File {
 975                    entry_id: entry.id,
 976                    worktree: handle,
 977                    path: entry.path,
 978                    mtime: entry.mtime,
 979                    is_local: true,
 980                    is_deleted: false,
 981                });
 982
 983                if let Some(project_id) = project_id {
 984                    rpc.send(proto::UpdateBufferFile {
 985                        project_id,
 986                        buffer_id,
 987                        file: Some(new_file.to_proto()),
 988                    })
 989                    .log_err();
 990                }
 991
                buffer_handle.update(&mut cx, |buffer, cx| {
                    buffer.file_updated(new_file, cx).detach();
                });
 997            }
 998
 999            if let Some(project_id) = project_id {
1000                rpc.send(proto::BufferSaved {
1001                    project_id,
1002                    buffer_id,
1003                    version: serialize_version(&version),
1004                    mtime: Some(entry.mtime.into()),
1005                    fingerprint: serialize_fingerprint(fingerprint),
1006                })?;
1007            }
1008
1009            buffer_handle.update(&mut cx, |buffer, cx| {
1010                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1011            });
1012
1013            Ok((version, fingerprint, entry.mtime))
1014        })
1015    }
1016
1017    pub fn create_entry(
1018        &self,
1019        path: impl Into<Arc<Path>>,
1020        is_dir: bool,
1021        cx: &mut ModelContext<Worktree>,
1022    ) -> Task<Result<Entry>> {
1023        let path = path.into();
1024        let abs_path = self.absolutize(&path);
1025        let fs = self.fs.clone();
1026        let write = cx.background().spawn(async move {
1027            if is_dir {
1028                fs.create_dir(&abs_path).await
1029            } else {
1030                fs.save(&abs_path, &Default::default(), Default::default())
1031                    .await
1032            }
1033        });
1034
1035        cx.spawn(|this, mut cx| async move {
1036            write.await?;
1037            this.update(&mut cx, |this, cx| {
1038                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1039            })
1040            .await
1041        })
1042    }
1043
1044    pub fn write_file(
1045        &self,
1046        path: impl Into<Arc<Path>>,
1047        text: Rope,
1048        line_ending: LineEnding,
1049        cx: &mut ModelContext<Worktree>,
1050    ) -> Task<Result<Entry>> {
1051        let path = path.into();
1052        let abs_path = self.absolutize(&path);
1053        let fs = self.fs.clone();
1054        let write = cx
1055            .background()
1056            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1057
1058        cx.spawn(|this, mut cx| async move {
1059            write.await?;
1060            this.update(&mut cx, |this, cx| {
1061                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1062            })
1063            .await
1064        })
1065    }
1066
1067    pub fn delete_entry(
1068        &self,
1069        entry_id: ProjectEntryId,
1070        cx: &mut ModelContext<Worktree>,
1071    ) -> Option<Task<Result<()>>> {
1072        let entry = self.entry_for_id(entry_id)?.clone();
1073        let abs_path = self.abs_path.clone();
1074        let fs = self.fs.clone();
1075
1076        let delete = cx.background().spawn(async move {
1077            let mut abs_path = fs.canonicalize(&abs_path).await?;
1078            if entry.path.file_name().is_some() {
1079                abs_path = abs_path.join(&entry.path);
1080            }
1081            if entry.is_file() {
1082                fs.remove_file(&abs_path, Default::default()).await?;
1083            } else {
1084                fs.remove_dir(
1085                    &abs_path,
1086                    RemoveOptions {
1087                        recursive: true,
1088                        ignore_if_not_exists: false,
1089                    },
1090                )
1091                .await?;
1092            }
1093            anyhow::Ok(abs_path)
1094        });
1095
1096        Some(cx.spawn(|this, mut cx| async move {
1097            let abs_path = delete.await?;
1098            let (tx, mut rx) = barrier::channel();
1099            this.update(&mut cx, |this, _| {
1100                this.as_local_mut()
1101                    .unwrap()
1102                    .path_changes_tx
1103                    .try_send((vec![abs_path], tx))
1104            })?;
1105            rx.recv().await;
1106            Ok(())
1107        }))
1108    }
1109
1110    pub fn rename_entry(
1111        &self,
1112        entry_id: ProjectEntryId,
1113        new_path: impl Into<Arc<Path>>,
1114        cx: &mut ModelContext<Worktree>,
1115    ) -> Option<Task<Result<Entry>>> {
1116        let old_path = self.entry_for_id(entry_id)?.path.clone();
1117        let new_path = new_path.into();
1118        let abs_old_path = self.absolutize(&old_path);
1119        let abs_new_path = self.absolutize(&new_path);
1120        let fs = self.fs.clone();
1121        let rename = cx.background().spawn(async move {
1122            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1123                .await
1124        });
1125
1126        Some(cx.spawn(|this, mut cx| async move {
1127            rename.await?;
1128            this.update(&mut cx, |this, cx| {
1129                this.as_local_mut()
1130                    .unwrap()
1131                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1132            })
1133            .await
1134        }))
1135    }
1136
1137    pub fn copy_entry(
1138        &self,
1139        entry_id: ProjectEntryId,
1140        new_path: impl Into<Arc<Path>>,
1141        cx: &mut ModelContext<Worktree>,
1142    ) -> Option<Task<Result<Entry>>> {
1143        let old_path = self.entry_for_id(entry_id)?.path.clone();
1144        let new_path = new_path.into();
1145        let abs_old_path = self.absolutize(&old_path);
1146        let abs_new_path = self.absolutize(&new_path);
1147        let fs = self.fs.clone();
1148        let copy = cx.background().spawn(async move {
1149            copy_recursive(
1150                fs.as_ref(),
1151                &abs_old_path,
1152                &abs_new_path,
1153                Default::default(),
1154            )
1155            .await
1156        });
1157
1158        Some(cx.spawn(|this, mut cx| async move {
1159            copy.await?;
1160            this.update(&mut cx, |this, cx| {
1161                this.as_local_mut()
1162                    .unwrap()
1163                    .refresh_entry(new_path.clone(), None, cx)
1164            })
1165            .await
1166        }))
1167    }
1168
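    /// Asks the background scanner to rescan the given path (and the old path,
    /// if the entry was renamed), waits until that scan has been processed, and
    /// then returns the refreshed entry from the updated snapshot.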
1169    fn refresh_entry(
1170        &self,
1171        path: Arc<Path>,
1172        old_path: Option<Arc<Path>>,
1173        cx: &mut ModelContext<Worktree>,
1174    ) -> Task<Result<Entry>> {
1175        let fs = self.fs.clone();
1176        let abs_root_path = self.abs_path.clone();
1177        let path_changes_tx = self.path_changes_tx.clone();
1178        cx.spawn_weak(move |this, mut cx| async move {
1179            let abs_path = fs.canonicalize(&abs_root_path).await?;
1180            let mut paths = Vec::with_capacity(2);
1181            paths.push(if path.file_name().is_some() {
1182                abs_path.join(&path)
1183            } else {
1184                abs_path.clone()
1185            });
1186            if let Some(old_path) = old_path {
1187                paths.push(if old_path.file_name().is_some() {
1188                    abs_path.join(&old_path)
1189                } else {
1190                    abs_path.clone()
1191                });
1192            }
1193
1194            let (tx, mut rx) = barrier::channel();
1195            path_changes_tx.try_send((paths, tx))?;
1196            rx.recv().await;
1197            this.upgrade(&cx)
1198                .ok_or_else(|| anyhow!("worktree was dropped"))?
1199                .update(&mut cx, |this, _| {
1200                    this.entry_for_path(path)
1201                        .cloned()
1202                        .ok_or_else(|| anyhow!("failed to read path after update"))
1203                })
1204        })
1205    }
1206
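    /// Begins sharing this worktree with the remote project, streaming snapshot
    /// updates to the server from a background task. The returned task resolves
    /// once the first snapshot has been sent, or immediately if the worktree is
    /// already shared.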
1207    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1208        let (share_tx, share_rx) = oneshot::channel();
1209
1210        if let Some(share) = self.share.as_mut() {
1211            let _ = share_tx.send(());
1212            *share.resume_updates.borrow_mut() = ();
1213        } else {
1214            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1215            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1216            let worktree_id = cx.model_id() as u64;
1217
1218            for (path, summaries) in &self.diagnostic_summaries {
1219                for (&server_id, summary) in summaries {
1220                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1221                        project_id,
1222                        worktree_id,
1223                        summary: Some(summary.to_proto(server_id, &path)),
1224                    }) {
1225                        return Task::ready(Err(e));
1226                    }
1227                }
1228            }
1229
1230            let _maintain_remote_snapshot = cx.background().spawn({
1231                let client = self.client.clone();
1232                async move {
1233                    let mut share_tx = Some(share_tx);
1234                    let mut prev_snapshot = LocalSnapshot {
1235                        ignores_by_parent_abs_path: Default::default(),
1236                        git_repositories: Default::default(),
1237                        snapshot: Snapshot {
1238                            id: WorktreeId(worktree_id as usize),
1239                            abs_path: Path::new("").into(),
1240                            root_name: Default::default(),
1241                            root_char_bag: Default::default(),
1242                            entries_by_path: Default::default(),
1243                            entries_by_id: Default::default(),
1244                            repository_entries: Default::default(),
1245                            scan_id: 0,
1246                            completed_scan_id: 0,
1247                        },
1248                    };
1249                    while let Some(snapshot) = snapshots_rx.recv().await {
1250                        #[cfg(any(test, feature = "test-support"))]
1251                        const MAX_CHUNK_SIZE: usize = 2;
1252                        #[cfg(not(any(test, feature = "test-support")))]
1253                        const MAX_CHUNK_SIZE: usize = 256;
1254
1255                        let update =
1256                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1257                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1258                            let _ = resume_updates_rx.try_recv();
1259                            while let Err(error) = client.request(update.clone()).await {
1260                                log::error!("failed to send worktree update: {}", error);
1261                                log::info!("waiting to resume updates");
1262                                if resume_updates_rx.next().await.is_none() {
1263                                    return Ok(());
1264                                }
1265                            }
1266                        }
1267
1268                        if let Some(share_tx) = share_tx.take() {
1269                            let _ = share_tx.send(());
1270                        }
1271
1272                        prev_snapshot = snapshot;
1273                    }
1274
1275                    Ok::<_, anyhow::Error>(())
1276                }
1277                .log_err()
1278            });
1279
1280            self.share = Some(ShareState {
1281                project_id,
1282                snapshots_tx,
1283                resume_updates: resume_updates_tx,
1284                _maintain_remote_snapshot,
1285            });
1286        }
1287
1288        cx.foreground()
1289            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1290    }
1291
1292    pub fn unshare(&mut self) {
1293        self.share.take();
1294    }
1295
1296    pub fn is_shared(&self) -> bool {
1297        self.share.is_some()
1298    }
1299}
1300
1301impl RemoteWorktree {
1302    fn snapshot(&self) -> Snapshot {
1303        self.snapshot.clone()
1304    }
1305
1306    pub fn disconnected_from_host(&mut self) {
1307        self.updates_tx.take();
1308        self.snapshot_subscriptions.clear();
1309        self.disconnected = true;
1310    }
1311
1312    pub fn save_buffer(
1313        &self,
1314        buffer_handle: ModelHandle<Buffer>,
1315        cx: &mut ModelContext<Worktree>,
1316    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1317        let buffer = buffer_handle.read(cx);
1318        let buffer_id = buffer.remote_id();
1319        let version = buffer.version();
1320        let rpc = self.client.clone();
1321        let project_id = self.project_id;
1322        cx.as_mut().spawn(|mut cx| async move {
1323            let response = rpc
1324                .request(proto::SaveBuffer {
1325                    project_id,
1326                    buffer_id,
1327                    version: serialize_version(&version),
1328                })
1329                .await?;
1330            let version = deserialize_version(&response.version);
1331            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1332            let mtime = response
1333                .mtime
1334                .ok_or_else(|| anyhow!("missing mtime"))?
1335                .into();
1336
1337            buffer_handle.update(&mut cx, |buffer, cx| {
1338                buffer.did_save(version.clone(), fingerprint, mtime, cx);
1339            });
1340
1341            Ok((version, fingerprint, mtime))
1342        })
1343    }
1344
1345    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1346        if let Some(updates_tx) = &self.updates_tx {
1347            updates_tx
1348                .unbounded_send(update)
1349                .expect("consumer runs to completion");
1350        }
1351    }
1352
1353    fn observed_snapshot(&self, scan_id: usize) -> bool {
1354        self.completed_scan_id >= scan_id
1355    }
1356
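    /// Returns a future that resolves once this worktree has observed a
    /// snapshot whose scan id is at least `scan_id`, or fails if the worktree
    /// has been disconnected from the host.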
1357    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1358        let (tx, rx) = oneshot::channel();
1359        if self.observed_snapshot(scan_id) {
1360            let _ = tx.send(());
1361        } else if self.disconnected {
1362            drop(tx);
1363        } else {
1364            match self
1365                .snapshot_subscriptions
1366                .binary_search_by_key(&scan_id, |probe| probe.0)
1367            {
1368                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1369            }
1370        }
1371
1372        async move {
1373            rx.await?;
1374            Ok(())
1375        }
1376    }
1377
1378    pub fn update_diagnostic_summary(
1379        &mut self,
1380        path: Arc<Path>,
1381        summary: &proto::DiagnosticSummary,
1382    ) {
1383        let server_id = LanguageServerId(summary.language_server_id as usize);
1384        let summary = DiagnosticSummary {
1385            error_count: summary.error_count as usize,
1386            warning_count: summary.warning_count as usize,
1387        };
1388
1389        if summary.is_empty() {
1390            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1391                summaries.remove(&server_id);
1392                if summaries.is_empty() {
1393                    self.diagnostic_summaries.remove(&path);
1394                }
1395            }
1396        } else {
1397            self.diagnostic_summaries
1398                .entry(path)
1399                .or_default()
1400                .insert(server_id, summary);
1401        }
1402    }
1403
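    /// Waits until the snapshot for `scan_id` has been observed, then inserts
    /// the given entry into both the background snapshot and this worktree's
    /// snapshot.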
1404    pub fn insert_entry(
1405        &mut self,
1406        entry: proto::Entry,
1407        scan_id: usize,
1408        cx: &mut ModelContext<Worktree>,
1409    ) -> Task<Result<Entry>> {
1410        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1411        cx.spawn(|this, mut cx| async move {
1412            wait_for_snapshot.await?;
1413            this.update(&mut cx, |worktree, _| {
1414                let worktree = worktree.as_remote_mut().unwrap();
1415                let mut snapshot = worktree.background_snapshot.lock();
1416                let entry = snapshot.insert_entry(entry);
1417                worktree.snapshot = snapshot.clone();
1418                entry
1419            })
1420        })
1421    }
1422
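    /// Waits until the snapshot for `scan_id` has been observed, then removes
    /// the entry (and any descendants) from both the background snapshot and
    /// this worktree's snapshot.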
1423    pub(crate) fn delete_entry(
1424        &mut self,
1425        id: ProjectEntryId,
1426        scan_id: usize,
1427        cx: &mut ModelContext<Worktree>,
1428    ) -> Task<Result<()>> {
1429        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1430        cx.spawn(|this, mut cx| async move {
1431            wait_for_snapshot.await?;
1432            this.update(&mut cx, |worktree, _| {
1433                let worktree = worktree.as_remote_mut().unwrap();
1434                let mut snapshot = worktree.background_snapshot.lock();
1435                snapshot.delete_entry(id);
1436                worktree.snapshot = snapshot.clone();
1437            });
1438            Ok(())
1439        })
1440    }
1441}
1442
1443impl Snapshot {
1444    pub fn id(&self) -> WorktreeId {
1445        self.id
1446    }
1447
1448    pub fn abs_path(&self) -> &Arc<Path> {
1449        &self.abs_path
1450    }
1451
1452    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1453        self.entries_by_id.get(&entry_id, &()).is_some()
1454    }
1455
1456    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1457        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1458        let old_entry = self.entries_by_id.insert_or_replace(
1459            PathEntry {
1460                id: entry.id,
1461                path: entry.path.clone(),
1462                is_ignored: entry.is_ignored,
1463                scan_id: 0,
1464            },
1465            &(),
1466        );
1467        if let Some(old_entry) = old_entry {
1468            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1469        }
1470        self.entries_by_path.insert_or_replace(entry.clone(), &());
1471        Ok(entry)
1472    }
1473
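    /// Remove the entry with the given id, along with every entry beneath its
    /// path: deleting the entry for `a/b` also drops `a/b/c` from both
    /// `entries_by_path` and `entries_by_id`. Returns the removed entry's
    /// path, or `None` if no entry with that id exists.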
1474    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1475        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1476        self.entries_by_path = {
1477            let mut cursor = self.entries_by_path.cursor();
1478            let mut new_entries_by_path =
1479                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1480            while let Some(entry) = cursor.item() {
1481                if entry.path.starts_with(&removed_entry.path) {
1482                    self.entries_by_id.remove(&entry.id, &());
1483                    cursor.next(&());
1484                } else {
1485                    break;
1486                }
1487            }
1488            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1489            new_entries_by_path
1490        };
1491
1492        Some(removed_entry.path)
1493    }
1494
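    /// Apply an `UpdateWorktree` message received from the remote host. Entry
    /// removals and updates are applied as a single batch of edits to the path
    /// and id trees, repository entries are reconciled next, and finally the
    /// scan ids are brought in line with the remote snapshot;
    /// `completed_scan_id` only advances when the update is flagged as the
    /// last one for its scan.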
1495    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1496        let mut entries_by_path_edits = Vec::new();
1497        let mut entries_by_id_edits = Vec::new();
1498        for entry_id in update.removed_entries {
1499            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1500                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1501                entries_by_id_edits.push(Edit::Remove(entry.id));
1502            }
1503        }
1504
1505        for entry in update.updated_entries {
1506            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1507            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1508                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1509            }
1510            entries_by_id_edits.push(Edit::Insert(PathEntry {
1511                id: entry.id,
1512                path: entry.path.clone(),
1513                is_ignored: entry.is_ignored,
1514                scan_id: 0,
1515            }));
1516            entries_by_path_edits.push(Edit::Insert(entry));
1517        }
1518
1519        self.entries_by_path.edit(entries_by_path_edits, &());
1520        self.entries_by_id.edit(entries_by_id_edits, &());
1521
1522        update.removed_repositories.sort_unstable();
1523        self.repository_entries.retain(|_, entry| {
1524            update
1525                .removed_repositories
1526                .binary_search(&entry.work_directory.to_proto())
1527                .is_err()
1528        });
1533
1534        for repository in update.updated_repositories {
1535            let work_directory_entry: WorkDirectoryEntry =
1536                ProjectEntryId::from_proto(repository.work_directory_id).into();
1537
1538            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1539                let mut statuses = TreeMap::default();
1540                for status_entry in repository.updated_statuses {
1541                    let Some(git_file_status) = read_git_status(status_entry.status) else {
1542                        continue;
1543                    };
1544
1545                    let repo_path = RepoPath::new(status_entry.repo_path.into());
1546                    statuses.insert(repo_path, git_file_status);
1547                }
1548
1549                let work_directory = RepositoryWorkDirectory(entry.path.clone());
1550                if self.repository_entries.get(&work_directory).is_some() {
1551                    self.repository_entries.update(&work_directory, |repo| {
1552                        repo.branch = repository.branch.map(Into::into);
1553                        repo.statuses.insert_tree(statuses);
1554
1555                        for repo_path in repository.removed_repo_paths {
1556                            let repo_path = RepoPath::new(repo_path.into());
1557                            repo.statuses.remove(&repo_path);
1558                        }
1559                    });
1560                } else {
1561                    self.repository_entries.insert(
1562                        work_directory,
1563                        RepositoryEntry {
1564                            work_directory: work_directory_entry,
1565                            branch: repository.branch.map(Into::into),
1566                            statuses,
1567                        },
1568                    )
1569                }
1570            } else {
1571                log::error!("no work directory entry for repository {:?}", repository)
1572            }
1573        }
1574
1575        self.scan_id = update.scan_id as usize;
1576        if update.is_last_update {
1577            self.completed_scan_id = update.scan_id as usize;
1578        }
1579
1580        Ok(())
1581    }
1582
1583    pub fn file_count(&self) -> usize {
1584        self.entries_by_path.summary().file_count
1585    }
1586
1587    pub fn visible_file_count(&self) -> usize {
1588        self.entries_by_path.summary().visible_file_count
1589    }
1590
1591    fn traverse_from_offset(
1592        &self,
1593        include_dirs: bool,
1594        include_ignored: bool,
1595        start_offset: usize,
1596    ) -> Traversal {
1597        let mut cursor = self.entries_by_path.cursor();
1598        cursor.seek(
1599            &TraversalTarget::Count {
1600                count: start_offset,
1601                include_dirs,
1602                include_ignored,
1603            },
1604            Bias::Right,
1605            &(),
1606        );
1607        Traversal {
1608            cursor,
1609            include_dirs,
1610            include_ignored,
1611        }
1612    }
1613
1614    fn traverse_from_path(
1615        &self,
1616        include_dirs: bool,
1617        include_ignored: bool,
1618        path: &Path,
1619    ) -> Traversal {
1620        let mut cursor = self.entries_by_path.cursor();
1621        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1622        Traversal {
1623            cursor,
1624            include_dirs,
1625            include_ignored,
1626        }
1627    }
1628
1629    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1630        self.traverse_from_offset(false, include_ignored, start)
1631    }
1632
1633    pub fn entries(&self, include_ignored: bool) -> Traversal {
1634        self.traverse_from_offset(true, include_ignored, 0)
1635    }
1636
1637    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1638        self.repository_entries
1639            .iter()
1640            .map(|(path, entry)| (&path.0, entry))
1641    }
1642
1643    /// Get the repository whose work directory is exactly the given path.
1644    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1645        self.repository_entries
1646            .get(&RepositoryWorkDirectory(path.into()))
1647            .cloned()
1648    }
1649
1650    /// Get the repository whose work directory contains the given path.
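    ///
    /// Work directories can be nested, in which case the candidate with the
    /// longest matching prefix wins. A minimal sketch of the intended behavior
    /// (the paths and the `snapshot` binding are hypothetical):
    ///
    /// ```ignore
    /// // With repositories rooted at "" and "vendor/dep", a file inside the
    /// // nested repository resolves to the deeper work directory.
    /// let repo = snapshot.repository_for_path(Path::new("vendor/dep/src/lib.rs"));
    /// assert!(repo.is_some());
    /// ```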
1651    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1652        let mut max_len = 0;
1653        let mut current_candidate = None;
1654        for (work_directory, repo) in self.repository_entries.iter() {
1655            if path.starts_with(&work_directory.0) {
1656                if work_directory.0.as_os_str().len() >= max_len {
1657                    current_candidate = Some(repo);
1658                    max_len = work_directory.0.as_os_str().len();
1659                } else {
1660                    break;
1661                }
1662            }
1663        }
1664
1665        current_candidate.cloned()
1666    }
1667
1668    /// Given an ordered iterator of entries, returns an iterator of those entries,
1669    /// along with their containing git repository.
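    ///
    /// Both the entries and `self.repositories()` are ordered by path, so the
    /// pairing is done in a single forward pass: repositories are pushed onto a
    /// stack as the traversal enters their work directories and popped once the
    /// entries move past them. A rough usage sketch (the `snapshot` binding is
    /// hypothetical):
    ///
    /// ```ignore
    /// for (entry, repo) in snapshot.entries_with_repositories(snapshot.entries(false)) {
    ///     println!("{:?} in a repository? {}", entry.path, repo.is_some());
    /// }
    /// ```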
1670    pub fn entries_with_repositories<'a>(
1671        &'a self,
1672        entries: impl 'a + Iterator<Item = &'a Entry>,
1673    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1674        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1675        let mut repositories = self.repositories().peekable();
1676        entries.map(move |entry| {
1677            while let Some((repo_path, _)) = containing_repos.last() {
1678                if !entry.path.starts_with(repo_path) {
1679                    containing_repos.pop();
1680                } else {
1681                    break;
1682                }
1683            }
1684            while let Some((repo_path, _)) = repositories.peek() {
1685                if entry.path.starts_with(repo_path) {
1686                    containing_repos.push(repositories.next().unwrap());
1687                } else {
1688                    break;
1689                }
1690            }
1691            let repo = containing_repos.last().map(|(_, repo)| *repo);
1692            (entry, repo)
1693        })
1694    }
1695
1696    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1697        let empty_path = Path::new("");
1698        self.entries_by_path
1699            .cursor::<()>()
1700            .filter(move |entry| entry.path.as_ref() != empty_path)
1701            .map(|entry| &entry.path)
1702    }
1703
1704    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1705        let mut cursor = self.entries_by_path.cursor();
1706        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1707        let traversal = Traversal {
1708            cursor,
1709            include_dirs: true,
1710            include_ignored: true,
1711        };
1712        ChildEntriesIter {
1713            traversal,
1714            parent_path,
1715        }
1716    }
1717
1718    fn descendent_entries<'a>(
1719        &'a self,
1720        include_dirs: bool,
1721        include_ignored: bool,
1722        parent_path: &'a Path,
1723    ) -> DescendentEntriesIter<'a> {
1724        let mut cursor = self.entries_by_path.cursor();
1725        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1726        let mut traversal = Traversal {
1727            cursor,
1728            include_dirs,
1729            include_ignored,
1730        };
1731
1732        if traversal.end_offset() == traversal.start_offset() {
1733            traversal.advance();
1734        }
1735
1736        DescendentEntriesIter {
1737            traversal,
1738            parent_path,
1739        }
1740    }
1741
1742    pub fn root_entry(&self) -> Option<&Entry> {
1743        self.entry_for_path("")
1744    }
1745
1746    pub fn root_name(&self) -> &str {
1747        &self.root_name
1748    }
1749
1750    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1751        self.repository_entries
1752            .get(&RepositoryWorkDirectory(Path::new("").into()))
1753            .map(|entry| entry.to_owned())
1754    }
1755
1756    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1757        self.repository_entries.values()
1758    }
1759
1760    pub fn scan_id(&self) -> usize {
1761        self.scan_id
1762    }
1763
1764    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1765        let path = path.as_ref();
1766        self.traverse_from_path(true, true, path)
1767            .entry()
1768            .and_then(|entry| {
1769                if entry.path.as_ref() == path {
1770                    Some(entry)
1771                } else {
1772                    None
1773                }
1774            })
1775    }
1776
1777    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1778        let entry = self.entries_by_id.get(&id, &())?;
1779        self.entry_for_path(&entry.path)
1780    }
1781
1782    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1783        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1784    }
1785}
1786
1787impl LocalSnapshot {
1788    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1789        self.git_repositories.get(&repo.work_directory.0)
1790    }
1791
1792    pub(crate) fn repo_for_metadata(
1793        &self,
1794        path: &Path,
1795    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1796        self.git_repositories
1797            .iter()
1798            .find(|(_, repo)| repo.in_dot_git(path))
1799    }
1800
1801    #[cfg(test)]
1802    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1803        let root_name = self.root_name.clone();
1804        proto::UpdateWorktree {
1805            project_id,
1806            worktree_id: self.id().to_proto(),
1807            abs_path: self.abs_path().to_string_lossy().into(),
1808            root_name,
1809            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1810            removed_entries: Default::default(),
1811            scan_id: self.scan_id as u64,
1812            is_last_update: true,
1813            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1814            removed_repositories: Default::default(),
1815        }
1816    }
1817
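    /// Compute an `UpdateWorktree` message describing how this snapshot
    /// differs from `other`. Both id-ordered entry lists are walked in
    /// lockstep: ids present only in `self` become `updated_entries`, ids
    /// present only in `other` become `removed_entries`, and ids present in
    /// both are re-sent only when their scan ids differ. Repository entries
    /// are diffed the same way, keyed by work directory.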
1818    pub(crate) fn build_update(
1819        &self,
1820        other: &Self,
1821        project_id: u64,
1822        worktree_id: u64,
1823        include_ignored: bool,
1824    ) -> proto::UpdateWorktree {
1825        let mut updated_entries = Vec::new();
1826        let mut removed_entries = Vec::new();
1827        let mut self_entries = self
1828            .entries_by_id
1829            .cursor::<()>()
1830            .filter(|e| include_ignored || !e.is_ignored)
1831            .peekable();
1832        let mut other_entries = other
1833            .entries_by_id
1834            .cursor::<()>()
1835            .filter(|e| include_ignored || !e.is_ignored)
1836            .peekable();
1837        loop {
1838            match (self_entries.peek(), other_entries.peek()) {
1839                (Some(self_entry), Some(other_entry)) => {
1840                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1841                        Ordering::Less => {
1842                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1843                            updated_entries.push(entry);
1844                            self_entries.next();
1845                        }
1846                        Ordering::Equal => {
1847                            if self_entry.scan_id != other_entry.scan_id {
1848                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1849                                updated_entries.push(entry);
1850                            }
1851
1852                            self_entries.next();
1853                            other_entries.next();
1854                        }
1855                        Ordering::Greater => {
1856                            removed_entries.push(other_entry.id.to_proto());
1857                            other_entries.next();
1858                        }
1859                    }
1860                }
1861                (Some(self_entry), None) => {
1862                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1863                    updated_entries.push(entry);
1864                    self_entries.next();
1865                }
1866                (None, Some(other_entry)) => {
1867                    removed_entries.push(other_entry.id.to_proto());
1868                    other_entries.next();
1869                }
1870                (None, None) => break,
1871            }
1872        }
1873
1874        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1875        let mut removed_repositories = Vec::new();
1876        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1877        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1878        loop {
1879            match (self_repos.peek(), other_repos.peek()) {
1880                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1881                    match Ord::cmp(self_work_dir, other_work_dir) {
1882                        Ordering::Less => {
1883                            updated_repositories.push((*self_repo).into());
1884                            self_repos.next();
1885                        }
1886                        Ordering::Equal => {
1887                            if self_repo != other_repo {
1888                                updated_repositories.push(self_repo.build_update(other_repo));
1889                            }
1890
1891                            self_repos.next();
1892                            other_repos.next();
1893                        }
1894                        Ordering::Greater => {
1895                            removed_repositories.push(other_repo.work_directory.to_proto());
1896                            other_repos.next();
1897                        }
1898                    }
1899                }
1900                (Some((_, self_repo)), None) => {
1901                    updated_repositories.push((*self_repo).into());
1902                    self_repos.next();
1903                }
1904                (None, Some((_, other_repo))) => {
1905                    removed_repositories.push(other_repo.work_directory.to_proto());
1906                    other_repos.next();
1907                }
1908                (None, None) => break,
1909            }
1910        }
1911
1912        proto::UpdateWorktree {
1913            project_id,
1914            worktree_id,
1915            abs_path: self.abs_path().to_string_lossy().into(),
1916            root_name: self.root_name().to_string(),
1917            updated_entries,
1918            removed_entries,
1919            scan_id: self.scan_id as u64,
1920            is_last_update: self.completed_scan_id == self.scan_id,
1921            updated_repositories,
1922            removed_repositories,
1923        }
1924    }
1925
1926    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1927        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1928            let abs_path = self.abs_path.join(&entry.path);
1929            match smol::block_on(build_gitignore(&abs_path, fs)) {
1930                Ok(ignore) => {
1931                    self.ignores_by_parent_abs_path
1932                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
1933                }
1934                Err(error) => {
1935                    log::error!(
1936                        "error loading .gitignore file {:?} - {:?}",
1937                        &entry.path,
1938                        error
1939                    );
1940                }
1941            }
1942        }
1943
1944        if entry.kind == EntryKind::PendingDir {
1945            if let Some(existing_entry) =
1946                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1947            {
1948                entry.kind = existing_entry.kind;
1949            }
1950        }
1951
1952        let scan_id = self.scan_id;
1953        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1954        if let Some(removed) = removed {
1955            if removed.id != entry.id {
1956                self.entries_by_id.remove(&removed.id, &());
1957            }
1958        }
1959        self.entries_by_id.insert_or_replace(
1960            PathEntry {
1961                id: entry.id,
1962                path: entry.path.clone(),
1963                is_ignored: entry.is_ignored,
1964                scan_id,
1965            },
1966            &(),
1967        );
1968
1969        entry
1970    }
1971
1972    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1973        let abs_path = self.abs_path.join(&parent_path);
1974        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1975
1976        // Guard against repositories inside the repository metadata
1977        if work_dir
1978            .components()
1979            .any(|component| component.as_os_str() == *DOT_GIT)
1980        {
1981            return None;
1982        }
1984
1985        let work_dir_id = self
1986            .entry_for_path(work_dir.clone())
1987            .map(|entry| entry.id)?;
1988
1989        if self.git_repositories.get(&work_dir_id).is_none() {
1990            let repo = fs.open_repo(abs_path.as_path())?;
1991            let work_directory = RepositoryWorkDirectory(work_dir.clone());
1992            let scan_id = self.scan_id;
1993
1994            let repo_lock = repo.lock();
1995
1996            self.repository_entries.insert(
1997                work_directory,
1998                RepositoryEntry {
1999                    work_directory: work_dir_id.into(),
2000                    branch: repo_lock.branch_name().map(Into::into),
2001                    statuses: repo_lock.statuses().unwrap_or_default(),
2002                },
2003            );
2004            drop(repo_lock);
2005
2006            self.git_repositories.insert(
2007                work_dir_id,
2008                LocalRepositoryEntry {
2009                    scan_id,
2010                    git_dir_scan_id: scan_id,
2011                    repo_ptr: repo,
2012                    git_dir_path: parent_path.clone(),
2013                },
2014            )
2015        }
2016
2017        Some(())
2018    }
2019
2020    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2021        let mut inodes = TreeSet::default();
2022        for ancestor in path.ancestors().skip(1) {
2023            if let Some(entry) = self.entry_for_path(ancestor) {
2024                inodes.insert(entry.inode);
2025            }
2026        }
2027        inodes
2028    }
2029
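    /// Build the `IgnoreStack` that applies to `abs_path` by walking its
    /// ancestors from the outermost directory inward and layering on each
    /// `.gitignore` that has been loaded for them. If any ancestor (or the
    /// path itself) is ignored, the stack collapses to `IgnoreStack::all()`,
    /// since everything beneath an ignored directory is ignored as well.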
2030    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2031        let mut new_ignores = Vec::new();
2032        for ancestor in abs_path.ancestors().skip(1) {
2033            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2034                new_ignores.push((ancestor, Some(ignore.clone())));
2035            } else {
2036                new_ignores.push((ancestor, None));
2037            }
2038        }
2039
2040        let mut ignore_stack = IgnoreStack::none();
2041        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2042            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2043                ignore_stack = IgnoreStack::all();
2044                break;
2045            } else if let Some(ignore) = ignore {
2046                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2047            }
2048        }
2049
2050        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2051            ignore_stack = IgnoreStack::all();
2052        }
2053
2054        ignore_stack
2055    }
2056}
2057
2058impl LocalMutableSnapshot {
2059    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2060        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2061            entry.id = removed_entry_id;
2062        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2063            entry.id = existing_entry.id;
2064        }
2065    }
2066
2067    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2068        self.reuse_entry_id(&mut entry);
2069        self.snapshot.insert_entry(entry, fs)
2070    }
2071
2072    fn populate_dir(
2073        &mut self,
2074        parent_path: Arc<Path>,
2075        entries: impl IntoIterator<Item = Entry>,
2076        ignore: Option<Arc<Gitignore>>,
2077        fs: &dyn Fs,
2078    ) {
2079        let mut parent_entry = if let Some(parent_entry) =
2080            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
2081        {
2082            parent_entry.clone()
2083        } else {
2084            log::warn!(
2085                "populating a directory {:?} that has been removed",
2086                parent_path
2087            );
2088            return;
2089        };
2090
2091        match parent_entry.kind {
2092            EntryKind::PendingDir => {
2093                parent_entry.kind = EntryKind::Dir;
2094            }
2095            EntryKind::Dir => {}
2096            _ => return,
2097        }
2098
2099        if let Some(ignore) = ignore {
2100            let abs_parent_path = self.abs_path.join(&parent_path).into();
2101            self.ignores_by_parent_abs_path
2102                .insert(abs_parent_path, (ignore, false));
2103        }
2104
2105        if parent_path.file_name() == Some(&DOT_GIT) {
2106            self.build_repo(parent_path, fs);
2107        }
2108
2109        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2110        let mut entries_by_id_edits = Vec::new();
2111
2112        for mut entry in entries {
2113            self.reuse_entry_id(&mut entry);
2114            entries_by_id_edits.push(Edit::Insert(PathEntry {
2115                id: entry.id,
2116                path: entry.path.clone(),
2117                is_ignored: entry.is_ignored,
2118                scan_id: self.scan_id,
2119            }));
2120            entries_by_path_edits.push(Edit::Insert(entry));
2121        }
2122
2123        self.entries_by_path.edit(entries_by_path_edits, &());
2124        self.entries_by_id.edit(entries_by_id_edits, &());
2125    }
2126
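    /// Remove `path` and everything beneath it from the snapshot. The removed
    /// entry ids are remembered per inode in `removed_entry_ids` so that a
    /// subsequent re-insertion of the same file (for example, a rename that is
    /// observed as a removal followed by an addition) can reuse the original
    /// `ProjectEntryId` via `reuse_entry_id`.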
2127    fn remove_path(&mut self, path: &Path) {
2128        let mut new_entries;
2129        let removed_entries;
2130        {
2131            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2132            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2133            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2134            new_entries.push_tree(cursor.suffix(&()), &());
2135        }
2136        self.entries_by_path = new_entries;
2137
2138        let mut entries_by_id_edits = Vec::new();
2139        for entry in removed_entries.cursor::<()>() {
2140            let removed_entry_id = self
2141                .removed_entry_ids
2142                .entry(entry.inode)
2143                .or_insert(entry.id);
2144            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2145            entries_by_id_edits.push(Edit::Remove(entry.id));
2146        }
2147        self.entries_by_id.edit(entries_by_id_edits, &());
2148
2149        if path.file_name() == Some(&GITIGNORE) {
2150            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2151            if let Some((_, needs_update)) = self
2152                .ignores_by_parent_abs_path
2153                .get_mut(abs_parent_path.as_path())
2154            {
2155                *needs_update = true;
2156            }
2157        }
2158    }
2159}
2160
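/// Load and parse the `.gitignore` file at `abs_path`, rooting its rules at the
/// file's parent directory. A rough usage sketch (the `fs` handle is
/// hypothetical):
///
/// ```ignore
/// let ignore = build_gitignore(Path::new("/project/.gitignore"), fs.as_ref()).await?;
/// let is_ignored = ignore.matched("/project/target", /* is_dir */ true).is_ignore();
/// ```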
2161async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2162    let contents = fs.load(abs_path).await?;
2163    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2164    let mut builder = GitignoreBuilder::new(parent);
2165    for line in contents.lines() {
2166        builder.add_line(Some(abs_path.into()), line)?;
2167    }
2168    Ok(builder.build()?)
2169}
2170
2171impl WorktreeId {
2172    pub fn from_usize(handle_id: usize) -> Self {
2173        Self(handle_id)
2174    }
2175
2176    pub(crate) fn from_proto(id: u64) -> Self {
2177        Self(id as usize)
2178    }
2179
2180    pub fn to_proto(&self) -> u64 {
2181        self.0 as u64
2182    }
2183
2184    pub fn to_usize(&self) -> usize {
2185        self.0
2186    }
2187}
2188
2189impl fmt::Display for WorktreeId {
2190    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2191        self.0.fmt(f)
2192    }
2193}
2194
2195impl Deref for Worktree {
2196    type Target = Snapshot;
2197
2198    fn deref(&self) -> &Self::Target {
2199        match self {
2200            Worktree::Local(worktree) => &worktree.snapshot,
2201            Worktree::Remote(worktree) => &worktree.snapshot,
2202        }
2203    }
2204}
2205
2206impl Deref for LocalWorktree {
2207    type Target = LocalSnapshot;
2208
2209    fn deref(&self) -> &Self::Target {
2210        &self.snapshot
2211    }
2212}
2213
2214impl Deref for RemoteWorktree {
2215    type Target = Snapshot;
2216
2217    fn deref(&self) -> &Self::Target {
2218        &self.snapshot
2219    }
2220}
2221
2222impl fmt::Debug for LocalWorktree {
2223    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2224        self.snapshot.fmt(f)
2225    }
2226}
2227
2228impl fmt::Debug for Snapshot {
2229    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2230        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2231        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2232
2233        impl<'a> fmt::Debug for EntriesByPath<'a> {
2234            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2235                f.debug_map()
2236                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2237                    .finish()
2238            }
2239        }
2240
2241        impl<'a> fmt::Debug for EntriesById<'a> {
2242            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2243                f.debug_list().entries(self.0.iter()).finish()
2244            }
2245        }
2246
2247        f.debug_struct("Snapshot")
2248            .field("id", &self.id)
2249            .field("root_name", &self.root_name)
2250            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2251            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2252            .finish()
2253    }
2254}
2255
2256#[derive(Clone, PartialEq)]
2257pub struct File {
2258    pub worktree: ModelHandle<Worktree>,
2259    pub path: Arc<Path>,
2260    pub mtime: SystemTime,
2261    pub(crate) entry_id: ProjectEntryId,
2262    pub(crate) is_local: bool,
2263    pub(crate) is_deleted: bool,
2264}
2265
2266impl language::File for File {
2267    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2268        if self.is_local {
2269            Some(self)
2270        } else {
2271            None
2272        }
2273    }
2274
2275    fn mtime(&self) -> SystemTime {
2276        self.mtime
2277    }
2278
2279    fn path(&self) -> &Arc<Path> {
2280        &self.path
2281    }
2282
2283    fn full_path(&self, cx: &AppContext) -> PathBuf {
2284        let mut full_path = PathBuf::new();
2285        let worktree = self.worktree.read(cx);
2286
2287        if worktree.is_visible() {
2288            full_path.push(worktree.root_name());
2289        } else {
2290            let path = worktree.abs_path();
2291
2292            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2293                full_path.push("~");
2294                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2295            } else {
2296                full_path.push(path)
2297            }
2298        }
2299
2300        if self.path.components().next().is_some() {
2301            full_path.push(&self.path);
2302        }
2303
2304        full_path
2305    }
2306
2307    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2308    /// of its worktree, then this method will return the name of the worktree itself.
2309    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2310        self.path
2311            .file_name()
2312            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2313    }
2314
2315    fn is_deleted(&self) -> bool {
2316        self.is_deleted
2317    }
2318
2319    fn as_any(&self) -> &dyn Any {
2320        self
2321    }
2322
2323    fn to_proto(&self) -> rpc::proto::File {
2324        rpc::proto::File {
2325            worktree_id: self.worktree.id() as u64,
2326            entry_id: self.entry_id.to_proto(),
2327            path: self.path.to_string_lossy().into(),
2328            mtime: Some(self.mtime.into()),
2329            is_deleted: self.is_deleted,
2330        }
2331    }
2332}
2333
2334impl language::LocalFile for File {
2335    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2336        self.worktree
2337            .read(cx)
2338            .as_local()
2339            .unwrap()
2340            .abs_path
2341            .join(&self.path)
2342    }
2343
2344    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2345        let worktree = self.worktree.read(cx).as_local().unwrap();
2346        let abs_path = worktree.absolutize(&self.path);
2347        let fs = worktree.fs.clone();
2348        cx.background()
2349            .spawn(async move { fs.load(&abs_path).await })
2350    }
2351
2352    fn buffer_reloaded(
2353        &self,
2354        buffer_id: u64,
2355        version: &clock::Global,
2356        fingerprint: RopeFingerprint,
2357        line_ending: LineEnding,
2358        mtime: SystemTime,
2359        cx: &mut AppContext,
2360    ) {
2361        let worktree = self.worktree.read(cx).as_local().unwrap();
2362        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2363            worktree
2364                .client
2365                .send(proto::BufferReloaded {
2366                    project_id,
2367                    buffer_id,
2368                    version: serialize_version(version),
2369                    mtime: Some(mtime.into()),
2370                    fingerprint: serialize_fingerprint(fingerprint),
2371                    line_ending: serialize_line_ending(line_ending) as i32,
2372                })
2373                .log_err();
2374        }
2375    }
2376}
2377
2378impl File {
2379    pub fn from_proto(
2380        proto: rpc::proto::File,
2381        worktree: ModelHandle<Worktree>,
2382        cx: &AppContext,
2383    ) -> Result<Self> {
2384        let worktree_id = worktree
2385            .read(cx)
2386            .as_remote()
2387            .ok_or_else(|| anyhow!("not remote"))?
2388            .id();
2389
2390        if worktree_id.to_proto() != proto.worktree_id {
2391            return Err(anyhow!("worktree id does not match file"));
2392        }
2393
2394        Ok(Self {
2395            worktree,
2396            path: Path::new(&proto.path).into(),
2397            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2398            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2399            is_local: false,
2400            is_deleted: proto.is_deleted,
2401        })
2402    }
2403
2404    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2405        file.and_then(|f| f.as_any().downcast_ref())
2406    }
2407
2408    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2409        self.worktree.read(cx).id()
2410    }
2411
2412    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2413        if self.is_deleted {
2414            None
2415        } else {
2416            Some(self.entry_id)
2417        }
2418    }
2419}
2420
2421#[derive(Clone, Debug, PartialEq, Eq)]
2422pub struct Entry {
2423    pub id: ProjectEntryId,
2424    pub kind: EntryKind,
2425    pub path: Arc<Path>,
2426    pub inode: u64,
2427    pub mtime: SystemTime,
2428    pub is_symlink: bool,
2429    pub is_ignored: bool,
2430}
2431
2432#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2433pub enum EntryKind {
2434    PendingDir,
2435    Dir,
2436    File(CharBag),
2437}
2438
2439#[derive(Clone, Copy, Debug)]
2440pub enum PathChange {
2441    Added,
2442    Removed,
2443    Updated,
2444    AddedOrUpdated,
2445}
2446
2447impl Entry {
2448    fn new(
2449        path: Arc<Path>,
2450        metadata: &fs::Metadata,
2451        next_entry_id: &AtomicUsize,
2452        root_char_bag: CharBag,
2453    ) -> Self {
2454        Self {
2455            id: ProjectEntryId::new(next_entry_id),
2456            kind: if metadata.is_dir {
2457                EntryKind::PendingDir
2458            } else {
2459                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2460            },
2461            path,
2462            inode: metadata.inode,
2463            mtime: metadata.mtime,
2464            is_symlink: metadata.is_symlink,
2465            is_ignored: false,
2466        }
2467    }
2468
2469    pub fn is_dir(&self) -> bool {
2470        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2471    }
2472
2473    pub fn is_file(&self) -> bool {
2474        matches!(self.kind, EntryKind::File(_))
2475    }
2476}
2477
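// Each `Entry` contributes per-item counts to its `EntrySummary`; interior
// `SumTree` nodes store the aggregated summary of their children, which is
// what lets `Traversal` seek by path or by visible-file count in O(log n).
// For example, with entries `a` (dir), `a/b.rs` (file), and `a/c.rs`
// (ignored file), the aggregated summary would be:
//
//     count: 3, visible_count: 2, file_count: 2, visible_file_count: 1, max_path: "a/c.rs"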
2478impl sum_tree::Item for Entry {
2479    type Summary = EntrySummary;
2480
2481    fn summary(&self) -> Self::Summary {
2482        let visible_count = if self.is_ignored { 0 } else { 1 };
2483        let file_count;
2484        let visible_file_count;
2485        if self.is_file() {
2486            file_count = 1;
2487            visible_file_count = visible_count;
2488        } else {
2489            file_count = 0;
2490            visible_file_count = 0;
2491        }
2492
2493        EntrySummary {
2494            max_path: self.path.clone(),
2495            count: 1,
2496            visible_count,
2497            file_count,
2498            visible_file_count,
2499        }
2500    }
2501}
2502
2503impl sum_tree::KeyedItem for Entry {
2504    type Key = PathKey;
2505
2506    fn key(&self) -> Self::Key {
2507        PathKey(self.path.clone())
2508    }
2509}
2510
2511#[derive(Clone, Debug)]
2512pub struct EntrySummary {
2513    max_path: Arc<Path>,
2514    count: usize,
2515    visible_count: usize,
2516    file_count: usize,
2517    visible_file_count: usize,
2518}
2519
2520impl Default for EntrySummary {
2521    fn default() -> Self {
2522        Self {
2523            max_path: Arc::from(Path::new("")),
2524            count: 0,
2525            visible_count: 0,
2526            file_count: 0,
2527            visible_file_count: 0,
2528        }
2529    }
2530}
2531
2532impl sum_tree::Summary for EntrySummary {
2533    type Context = ();
2534
2535    fn add_summary(&mut self, rhs: &Self, _: &()) {
2536        self.max_path = rhs.max_path.clone();
2537        self.count += rhs.count;
2538        self.visible_count += rhs.visible_count;
2539        self.file_count += rhs.file_count;
2540        self.visible_file_count += rhs.visible_file_count;
2541    }
2542}
2543
2544#[derive(Clone, Debug)]
2545struct PathEntry {
2546    id: ProjectEntryId,
2547    path: Arc<Path>,
2548    is_ignored: bool,
2549    scan_id: usize,
2550}
2551
2552impl sum_tree::Item for PathEntry {
2553    type Summary = PathEntrySummary;
2554
2555    fn summary(&self) -> Self::Summary {
2556        PathEntrySummary { max_id: self.id }
2557    }
2558}
2559
2560impl sum_tree::KeyedItem for PathEntry {
2561    type Key = ProjectEntryId;
2562
2563    fn key(&self) -> Self::Key {
2564        self.id
2565    }
2566}
2567
2568#[derive(Clone, Debug, Default)]
2569struct PathEntrySummary {
2570    max_id: ProjectEntryId,
2571}
2572
2573impl sum_tree::Summary for PathEntrySummary {
2574    type Context = ();
2575
2576    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2577        self.max_id = summary.max_id;
2578    }
2579}
2580
2581impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2582    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2583        *self = summary.max_id;
2584    }
2585}
2586
2587#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2588pub struct PathKey(Arc<Path>);
2589
2590impl Default for PathKey {
2591    fn default() -> Self {
2592        Self(Path::new("").into())
2593    }
2594}
2595
2596impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2597    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2598        self.0 = summary.max_path.clone();
2599    }
2600}
2601
2602struct BackgroundScanner {
2603    snapshot: Mutex<LocalMutableSnapshot>,
2604    fs: Arc<dyn Fs>,
2605    status_updates_tx: UnboundedSender<ScanState>,
2606    executor: Arc<executor::Background>,
2607    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2608    prev_state: Mutex<BackgroundScannerState>,
2609    next_entry_id: Arc<AtomicUsize>,
2610    finished_initial_scan: bool,
2611}
2612
2613struct BackgroundScannerState {
2614    snapshot: Snapshot,
2615    event_paths: Vec<Arc<Path>>,
2616}
2617
2618impl BackgroundScanner {
2619    fn new(
2620        snapshot: LocalSnapshot,
2621        next_entry_id: Arc<AtomicUsize>,
2622        fs: Arc<dyn Fs>,
2623        status_updates_tx: UnboundedSender<ScanState>,
2624        executor: Arc<executor::Background>,
2625        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2626    ) -> Self {
2627        Self {
2628            fs,
2629            status_updates_tx,
2630            executor,
2631            refresh_requests_rx,
2632            next_entry_id,
2633            prev_state: Mutex::new(BackgroundScannerState {
2634                snapshot: snapshot.snapshot.clone(),
2635                event_paths: Default::default(),
2636            }),
2637            snapshot: Mutex::new(LocalMutableSnapshot {
2638                snapshot,
2639                removed_entry_ids: Default::default(),
2640            }),
2641            finished_initial_scan: false,
2642        }
2643    }
2644
2645    async fn run(
2646        &mut self,
2647        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2648    ) {
2649        use futures::FutureExt as _;
2650
2651        let (root_abs_path, root_inode) = {
2652            let snapshot = self.snapshot.lock();
2653            (
2654                snapshot.abs_path.clone(),
2655                snapshot.root_entry().map(|e| e.inode),
2656            )
2657        };
2658
2659        // Populate ignores above the root.
2660        let ignore_stack;
2661        for ancestor in root_abs_path.ancestors().skip(1) {
2662            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2663            {
2664                self.snapshot
2665                    .lock()
2666                    .ignores_by_parent_abs_path
2667                    .insert(ancestor.into(), (ignore.into(), false));
2668            }
2669        }
2670        {
2671            let mut snapshot = self.snapshot.lock();
2672            snapshot.scan_id += 1;
2673            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2674            if ignore_stack.is_all() {
2675                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2676                    root_entry.is_ignored = true;
2677                    snapshot.insert_entry(root_entry, self.fs.as_ref());
2678                }
2679            }
2680        };
2681
2682        // Perform an initial scan of the directory.
2683        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2684        smol::block_on(scan_job_tx.send(ScanJob {
2685            abs_path: root_abs_path,
2686            path: Arc::from(Path::new("")),
2687            ignore_stack,
2688            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2689            scan_queue: scan_job_tx.clone(),
2690        }))
2691        .unwrap();
2692        drop(scan_job_tx);
2693        self.scan_dirs(true, scan_job_rx).await;
2694        {
2695            let mut snapshot = self.snapshot.lock();
2696            snapshot.completed_scan_id = snapshot.scan_id;
2697        }
2698        self.send_status_update(false, None);
2699
2700        // Process any FS events that occurred while performing the initial scan.
2701        // For these events, the reported changes cannot be as precise, because we
2702        // didn't have the previous state loaded yet.
2703        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2704            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2705            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2706                paths.extend(more_events.into_iter().map(|e| e.path));
2707            }
2708            self.process_events(paths).await;
2709        }
2710
2711        self.finished_initial_scan = true;
2712
2713        // Continue processing events until the worktree is dropped.
2714        loop {
2715            select_biased! {
2716                // Process any path refresh requests from the worktree. Prioritize
2717                // these before handling changes reported by the filesystem.
2718                request = self.refresh_requests_rx.recv().fuse() => {
2719                    let Ok((paths, barrier)) = request else { break };
2720                    if !self.process_refresh_request(paths.clone(), barrier).await {
2721                        return;
2722                    }
2723                }
2724
2725                events = events_rx.next().fuse() => {
2726                    let Some(events) = events else { break };
2727                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2728                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2729                        paths.extend(more_events.into_iter().map(|e| e.path));
2730                    }
2731                    self.process_events(paths.clone()).await;
2732                }
2733            }
2734        }
2735    }
2736
2737    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2738        if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
2739            paths.sort_unstable();
2740            util::extend_sorted(
2741                &mut self.prev_state.lock().event_paths,
2742                paths,
2743                usize::MAX,
2744                Ord::cmp,
2745            );
2746        }
2747        self.send_status_update(false, Some(barrier))
2748    }
2749
2750    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2751        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2752        let paths = self
2753            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2754            .await;
2755        if let Some(paths) = &paths {
2756            util::extend_sorted(
2757                &mut self.prev_state.lock().event_paths,
2758                paths.iter().cloned(),
2759                usize::MAX,
2760                Ord::cmp,
2761            );
2762        }
2763        drop(scan_job_tx);
2764        self.scan_dirs(false, scan_job_rx).await;
2765
2766        self.update_ignore_statuses().await;
2767
2768        let mut snapshot = self.snapshot.lock();
2769
2770        if let Some(paths) = paths {
2771            for path in paths {
2772                self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
2773            }
2774        }
2775
2776        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2777        git_repositories.retain(|work_directory_id, _| {
2778            snapshot
2779                .entry_for_id(*work_directory_id)
2780                .map_or(false, |entry| {
2781                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2782                })
2783        });
2784        snapshot.git_repositories = git_repositories;
2785
2786        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2787        git_repository_entries.retain(|_, entry| {
2788            snapshot
2789                .git_repositories
2790                .get(&entry.work_directory.0)
2791                .is_some()
2792        });
2793        snapshot.snapshot.repository_entries = git_repository_entries;
2794        snapshot.completed_scan_id = snapshot.scan_id;
2795        drop(snapshot);
2796
2797        self.send_status_update(false, None);
2798        self.prev_state.lock().event_paths.clear();
2799    }
2800
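    // Drain the scan queue using one worker task per CPU. Each worker gives
    // priority to path refresh requests over queued directory scans, and the
    // shared `progress_update_count` ensures that only one worker actually
    // sends a progress update per interval: the others lose the
    // compare-and-swap and simply resynchronize their local counter.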
2801    async fn scan_dirs(
2802        &self,
2803        enable_progress_updates: bool,
2804        scan_jobs_rx: channel::Receiver<ScanJob>,
2805    ) {
2806        use futures::FutureExt as _;
2807
2808        if self
2809            .status_updates_tx
2810            .unbounded_send(ScanState::Started)
2811            .is_err()
2812        {
2813            return;
2814        }
2815
2816        let progress_update_count = AtomicUsize::new(0);
2817        self.executor
2818            .scoped(|scope| {
2819                for _ in 0..self.executor.num_cpus() {
2820                    scope.spawn(async {
2821                        let mut last_progress_update_count = 0;
2822                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2823                        futures::pin_mut!(progress_update_timer);
2824
2825                        loop {
2826                            select_biased! {
2827                                // Process any path refresh requests before moving on to process
2828                                // the scan queue, so that user operations are prioritized.
2829                                request = self.refresh_requests_rx.recv().fuse() => {
2830                                    let Ok((paths, barrier)) = request else { break };
2831                                    if !self.process_refresh_request(paths, barrier).await {
2832                                        return;
2833                                    }
2834                                }
2835
2836                                // Send periodic progress updates to the worktree. Use an atomic counter
2837                                // to ensure that only one of the workers sends a progress update after
2838                                // the update interval elapses.
2839                                _ = progress_update_timer => {
2840                                    match progress_update_count.compare_exchange(
2841                                        last_progress_update_count,
2842                                        last_progress_update_count + 1,
2843                                        SeqCst,
2844                                        SeqCst
2845                                    ) {
2846                                        Ok(_) => {
2847                                            last_progress_update_count += 1;
2848                                            self.send_status_update(true, None);
2849                                        }
2850                                        Err(count) => {
2851                                            last_progress_update_count = count;
2852                                        }
2853                                    }
2854                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2855                                }
2856
2857                                // Recursively load directories from the file system.
2858                                job = scan_jobs_rx.recv().fuse() => {
2859                                    let Ok(job) = job else { break };
2860                                    if let Err(err) = self.scan_dir(&job).await {
2861                                        if job.path.as_ref() != Path::new("") {
2862                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2863                                        }
2864                                    }
2865                                }
2866                            }
2867                        }
2868                    })
2869                }
2870            })
2871            .await;
2872    }
2873
2874    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2875        let mut prev_state = self.prev_state.lock();
2876        let new_snapshot = self.snapshot.lock().clone();
2877        let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());
2878
2879        let changes = self.build_change_set(
2880            &old_snapshot,
2881            &new_snapshot.snapshot,
2882            &prev_state.event_paths,
2883        );
2884
2885        self.status_updates_tx
2886            .unbounded_send(ScanState::Updated {
2887                snapshot: new_snapshot,
2888                changes,
2889                scanning,
2890                barrier,
2891            })
2892            .is_ok()
2893    }
2894
2895    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2896        let mut new_entries: Vec<Entry> = Vec::new();
2897        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2898        let mut ignore_stack = job.ignore_stack.clone();
2899        let mut new_ignore = None;
2900        let (root_abs_path, root_char_bag, next_entry_id) = {
2901            let snapshot = self.snapshot.lock();
2902            (
2903                snapshot.abs_path().clone(),
2904                snapshot.root_char_bag,
2905                self.next_entry_id.clone(),
2906            )
2907        };
2908        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2909        while let Some(child_abs_path) = child_paths.next().await {
2910            let child_abs_path: Arc<Path> = match child_abs_path {
2911                Ok(child_abs_path) => child_abs_path.into(),
2912                Err(error) => {
2913                    log::error!("error processing entry {:?}", error);
2914                    continue;
2915                }
2916            };
2917
2918            let child_name = child_abs_path.file_name().unwrap();
2919            let child_path: Arc<Path> = job.path.join(child_name).into();
2920            let child_metadata = match self.fs.metadata(&child_abs_path).await {
2921                Ok(Some(metadata)) => metadata,
2922                Ok(None) => continue,
2923                Err(err) => {
2924                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
2925                    continue;
2926                }
2927            };
2928
2929            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2930            if child_name == *GITIGNORE {
2931                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2932                    Ok(ignore) => {
2933                        let ignore = Arc::new(ignore);
2934                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2935                        new_ignore = Some(ignore);
2936                    }
2937                    Err(error) => {
2938                        log::error!(
2939                            "error loading .gitignore file {:?} - {:?}",
2940                            child_name,
2941                            error
2942                        );
2943                    }
2944                }
2945
2946                // Update the ignore status of any child entries we've already processed, to
2947                // reflect the ignore file in the current directory. Because `.gitignore` starts
2948                // with a `.`, it is usually encountered early in the listing, so there should
2949                // rarely be many such entries. Also update the ignore stack of any new jobs.
2950                let mut new_jobs = new_jobs.iter_mut();
2951                for entry in &mut new_entries {
2952                    let entry_abs_path = root_abs_path.join(&entry.path);
2953                    entry.is_ignored =
2954                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2955
2956                    if entry.is_dir() {
2957                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2958                            job.ignore_stack = if entry.is_ignored {
2959                                IgnoreStack::all()
2960                            } else {
2961                                ignore_stack.clone()
2962                            };
2963                        }
2964                    }
2965                }
2966            }
2967
2968            let mut child_entry = Entry::new(
2969                child_path.clone(),
2970                &child_metadata,
2971                &next_entry_id,
2972                root_char_bag,
2973            );
2974
2975            if child_entry.is_dir() {
2976                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2977                child_entry.is_ignored = is_ignored;
2978
2979                // Avoid recursing forever (and eventually crashing) when a directory contains a symlink cycle
2980                if !job.ancestor_inodes.contains(&child_entry.inode) {
2981                    let mut ancestor_inodes = job.ancestor_inodes.clone();
2982                    ancestor_inodes.insert(child_entry.inode);
2983
2984                    new_jobs.push(Some(ScanJob {
2985                        abs_path: child_abs_path,
2986                        path: child_path,
2987                        ignore_stack: if is_ignored {
2988                            IgnoreStack::all()
2989                        } else {
2990                            ignore_stack.clone()
2991                        },
2992                        ancestor_inodes,
2993                        scan_queue: job.scan_queue.clone(),
2994                    }));
2995                } else {
2996                    new_jobs.push(None);
2997                }
2998            } else {
2999                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
3000            }
3001
3002            new_entries.push(child_entry);
3003        }
3004
3005        self.snapshot.lock().populate_dir(
3006            job.path.clone(),
3007            new_entries,
3008            new_ignore,
3009            self.fs.as_ref(),
3010        );
3011
3012        for new_job in new_jobs {
3013            if let Some(new_job) = new_job {
3014                job.scan_queue.send(new_job).await.unwrap();
3015            }
3016        }
3017
3018        Ok(())
3019    }
3020
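    /// Reloads worktree entries for the given absolute paths, typically in response to
    /// file system events or explicit refresh requests. Stale entries are removed first,
    /// then metadata is re-read and entries are reinserted; when a `scan_queue_tx` is
    /// provided, changed directories are enqueued for a recursive rescan. Returns the
    /// affected worktree-relative paths, or `None` if the root cannot be canonicalized.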
3021    async fn reload_entries_for_paths(
3022        &self,
3023        mut abs_paths: Vec<PathBuf>,
3024        scan_queue_tx: Option<Sender<ScanJob>>,
3025    ) -> Option<Vec<Arc<Path>>> {
3026        let doing_recursive_update = scan_queue_tx.is_some();
3027
3028        abs_paths.sort_unstable();
3029        abs_paths.dedup_by(|a, b| a.starts_with(&b));
3030
3031        let root_abs_path = self.snapshot.lock().abs_path.clone();
3032        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
3033        let metadata = futures::future::join_all(
3034            abs_paths
3035                .iter()
3036                .map(|abs_path| self.fs.metadata(&abs_path))
3037                .collect::<Vec<_>>(),
3038        )
3039        .await;
3040
3041        let mut snapshot = self.snapshot.lock();
3042        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
3043        snapshot.scan_id += 1;
3044        if is_idle && !doing_recursive_update {
3045            snapshot.completed_scan_id = snapshot.scan_id;
3046        }
3047
3048        // Remove any entries for paths that no longer exist or are being recursively
3049        // refreshed. Do this before adding any new entries, so that renames can be
3050        // detected regardless of the order of the paths.
3051        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
3052        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
3053            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3054                if matches!(metadata, Ok(None)) || doing_recursive_update {
3055                    snapshot.remove_path(path);
3056                }
3057                event_paths.push(path.into());
3058            } else {
3059                log::error!(
3060                    "unexpected event {:?} for root path {:?}",
3061                    abs_path,
3062                    root_canonical_path
3063                );
3064            }
3065        }
3066
3067        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
3068            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
3069
3070            match metadata {
3071                Ok(Some(metadata)) => {
3072                    let ignore_stack =
3073                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
3074                    let mut fs_entry = Entry::new(
3075                        path.clone(),
3076                        &metadata,
3077                        self.next_entry_id.as_ref(),
3078                        snapshot.root_char_bag,
3079                    );
3080                    fs_entry.is_ignored = ignore_stack.is_all();
3081                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
3082
3083                    if let Some(scan_queue_tx) = &scan_queue_tx {
3084                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
3085                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
3086                            ancestor_inodes.insert(metadata.inode);
3087                            smol::block_on(scan_queue_tx.send(ScanJob {
3088                                abs_path,
3089                                path,
3090                                ignore_stack,
3091                                ancestor_inodes,
3092                                scan_queue: scan_queue_tx.clone(),
3093                            }))
3094                            .unwrap();
3095                        }
3096                    }
3097                }
3098                Ok(None) => {
3099                    self.remove_repo_path(&path, &mut snapshot);
3100                }
3101                Err(err) => {
3102                    // TODO - create a special 'error' entry in the entries tree to mark this
3103                    log::error!("error reading file on event {:?}", err);
3104                }
3105            }
3106        }
3107
3108        Some(event_paths)
3109    }
3110
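    /// Clears git repository state for a removed path. If the path was a repository's
    /// work directory, the repository is dropped entirely; otherwise the containing
    /// repository's scan id is bumped and statuses for the path and its descendants are
    /// removed. Paths inside a `.git` directory are left untouched.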
3111    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3112        if !path
3113            .components()
3114            .any(|component| component.as_os_str() == *DOT_GIT)
3115        {
3116            let scan_id = snapshot.scan_id;
3117
3118            if let Some(repository) = snapshot.repository_for_work_directory(path) {
3119                let entry = repository.work_directory.0;
3120                snapshot.git_repositories.remove(&entry);
3121                snapshot
3122                    .snapshot
3123                    .repository_entries
3124                    .remove(&RepositoryWorkDirectory(path.into()));
3125                return Some(());
3126            }
3127
3128            let repo = snapshot.repository_for_path(&path)?;
3129
3130            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
3131
3132            let work_dir = repo.work_directory(snapshot)?;
3133            let work_dir_id = repo.work_directory;
3134
3135            snapshot
3136                .git_repositories
3137                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3138
3139            snapshot.repository_entries.update(&work_dir, |entry| {
3140                entry
3141                    .statuses
3142                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
3143            });
3144        }
3145
3146        Some(())
3147    }
3148
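    /// Refreshes git state in response to a changed path. For paths inside a `.git`
    /// directory, the corresponding repository's index, branch, and statuses are
    /// reloaded (building the repository first if it isn't known yet). For other paths,
    /// the containing repository's scan id is bumped and the statuses of the path's
    /// descendant entries are recomputed, unless the repository was already scanned in
    /// this cycle.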
3149    fn reload_repo_for_file_path(
3150        &self,
3151        path: &Path,
3152        snapshot: &mut LocalSnapshot,
3153        fs: &dyn Fs,
3154    ) -> Option<()> {
3155        let scan_id = snapshot.scan_id;
3156
3157        if path
3158            .components()
3159            .any(|component| component.as_os_str() == *DOT_GIT)
3160        {
3161            let (entry_id, repo_ptr) = {
3162                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
3163                    let dot_git_dir = path
3164                        .ancestors()
3165                        .find(|ancestor| ancestor.file_name() == Some(&*DOT_GIT))?;
3166
3167                    snapshot.build_repo(dot_git_dir.into(), fs);
3168                    return None;
3169                };
3170                if repo.git_dir_scan_id == scan_id {
3171                    return None;
3172                }
3173                (*entry_id, repo.repo_ptr.to_owned())
3174            };
3175
3176            let work_dir = snapshot
3177                .entry_for_id(entry_id)
3178                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
3179
3180            let repo = repo_ptr.lock();
3181            repo.reload_index();
3182            let branch = repo.branch_name();
3183            let statuses = repo.statuses().unwrap_or_default();
3184
3185            snapshot.git_repositories.update(&entry_id, |entry| {
3186                entry.scan_id = scan_id;
3187                entry.git_dir_scan_id = scan_id;
3188            });
3189
3190            snapshot.repository_entries.update(&work_dir, |entry| {
3191                entry.branch = branch.map(Into::into);
3192                entry.statuses = statuses;
3193            });
3194        } else {
3195            if snapshot
3196                .entry_for_path(&path)
3197                .map(|entry| entry.is_ignored)
3198                .unwrap_or(false)
3199            {
3200                self.remove_repo_path(&path, snapshot);
3201                return None;
3202            }
3203
3204            let repo = snapshot.repository_for_path(&path)?;
3205
3206            let work_dir = repo.work_directory(snapshot)?;
3207            let work_dir_id = repo.work_directory.clone();
3208
3209            snapshot
3210                .git_repositories
3211                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3212
3213            let local_repo = snapshot.get_local_repo(&repo)?.to_owned();
3214
3215            // Short circuit if we've already scanned everything
3216            if local_repo.git_dir_scan_id == scan_id {
3217                return None;
3218            }
3219
3220            let mut repository = snapshot.repository_entries.remove(&work_dir)?;
3221
3222            for entry in snapshot.descendent_entries(false, false, path) {
3223                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
3224                    continue;
3225                };
3226
3227                let status = local_repo.repo_ptr.lock().status(&repo_path);
3228                if let Some(status) = status {
3229                    repository.statuses.insert(repo_path.clone(), status);
3230                } else {
3231                    repository.statuses.remove(&repo_path);
3232                }
3233            }
3234
3235            snapshot.repository_entries.insert(work_dir, repository)
3236        }
3237
3238        Some(())
3239    }
3240
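    /// Recomputes ignore status after `.gitignore` files change. Gitignores that no
    /// longer exist are dropped, and an `UpdateIgnoreStatusJob` is enqueued for each
    /// directory whose ignore rules changed; the jobs are processed in parallel across
    /// the executor's threads, interleaved with any incoming path refresh requests.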
3241    async fn update_ignore_statuses(&self) {
3242        use futures::FutureExt as _;
3243
3244        let mut snapshot = self.snapshot.lock().clone();
3245        let mut ignores_to_update = Vec::new();
3246        let mut ignores_to_delete = Vec::new();
3247        let abs_path = snapshot.abs_path.clone();
3248        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
3249            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
3250                if *needs_update {
3251                    *needs_update = false;
3252                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
3253                        ignores_to_update.push(parent_abs_path.clone());
3254                    }
3255                }
3256
3257                let ignore_path = parent_path.join(&*GITIGNORE);
3258                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
3259                    ignores_to_delete.push(parent_abs_path.clone());
3260                }
3261            }
3262        }
3263
3264        for parent_abs_path in ignores_to_delete {
3265            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
3266            self.snapshot
3267                .lock()
3268                .ignores_by_parent_abs_path
3269                .remove(&parent_abs_path);
3270        }
3271
3272        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
3273        ignores_to_update.sort_unstable();
3274        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
3275        while let Some(parent_abs_path) = ignores_to_update.next() {
3276            while ignores_to_update
3277                .peek()
3278                .map_or(false, |p| p.starts_with(&parent_abs_path))
3279            {
3280                ignores_to_update.next().unwrap();
3281            }
3282
3283            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
3284            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
3285                abs_path: parent_abs_path,
3286                ignore_stack,
3287                ignore_queue: ignore_queue_tx.clone(),
3288            }))
3289            .unwrap();
3290        }
3291        drop(ignore_queue_tx);
3292
3293        self.executor
3294            .scoped(|scope| {
3295                for _ in 0..self.executor.num_cpus() {
3296                    scope.spawn(async {
3297                        loop {
3298                            select_biased! {
3299                                // Process any path refresh requests before moving on to process
3300                                // the queue of ignore statuses.
3301                                request = self.refresh_requests_rx.recv().fuse() => {
3302                                    let Ok((paths, barrier)) = request else { break };
3303                                    if !self.process_refresh_request(paths, barrier).await {
3304                                        return;
3305                                    }
3306                                }
3307
3308                                // Recursively process directories whose ignores have changed.
3309                                job = ignore_queue_rx.recv().fuse() => {
3310                                    let Ok(job) = job else { break };
3311                                    self.update_ignore_status(job, &snapshot).await;
3312                                }
3313                            }
3314                        }
3315                    });
3316                }
3317            })
3318            .await;
3319    }
3320
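    /// Processes a single `UpdateIgnoreStatusJob`: extends the ignore stack with the
    /// directory's own `.gitignore` (if any), recomputes `is_ignored` for each child
    /// entry, enqueues further jobs for child directories, and writes back edits for
    /// entries whose ignore status changed.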
3321    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
3322        let mut ignore_stack = job.ignore_stack;
3323        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
3324            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3325        }
3326
3327        let mut entries_by_id_edits = Vec::new();
3328        let mut entries_by_path_edits = Vec::new();
3329        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
3330        for mut entry in snapshot.child_entries(path).cloned() {
3331            let was_ignored = entry.is_ignored;
3332            let abs_path = snapshot.abs_path().join(&entry.path);
3333            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
3334            if entry.is_dir() {
3335                let child_ignore_stack = if entry.is_ignored {
3336                    IgnoreStack::all()
3337                } else {
3338                    ignore_stack.clone()
3339                };
3340                job.ignore_queue
3341                    .send(UpdateIgnoreStatusJob {
3342                        abs_path: abs_path.into(),
3343                        ignore_stack: child_ignore_stack,
3344                        ignore_queue: job.ignore_queue.clone(),
3345                    })
3346                    .await
3347                    .unwrap();
3348            }
3349
3350            if entry.is_ignored != was_ignored {
3351                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
3352                path_entry.scan_id = snapshot.scan_id;
3353                path_entry.is_ignored = entry.is_ignored;
3354                entries_by_id_edits.push(Edit::Insert(path_entry));
3355                entries_by_path_edits.push(Edit::Insert(entry));
3356            }
3357        }
3358
3359        let mut snapshot = self.snapshot.lock();
3360        snapshot.entries_by_path.edit(entries_by_path_edits, &());
3361        snapshot.entries_by_id.edit(entries_by_id_edits, &());
3362    }
3363
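    /// Diffs the old and new snapshots around the given event paths, classifying each
    /// affected entry as `Added`, `Removed`, or `Updated`. Events received before the
    /// initial scan finished are reported as `AddedOrUpdated`, since it's impossible to
    /// tell whether such an entry is new or merely modified.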
3364    fn build_change_set(
3365        &self,
3366        old_snapshot: &Snapshot,
3367        new_snapshot: &Snapshot,
3368        event_paths: &[Arc<Path>],
3369    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
3370        use PathChange::{Added, AddedOrUpdated, Removed, Updated};
3371
3372        let mut changes = HashMap::default();
3373        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3374        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3375        let received_before_initialized = !self.finished_initial_scan;
3376
3377        for path in event_paths {
3378            let path = PathKey(path.clone());
3379            old_paths.seek(&path, Bias::Left, &());
3380            new_paths.seek(&path, Bias::Left, &());
3381
3382            loop {
3383                match (old_paths.item(), new_paths.item()) {
3384                    (Some(old_entry), Some(new_entry)) => {
3385                        if old_entry.path > path.0
3386                            && new_entry.path > path.0
3387                            && !old_entry.path.starts_with(&path.0)
3388                            && !new_entry.path.starts_with(&path.0)
3389                        {
3390                            break;
3391                        }
3392
3393                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3394                            Ordering::Less => {
3395                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3396                                old_paths.next(&());
3397                            }
3398                            Ordering::Equal => {
3399                                if received_before_initialized {
3400                                    // If the worktree was not fully initialized when this event was generated,
3401                                    // we can't know whether this entry was added during the scan or whether
3402                                    // it was merely updated.
3403                                    changes.insert(
3404                                        (new_entry.path.clone(), new_entry.id),
3405                                        AddedOrUpdated,
3406                                    );
3407                                } else if old_entry.mtime != new_entry.mtime {
3408                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
3409                                }
3410                                old_paths.next(&());
3411                                new_paths.next(&());
3412                            }
3413                            Ordering::Greater => {
3414                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
3415                                new_paths.next(&());
3416                            }
3417                        }
3418                    }
3419                    (Some(old_entry), None) => {
3420                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3421                        old_paths.next(&());
3422                    }
3423                    (None, Some(new_entry)) => {
3424                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
3425                        new_paths.next(&());
3426                    }
3427                    (None, None) => break,
3428                }
3429            }
3430        }
3431
3432        changes
3433    }
3434
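    /// Paces scan-progress notifications: resolves after a short delay while a scan is
    /// running, and never resolves when `running` is false. Under the fake file system
    /// used in tests, a random delay is simulated instead of a real timer.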
3435    async fn progress_timer(&self, running: bool) {
3436        if !running {
3437            return futures::future::pending().await;
3438        }
3439
3440        #[cfg(any(test, feature = "test-support"))]
3441        if self.fs.is_fake() {
3442            return self.executor.simulate_random_delay().await;
3443        }
3444
3445        smol::Timer::after(Duration::from_millis(100)).await;
3446    }
3447}
3448
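/// Builds the fuzzy-matching `CharBag` for a path by extending the root's bag with the
/// lowercased characters of the path.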
3449fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3450    let mut result = root_char_bag;
3451    result.extend(
3452        path.to_string_lossy()
3453            .chars()
3454            .map(|c| c.to_ascii_lowercase()),
3455    );
3456    result
3457}
3458
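/// A unit of work for the background scanner: a directory to scan, the ignore stack in
/// effect at that directory, and the inodes of its ancestors (used to detect symlink
/// cycles).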
3459struct ScanJob {
3460    abs_path: Arc<Path>,
3461    path: Arc<Path>,
3462    ignore_stack: Arc<IgnoreStack>,
3463    scan_queue: Sender<ScanJob>,
3464    ancestor_inodes: TreeSet<u64>,
3465}
3466
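/// A unit of work for ignore recomputation: a directory whose entries should be
/// re-evaluated against the given ignore stack.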
3467struct UpdateIgnoreStatusJob {
3468    abs_path: Arc<Path>,
3469    ignore_stack: Arc<IgnoreStack>,
3470    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3471}
3472
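/// Extension methods on a worktree handle; currently only test-support helpers.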
3473pub trait WorktreeHandle {
3474    #[cfg(any(test, feature = "test-support"))]
3475    fn flush_fs_events<'a>(
3476        &self,
3477        cx: &'a gpui::TestAppContext,
3478    ) -> futures::future::LocalBoxFuture<'a, ()>;
3479}
3480
3481impl WorktreeHandle for ModelHandle<Worktree> {
3482    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
3483    // occurred before the worktree was constructed. These events can cause the worktree to perform
3484    // extra directory scans, and to emit extra scan-state notifications.
3485    //
3486    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3487    // to ensure that all redundant FS events have already been processed.
3488    #[cfg(any(test, feature = "test-support"))]
3489    fn flush_fs_events<'a>(
3490        &self,
3491        cx: &'a gpui::TestAppContext,
3492    ) -> futures::future::LocalBoxFuture<'a, ()> {
3493        use smol::future::FutureExt;
3494
3495        let filename = "fs-event-sentinel";
3496        let tree = self.clone();
3497        let (fs, root_path) = self.read_with(cx, |tree, _| {
3498            let tree = tree.as_local().unwrap();
3499            (tree.fs.clone(), tree.abs_path().clone())
3500        });
3501
3502        async move {
3503            fs.create_file(&root_path.join(filename), Default::default())
3504                .await
3505                .unwrap();
3506            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3507                .await;
3508
3509            fs.remove_file(&root_path.join(filename), Default::default())
3510                .await
3511                .unwrap();
3512            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3513                .await;
3514
3515            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3516                .await;
3517        }
3518        .boxed_local()
3519    }
3520}
3521
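/// A `sum_tree` dimension tracking how many entries (total, visible, files, visible
/// files) precede a given path, allowing a traversal to seek by path or by count.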
3522#[derive(Clone, Debug)]
3523struct TraversalProgress<'a> {
3524    max_path: &'a Path,
3525    count: usize,
3526    visible_count: usize,
3527    file_count: usize,
3528    visible_file_count: usize,
3529}
3530
3531impl<'a> TraversalProgress<'a> {
3532    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3533        match (include_ignored, include_dirs) {
3534            (true, true) => self.count,
3535            (true, false) => self.file_count,
3536            (false, true) => self.visible_count,
3537            (false, false) => self.visible_file_count,
3538        }
3539    }
3540}
3541
3542impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3543    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3544        self.max_path = summary.max_path.as_ref();
3545        self.count += summary.count;
3546        self.visible_count += summary.visible_count;
3547        self.file_count += summary.file_count;
3548        self.visible_file_count += summary.visible_file_count;
3549    }
3550}
3551
3552impl<'a> Default for TraversalProgress<'a> {
3553    fn default() -> Self {
3554        Self {
3555            max_path: Path::new(""),
3556            count: 0,
3557            visible_count: 0,
3558            file_count: 0,
3559            visible_file_count: 0,
3560        }
3561    }
3562}
3563
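/// A cursor over worktree entries that can optionally skip directories and ignored
/// entries, and can jump directly to the next sibling of the current entry.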
3564pub struct Traversal<'a> {
3565    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3566    include_ignored: bool,
3567    include_dirs: bool,
3568}
3569
3570impl<'a> Traversal<'a> {
3571    pub fn advance(&mut self) -> bool {
3572        self.cursor.seek_forward(
3573            &TraversalTarget::Count {
3574                count: self.end_offset() + 1,
3575                include_dirs: self.include_dirs,
3576                include_ignored: self.include_ignored,
3577            },
3578            Bias::Left,
3579            &(),
3580        )
3581    }
3582
3583    pub fn advance_to_sibling(&mut self) -> bool {
3584        while let Some(entry) = self.cursor.item() {
3585            self.cursor.seek_forward(
3586                &TraversalTarget::PathSuccessor(&entry.path),
3587                Bias::Left,
3588                &(),
3589            );
3590            if let Some(entry) = self.cursor.item() {
3591                if (self.include_dirs || !entry.is_dir())
3592                    && (self.include_ignored || !entry.is_ignored)
3593                {
3594                    return true;
3595                }
3596            }
3597        }
3598        false
3599    }
3600
3601    pub fn entry(&self) -> Option<&'a Entry> {
3602        self.cursor.item()
3603    }
3604
3605    pub fn start_offset(&self) -> usize {
3606        self.cursor
3607            .start()
3608            .count(self.include_dirs, self.include_ignored)
3609    }
3610
3611    pub fn end_offset(&self) -> usize {
3612        self.cursor
3613            .end(&())
3614            .count(self.include_dirs, self.include_ignored)
3615    }
3616}
3617
3618impl<'a> Iterator for Traversal<'a> {
3619    type Item = &'a Entry;
3620
3621    fn next(&mut self) -> Option<Self::Item> {
3622        if let Some(item) = self.entry() {
3623            self.advance();
3624            Some(item)
3625        } else {
3626            None
3627        }
3628    }
3629}
3630
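/// A seek target for `Traversal`: a specific path, the successor of a path (the first
/// entry not beneath it), or an absolute entry count.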
3631#[derive(Debug)]
3632enum TraversalTarget<'a> {
3633    Path(&'a Path),
3634    PathSuccessor(&'a Path),
3635    Count {
3636        count: usize,
3637        include_ignored: bool,
3638        include_dirs: bool,
3639    },
3640}
3641
3642impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3643    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3644        match self {
3645            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3646            TraversalTarget::PathSuccessor(path) => {
3647                if !cursor_location.max_path.starts_with(path) {
3648                    Ordering::Equal
3649                } else {
3650                    Ordering::Greater
3651                }
3652            }
3653            TraversalTarget::Count {
3654                count,
3655                include_dirs,
3656                include_ignored,
3657            } => Ord::cmp(
3658                count,
3659                &cursor_location.count(*include_dirs, *include_ignored),
3660            ),
3661        }
3662    }
3663}
3664
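/// Iterates over the direct children of a directory, advancing to the next sibling
/// after yielding each child so that grandchildren are skipped.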
3665struct ChildEntriesIter<'a> {
3666    parent_path: &'a Path,
3667    traversal: Traversal<'a>,
3668}
3669
3670impl<'a> Iterator for ChildEntriesIter<'a> {
3671    type Item = &'a Entry;
3672
3673    fn next(&mut self) -> Option<Self::Item> {
3674        if let Some(item) = self.traversal.entry() {
3675            if item.path.starts_with(&self.parent_path) {
3676                self.traversal.advance_to_sibling();
3677                return Some(item);
3678            }
3679        }
3680        None
3681    }
3682}
3683
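/// Iterates over all entries whose paths start with the given parent path, stopping
/// once the traversal leaves that subtree.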
3684struct DescendentEntriesIter<'a> {
3685    parent_path: &'a Path,
3686    traversal: Traversal<'a>,
3687}
3688
3689impl<'a> Iterator for DescendentEntriesIter<'a> {
3690    type Item = &'a Entry;
3691
3692    fn next(&mut self) -> Option<Self::Item> {
3693        if let Some(item) = self.traversal.entry() {
3694            if item.path.starts_with(&self.parent_path) {
3695                self.traversal.advance();
3696                return Some(item);
3697            }
3698        }
3699        None
3700    }
3701}
3702
3703impl<'a> From<&'a Entry> for proto::Entry {
3704    fn from(entry: &'a Entry) -> Self {
3705        Self {
3706            id: entry.id.to_proto(),
3707            is_dir: entry.is_dir(),
3708            path: entry.path.to_string_lossy().into(),
3709            inode: entry.inode,
3710            mtime: Some(entry.mtime.into()),
3711            is_symlink: entry.is_symlink,
3712            is_ignored: entry.is_ignored,
3713        }
3714    }
3715}
3716
3717impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3718    type Error = anyhow::Error;
3719
3720    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3721        if let Some(mtime) = entry.mtime {
3722            let kind = if entry.is_dir {
3723                EntryKind::Dir
3724            } else {
3725                let mut char_bag = *root_char_bag;
3726                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3727                EntryKind::File(char_bag)
3728            };
3729            let path: Arc<Path> = PathBuf::from(entry.path).into();
3730            Ok(Entry {
3731                id: ProjectEntryId::from_proto(entry.id),
3732                kind,
3733                path,
3734                inode: entry.inode,
3735                mtime: mtime.into(),
3736                is_symlink: entry.is_symlink,
3737                is_ignored: entry.is_ignored,
3738            })
3739        } else {
3740            Err(anyhow!(
3741                "missing mtime in remote worktree entry {:?}",
3742                entry.path
3743            ))
3744        }
3745    }
3746}
3747
3748#[cfg(test)]
3749mod tests {
3750    use super::*;
3751    use fs::{FakeFs, RealFs};
3752    use gpui::{executor::Deterministic, TestAppContext};
3753    use pretty_assertions::assert_eq;
3754    use rand::prelude::*;
3755    use serde_json::json;
3756    use std::{env, fmt::Write};
3757    use util::{http::FakeHttpClient, test::temp_tree};
3758
3759    #[gpui::test]
3760    async fn test_traversal(cx: &mut TestAppContext) {
3761        let fs = FakeFs::new(cx.background());
3762        fs.insert_tree(
3763            "/root",
3764            json!({
3765               ".gitignore": "a/b\n",
3766               "a": {
3767                   "b": "",
3768                   "c": "",
3769               }
3770            }),
3771        )
3772        .await;
3773
3774        let http_client = FakeHttpClient::with_404_response();
3775        let client = cx.read(|cx| Client::new(http_client, cx));
3776
3777        let tree = Worktree::local(
3778            client,
3779            Path::new("/root"),
3780            true,
3781            fs,
3782            Default::default(),
3783            &mut cx.to_async(),
3784        )
3785        .await
3786        .unwrap();
3787        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3788            .await;
3789
3790        tree.read_with(cx, |tree, _| {
3791            assert_eq!(
3792                tree.entries(false)
3793                    .map(|entry| entry.path.as_ref())
3794                    .collect::<Vec<_>>(),
3795                vec![
3796                    Path::new(""),
3797                    Path::new(".gitignore"),
3798                    Path::new("a"),
3799                    Path::new("a/c"),
3800                ]
3801            );
3802            assert_eq!(
3803                tree.entries(true)
3804                    .map(|entry| entry.path.as_ref())
3805                    .collect::<Vec<_>>(),
3806                vec![
3807                    Path::new(""),
3808                    Path::new(".gitignore"),
3809                    Path::new("a"),
3810                    Path::new("a/b"),
3811                    Path::new("a/c"),
3812                ]
3813            );
3814        })
3815    }
3816
3817    #[gpui::test]
3818    async fn test_descendent_entries(cx: &mut TestAppContext) {
3819        let fs = FakeFs::new(cx.background());
3820        fs.insert_tree(
3821            "/root",
3822            json!({
3823                "a": "",
3824                "b": {
3825                   "c": {
3826                       "d": ""
3827                   },
3828                   "e": {}
3829                },
3830                "f": "",
3831                "g": {
3832                    "h": {}
3833                },
3834                "i": {
3835                    "j": {
3836                        "k": ""
3837                    },
3838                    "l": {
3839
3840                    }
3841                },
3842                ".gitignore": "i/j\n",
3843            }),
3844        )
3845        .await;
3846
3847        let http_client = FakeHttpClient::with_404_response();
3848        let client = cx.read(|cx| Client::new(http_client, cx));
3849
3850        let tree = Worktree::local(
3851            client,
3852            Path::new("/root"),
3853            true,
3854            fs,
3855            Default::default(),
3856            &mut cx.to_async(),
3857        )
3858        .await
3859        .unwrap();
3860        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3861            .await;
3862
3863        tree.read_with(cx, |tree, _| {
3864            assert_eq!(
3865                tree.descendent_entries(false, false, Path::new("b"))
3866                    .map(|entry| entry.path.as_ref())
3867                    .collect::<Vec<_>>(),
3868                vec![Path::new("b/c/d"),]
3869            );
3870            assert_eq!(
3871                tree.descendent_entries(true, false, Path::new("b"))
3872                    .map(|entry| entry.path.as_ref())
3873                    .collect::<Vec<_>>(),
3874                vec![
3875                    Path::new("b"),
3876                    Path::new("b/c"),
3877                    Path::new("b/c/d"),
3878                    Path::new("b/e"),
3879                ]
3880            );
3881
3882            assert_eq!(
3883                tree.descendent_entries(false, false, Path::new("g"))
3884                    .map(|entry| entry.path.as_ref())
3885                    .collect::<Vec<_>>(),
3886                Vec::<PathBuf>::new()
3887            );
3888            assert_eq!(
3889                tree.descendent_entries(true, false, Path::new("g"))
3890                    .map(|entry| entry.path.as_ref())
3891                    .collect::<Vec<_>>(),
3892                vec![Path::new("g"), Path::new("g/h"),]
3893            );
3894
3895            assert_eq!(
3896                tree.descendent_entries(false, false, Path::new("i"))
3897                    .map(|entry| entry.path.as_ref())
3898                    .collect::<Vec<_>>(),
3899                Vec::<PathBuf>::new()
3900            );
3901            assert_eq!(
3902                tree.descendent_entries(false, true, Path::new("i"))
3903                    .map(|entry| entry.path.as_ref())
3904                    .collect::<Vec<_>>(),
3905                vec![Path::new("i/j/k")]
3906            );
3907            assert_eq!(
3908                tree.descendent_entries(true, false, Path::new("i"))
3909                    .map(|entry| entry.path.as_ref())
3910                    .collect::<Vec<_>>(),
3911                vec![Path::new("i"), Path::new("i/l"),]
3912            );
3913        })
3914    }
3915
3916    #[gpui::test(iterations = 10)]
3917    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3918        let fs = FakeFs::new(cx.background());
3919        fs.insert_tree(
3920            "/root",
3921            json!({
3922                "lib": {
3923                    "a": {
3924                        "a.txt": ""
3925                    },
3926                    "b": {
3927                        "b.txt": ""
3928                    }
3929                }
3930            }),
3931        )
3932        .await;
3933        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3934        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3935
3936        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3937        let tree = Worktree::local(
3938            client,
3939            Path::new("/root"),
3940            true,
3941            fs.clone(),
3942            Default::default(),
3943            &mut cx.to_async(),
3944        )
3945        .await
3946        .unwrap();
3947
3948        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3949            .await;
3950
3951        tree.read_with(cx, |tree, _| {
3952            assert_eq!(
3953                tree.entries(false)
3954                    .map(|entry| entry.path.as_ref())
3955                    .collect::<Vec<_>>(),
3956                vec![
3957                    Path::new(""),
3958                    Path::new("lib"),
3959                    Path::new("lib/a"),
3960                    Path::new("lib/a/a.txt"),
3961                    Path::new("lib/a/lib"),
3962                    Path::new("lib/b"),
3963                    Path::new("lib/b/b.txt"),
3964                    Path::new("lib/b/lib"),
3965                ]
3966            );
3967        });
3968
3969        fs.rename(
3970            Path::new("/root/lib/a/lib"),
3971            Path::new("/root/lib/a/lib-2"),
3972            Default::default(),
3973        )
3974        .await
3975        .unwrap();
3976        executor.run_until_parked();
3977        tree.read_with(cx, |tree, _| {
3978            assert_eq!(
3979                tree.entries(false)
3980                    .map(|entry| entry.path.as_ref())
3981                    .collect::<Vec<_>>(),
3982                vec![
3983                    Path::new(""),
3984                    Path::new("lib"),
3985                    Path::new("lib/a"),
3986                    Path::new("lib/a/a.txt"),
3987                    Path::new("lib/a/lib-2"),
3988                    Path::new("lib/b"),
3989                    Path::new("lib/b/b.txt"),
3990                    Path::new("lib/b/lib"),
3991                ]
3992            );
3993        });
3994    }
3995
3996    #[gpui::test]
3997    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3998        // .gitignores are handled explicitly by Zed and do not use the git
3999        // machinery that the git_tests module checks
4000        let parent_dir = temp_tree(json!({
4001            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
4002            "tree": {
4003                ".git": {},
4004                ".gitignore": "ignored-dir\n",
4005                "tracked-dir": {
4006                    "tracked-file1": "",
4007                    "ancestor-ignored-file1": "",
4008                },
4009                "ignored-dir": {
4010                    "ignored-file1": ""
4011                }
4012            }
4013        }));
4014        let dir = parent_dir.path().join("tree");
4015
4016        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4017
4018        let tree = Worktree::local(
4019            client,
4020            dir.as_path(),
4021            true,
4022            Arc::new(RealFs),
4023            Default::default(),
4024            &mut cx.to_async(),
4025        )
4026        .await
4027        .unwrap();
4028        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4029            .await;
4030        tree.flush_fs_events(cx).await;
4031        cx.read(|cx| {
4032            let tree = tree.read(cx);
4033            assert!(
4034                !tree
4035                    .entry_for_path("tracked-dir/tracked-file1")
4036                    .unwrap()
4037                    .is_ignored
4038            );
4039            assert!(
4040                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
4041                    .unwrap()
4042                    .is_ignored
4043            );
4044            assert!(
4045                tree.entry_for_path("ignored-dir/ignored-file1")
4046                    .unwrap()
4047                    .is_ignored
4048            );
4049        });
4050
4051        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
4052        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
4053        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
4054        tree.flush_fs_events(cx).await;
4055        cx.read(|cx| {
4056            let tree = tree.read(cx);
4057            assert!(
4058                !tree
4059                    .entry_for_path("tracked-dir/tracked-file2")
4060                    .unwrap()
4061                    .is_ignored
4062            );
4063            assert!(
4064                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
4065                    .unwrap()
4066                    .is_ignored
4067            );
4068            assert!(
4069                tree.entry_for_path("ignored-dir/ignored-file2")
4070                    .unwrap()
4071                    .is_ignored
4072            );
4073            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
4074        });
4075    }
4076
4077    #[gpui::test]
4078    async fn test_write_file(cx: &mut TestAppContext) {
4079        let dir = temp_tree(json!({
4080            ".git": {},
4081            ".gitignore": "ignored-dir\n",
4082            "tracked-dir": {},
4083            "ignored-dir": {}
4084        }));
4085
4086        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4087
4088        let tree = Worktree::local(
4089            client,
4090            dir.path(),
4091            true,
4092            Arc::new(RealFs),
4093            Default::default(),
4094            &mut cx.to_async(),
4095        )
4096        .await
4097        .unwrap();
4098        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4099            .await;
4100        tree.flush_fs_events(cx).await;
4101
4102        tree.update(cx, |tree, cx| {
4103            tree.as_local().unwrap().write_file(
4104                Path::new("tracked-dir/file.txt"),
4105                "hello".into(),
4106                Default::default(),
4107                cx,
4108            )
4109        })
4110        .await
4111        .unwrap();
4112        tree.update(cx, |tree, cx| {
4113            tree.as_local().unwrap().write_file(
4114                Path::new("ignored-dir/file.txt"),
4115                "world".into(),
4116                Default::default(),
4117                cx,
4118            )
4119        })
4120        .await
4121        .unwrap();
4122
4123        tree.read_with(cx, |tree, _| {
4124            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
4125            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
4126            assert!(!tracked.is_ignored);
4127            assert!(ignored.is_ignored);
4128        });
4129    }
4130
4131    #[gpui::test(iterations = 30)]
4132    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
4133        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4134
4135        let fs = FakeFs::new(cx.background());
4136        fs.insert_tree(
4137            "/root",
4138            json!({
4139                "b": {},
4140                "c": {},
4141                "d": {},
4142            }),
4143        )
4144        .await;
4145
4146        let tree = Worktree::local(
4147            client,
4148            "/root".as_ref(),
4149            true,
4150            fs,
4151            Default::default(),
4152            &mut cx.to_async(),
4153        )
4154        .await
4155        .unwrap();
4156
4157        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4158
4159        let entry = tree
4160            .update(cx, |tree, cx| {
4161                tree.as_local_mut()
4162                    .unwrap()
4163                    .create_entry("a/e".as_ref(), true, cx)
4164            })
4165            .await
4166            .unwrap();
4167        assert!(entry.is_dir());
4168
4169        cx.foreground().run_until_parked();
4170        tree.read_with(cx, |tree, _| {
4171            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
4172        });
4173
4174        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4175        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
4176        snapshot1.apply_remote_update(update).unwrap();
4177        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
4178    }
4179
4180    #[gpui::test(iterations = 100)]
4181    async fn test_random_worktree_operations_during_initial_scan(
4182        cx: &mut TestAppContext,
4183        mut rng: StdRng,
4184    ) {
4185        let operations = env::var("OPERATIONS")
4186            .map(|o| o.parse().unwrap())
4187            .unwrap_or(5);
4188        let initial_entries = env::var("INITIAL_ENTRIES")
4189            .map(|o| o.parse().unwrap())
4190            .unwrap_or(20);
4191
4192        let root_dir = Path::new("/test");
4193        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4194        fs.as_fake().insert_tree(root_dir, json!({})).await;
4195        for _ in 0..initial_entries {
4196            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4197        }
4198        log::info!("generated initial tree");
4199
4200        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4201        let worktree = Worktree::local(
4202            client.clone(),
4203            root_dir,
4204            true,
4205            fs.clone(),
4206            Default::default(),
4207            &mut cx.to_async(),
4208        )
4209        .await
4210        .unwrap();
4211
4212        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4213
4214        for _ in 0..operations {
4215            worktree
4216                .update(cx, |worktree, cx| {
4217                    randomly_mutate_worktree(worktree, &mut rng, cx)
4218                })
4219                .await
4220                .log_err();
4221            worktree.read_with(cx, |tree, _| {
4222                tree.as_local().unwrap().snapshot.check_invariants()
4223            });
4224
4225            if rng.gen_bool(0.6) {
4226                let new_snapshot =
4227                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4228                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4229                snapshot.apply_remote_update(update.clone()).unwrap();
4230                assert_eq!(
4231                    snapshot.to_vec(true),
4232                    new_snapshot.to_vec(true),
4233                    "incorrect snapshot after update {:?}",
4234                    update
4235                );
4236            }
4237        }
4238
4239        worktree
4240            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4241            .await;
4242        worktree.read_with(cx, |tree, _| {
4243            tree.as_local().unwrap().snapshot.check_invariants()
4244        });
4245
4246        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4247        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4248        snapshot.apply_remote_update(update.clone()).unwrap();
4249        assert_eq!(
4250            snapshot.to_vec(true),
4251            new_snapshot.to_vec(true),
4252            "incorrect snapshot after update {:?}",
4253            update
4254        );
4255    }
4256
4257    #[gpui::test(iterations = 100)]
4258    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4259        let operations = env::var("OPERATIONS")
4260            .map(|o| o.parse().unwrap())
4261            .unwrap_or(40);
4262        let initial_entries = env::var("INITIAL_ENTRIES")
4263            .map(|o| o.parse().unwrap())
4264            .unwrap_or(20);
4265
4266        let root_dir = Path::new("/test");
4267        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4268        fs.as_fake().insert_tree(root_dir, json!({})).await;
4269        for _ in 0..initial_entries {
4270            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4271        }
4272        log::info!("generated initial tree");
4273
4274        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4275        let worktree = Worktree::local(
4276            client.clone(),
4277            root_dir,
4278            true,
4279            fs.clone(),
4280            Default::default(),
4281            &mut cx.to_async(),
4282        )
4283        .await
4284        .unwrap();
4285
4286        worktree
4287            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4288            .await;
4289
4290        // After the initial scan is complete, the `UpdatedEntries` event can
4291        // be used to follow along with all changes to the worktree's snapshot.
4292        worktree.update(cx, |tree, cx| {
4293            let mut paths = tree
4294                .as_local()
4295                .unwrap()
4296                .paths()
4297                .cloned()
4298                .collect::<Vec<_>>();
4299
4300            cx.subscribe(&worktree, move |tree, _, event, _| {
4301                if let Event::UpdatedEntries(changes) = event {
4302                    for ((path, _), change_type) in changes.iter() {
4303                        let path = path.clone();
4304                        let ix = match paths.binary_search(&path) {
4305                            Ok(ix) | Err(ix) => ix,
4306                        };
4307                        match change_type {
4308                            PathChange::Added => {
4309                                assert_ne!(paths.get(ix), Some(&path));
4310                                paths.insert(ix, path);
4311                            }
4312
4313                            PathChange::Removed => {
4314                                assert_eq!(paths.get(ix), Some(&path));
4315                                paths.remove(ix);
4316                            }
4317
4318                            PathChange::Updated => {
4319                                assert_eq!(paths.get(ix), Some(&path));
4320                            }
4321
4322                            PathChange::AddedOrUpdated => {
4323                                if paths[ix] != path {
4324                                    paths.insert(ix, path);
4325                                }
4326                            }
4327                        }
4328                    }
4329
4330                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
4331                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
4332                }
4333            })
4334            .detach();
4335        });
4336
4337        fs.as_fake().pause_events();
4338        let mut snapshots = Vec::new();
4339        let mut mutations_len = operations;
4340        while mutations_len > 1 {
4341            if rng.gen_bool(0.2) {
4342                worktree
4343                    .update(cx, |worktree, cx| {
4344                        randomly_mutate_worktree(worktree, &mut rng, cx)
4345                    })
4346                    .await
4347                    .log_err();
4348            } else {
4349                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4350            }
4351
4352            let buffered_event_count = fs.as_fake().buffered_event_count();
4353            if buffered_event_count > 0 && rng.gen_bool(0.3) {
4354                let len = rng.gen_range(0..=buffered_event_count);
4355                log::info!("flushing {} events", len);
4356                fs.as_fake().flush_events(len);
4357            } else {
4358                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4359                mutations_len -= 1;
4360            }
4361
4362            cx.foreground().run_until_parked();
4363            if rng.gen_bool(0.2) {
4364                log::info!("storing snapshot {}", snapshots.len());
4365                let snapshot =
4366                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4367                snapshots.push(snapshot);
4368            }
4369        }
4370
4371        log::info!("quiescing");
4372        fs.as_fake().flush_events(usize::MAX);
4373        cx.foreground().run_until_parked();
4374        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4375        snapshot.check_invariants();
4376
4377        {
4378            let new_worktree = Worktree::local(
4379                client.clone(),
4380                root_dir,
4381                true,
4382                fs.clone(),
4383                Default::default(),
4384                &mut cx.to_async(),
4385            )
4386            .await
4387            .unwrap();
4388            new_worktree
4389                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4390                .await;
4391            let new_snapshot =
4392                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4393            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4394        }
4395
4396        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4397            let include_ignored = rng.gen::<bool>();
4398            if !include_ignored {
4399                let mut entries_by_path_edits = Vec::new();
4400                let mut entries_by_id_edits = Vec::new();
4401                for entry in prev_snapshot
4402                    .entries_by_id
4403                    .cursor::<()>()
4404                    .filter(|e| e.is_ignored)
4405                {
4406                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4407                    entries_by_id_edits.push(Edit::Remove(entry.id));
4408                }
4409
4410                prev_snapshot
4411                    .entries_by_path
4412                    .edit(entries_by_path_edits, &());
4413                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4414            }
4415
4416            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4417            prev_snapshot.apply_remote_update(update.clone()).unwrap();
4418            assert_eq!(
4419                prev_snapshot.to_vec(include_ignored),
4420                snapshot.to_vec(include_ignored),
4421                "wrong update for snapshot {i}. update: {:?}",
4422                update
4423            );
4424        }
4425    }
4426
4427    fn randomly_mutate_worktree(
4428        worktree: &mut Worktree,
4429        rng: &mut impl Rng,
4430        cx: &mut ModelContext<Worktree>,
4431    ) -> Task<Result<()>> {
4432        log::info!("mutating worktree");
4433        let worktree = worktree.as_local_mut().unwrap();
4434        let snapshot = worktree.snapshot();
4435        let entry = snapshot.entries(false).choose(rng).unwrap();
4436
4437        match rng.gen_range(0_u32..100) {
4438            0..=33 if entry.path.as_ref() != Path::new("") => {
4439                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4440                worktree.delete_entry(entry.id, cx).unwrap()
4441            }
4442            ..=66 if entry.path.as_ref() != Path::new("") => {
4443                let other_entry = snapshot.entries(false).choose(rng).unwrap();
4444                let new_parent_path = if other_entry.is_dir() {
4445                    other_entry.path.clone()
4446                } else {
4447                    other_entry.path.parent().unwrap().into()
4448                };
4449                let mut new_path = new_parent_path.join(gen_name(rng));
4450                if new_path.starts_with(&entry.path) {
4451                    new_path = gen_name(rng).into();
4452                }
4453
4454                log::info!(
4455                    "renaming entry {:?} ({}) to {:?}",
4456                    entry.path,
4457                    entry.id.0,
4458                    new_path
4459                );
4460                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4461                cx.foreground().spawn(async move {
4462                    task.await?;
4463                    Ok(())
4464                })
4465            }
4466            _ => {
4467                let task = if entry.is_dir() {
4468                    let child_path = entry.path.join(gen_name(rng));
4469                    let is_dir = rng.gen_bool(0.3);
4470                    log::info!(
4471                        "creating {} at {:?}",
4472                        if is_dir { "dir" } else { "file" },
4473                        child_path,
4474                    );
4475                    worktree.create_entry(child_path, is_dir, cx)
4476                } else {
4477                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4478                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4479                };
4480                cx.foreground().spawn(async move {
4481                    task.await?;
4482                    Ok(())
4483                })
4484            }
4485        }
4486    }
4487
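        // Mutate the fake filesystem directly, bypassing the worktree: create files and
        // directories, write .gitignore files, rename paths, or delete them.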
4488    async fn randomly_mutate_fs(
4489        fs: &Arc<dyn Fs>,
4490        root_path: &Path,
4491        insertion_probability: f64,
4492        rng: &mut impl Rng,
4493    ) {
4494        log::info!("mutating fs");
4495        let mut files = Vec::new();
4496        let mut dirs = Vec::new();
4497        for path in fs.as_fake().paths() {
4498            if path.starts_with(root_path) {
4499                if fs.is_file(&path).await {
4500                    files.push(path);
4501                } else {
4502                    dirs.push(path);
4503                }
4504            }
4505        }
4506
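            // Create a new file or directory under a random existing directory. This
            // branch is forced when only the root directory exists, so there is always
            // something left to mutate.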
4507        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4508            let path = dirs.choose(rng).unwrap();
4509            let new_path = path.join(gen_name(rng));
4510
4511            if rng.gen() {
4512                log::info!(
4513                    "creating dir {:?}",
4514                    new_path.strip_prefix(root_path).unwrap()
4515                );
4516                fs.create_dir(&new_path).await.unwrap();
4517            } else {
4518                log::info!(
4519                    "creating file {:?}",
4520                    new_path.strip_prefix(root_path).unwrap()
4521                );
4522                fs.create_file(&new_path, Default::default()).await.unwrap();
4523            }
4524        } else if rng.gen_bool(0.05) {
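                // Occasionally write a .gitignore into a random directory, ignoring a
                // random subset of the files and subdirectories beneath it.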
4525            let ignore_dir_path = dirs.choose(rng).unwrap();
4526            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4527
4528            let subdirs = dirs
4529                .iter()
4530                .filter(|d| d.starts_with(&ignore_dir_path))
4531                .cloned()
4532                .collect::<Vec<_>>();
4533            let subfiles = files
4534                .iter()
4535                .filter(|d| d.starts_with(&ignore_dir_path))
4536                .cloned()
4537                .collect::<Vec<_>>();
4538            let files_to_ignore = {
4539                let len = rng.gen_range(0..=subfiles.len());
4540                subfiles.choose_multiple(rng, len)
4541            };
4542            let dirs_to_ignore = {
4543                let len = rng.gen_range(0..subdirs.len());
4544                subdirs.choose_multiple(rng, len)
4545            };
4546
4547            let mut ignore_contents = String::new();
4548            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4549                writeln!(
4550                    ignore_contents,
4551                    "{}",
4552                    path_to_ignore
4553                        .strip_prefix(&ignore_dir_path)
4554                        .unwrap()
4555                        .to_str()
4556                        .unwrap()
4557                )
4558                .unwrap();
4559            }
4560            log::info!(
4561                "creating gitignore {:?} with contents:\n{}",
4562                ignore_path.strip_prefix(&root_path).unwrap(),
4563                ignore_contents
4564            );
4565            fs.save(
4566                &ignore_path,
4567                &ignore_contents.as_str().into(),
4568                Default::default(),
4569            )
4570            .await
4571            .unwrap();
4572        } else {
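                // Otherwise pick an existing file or non-root directory and either
                // rename it (sometimes over an existing directory) or delete it.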
4573            let old_path = {
4574                let file_path = files.choose(rng);
4575                let dir_path = dirs[1..].choose(rng);
4576                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4577            };
4578
4579            let is_rename = rng.gen();
4580            if is_rename {
4581                let new_path_parent = dirs
4582                    .iter()
4583                    .filter(|d| !d.starts_with(old_path))
4584                    .choose(rng)
4585                    .unwrap();
4586
4587                let overwrite_existing_dir =
4588                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4589                let new_path = if overwrite_existing_dir {
4590                    fs.remove_dir(
4591                        &new_path_parent,
4592                        RemoveOptions {
4593                            recursive: true,
4594                            ignore_if_not_exists: true,
4595                        },
4596                    )
4597                    .await
4598                    .unwrap();
4599                    new_path_parent.to_path_buf()
4600                } else {
4601                    new_path_parent.join(gen_name(rng))
4602                };
4603
4604                log::info!(
4605                    "renaming {:?} to {}{:?}",
4606                    old_path.strip_prefix(&root_path).unwrap(),
4607                    if overwrite_existing_dir {
4608                        "overwrite "
4609                    } else {
4610                        ""
4611                    },
4612                    new_path.strip_prefix(&root_path).unwrap()
4613                );
4614                fs.rename(
4615                    &old_path,
4616                    &new_path,
4617                    fs::RenameOptions {
4618                        overwrite: true,
4619                        ignore_if_exists: true,
4620                    },
4621                )
4622                .await
4623                .unwrap();
4624            } else if fs.is_file(&old_path).await {
4625                log::info!(
4626                    "deleting file {:?}",
4627                    old_path.strip_prefix(&root_path).unwrap()
4628                );
4629                fs.remove_file(old_path, Default::default()).await.unwrap();
4630            } else {
4631                log::info!(
4632                    "deleting dir {:?}",
4633                    old_path.strip_prefix(&root_path).unwrap()
4634                );
4635                fs.remove_dir(
4636                    &old_path,
4637                    RemoveOptions {
4638                        recursive: true,
4639                        ignore_if_not_exists: true,
4640                    },
4641                )
4642                .await
4643                .unwrap();
4644            }
4645        }
4646    }
4647
4648    fn gen_name(rng: &mut impl Rng) -> String {
4649        (0..6)
4650            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4651            .map(char::from)
4652            .collect()
4653    }
4654
4655    impl LocalSnapshot {
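            // Consistency checks used by the fuzz tests: entries_by_path and
            // entries_by_id must agree, the file iterators must yield files in
            // traversal order, a manual walk via child_entries must match both the
            // cursor order and entries(true), and every directory with a tracked
            // .gitignore must still exist along with its .gitignore entry.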
4656        fn check_invariants(&self) {
4657            assert_eq!(
4658                self.entries_by_path
4659                    .cursor::<()>()
4660                    .map(|e| (&e.path, e.id))
4661                    .collect::<Vec<_>>(),
4662                self.entries_by_id
4663                    .cursor::<()>()
4664                    .map(|e| (&e.path, e.id))
4665                    .collect::<collections::BTreeSet<_>>()
4666                    .into_iter()
4667                    .collect::<Vec<_>>(),
4668                "entries_by_path and entries_by_id are inconsistent"
4669            );
4670
4671            let mut files = self.files(true, 0);
4672            let mut visible_files = self.files(false, 0);
4673            for entry in self.entries_by_path.cursor::<()>() {
4674                if entry.is_file() {
4675                    assert_eq!(files.next().unwrap().inode, entry.inode);
4676                    if !entry.is_ignored {
4677                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4678                    }
4679                }
4680            }
4681
4682            assert!(files.next().is_none());
4683            assert!(visible_files.next().is_none());
4684
4685            let mut dfs_paths_via_stack = Vec::new();
4686            let mut stack = vec![Path::new("")];
4687            while let Some(path) = stack.pop() {
4688                dfs_paths_via_stack.push(path);
4689                let ix = stack.len();
4690                for child_entry in self.child_entries(path) {
4691                    stack.insert(ix, &child_entry.path);
4692                }
4693            }
4694
4695            let dfs_paths_via_iter = self
4696                .entries_by_path
4697                .cursor::<()>()
4698                .map(|e| e.path.as_ref())
4699                .collect::<Vec<_>>();
4700            assert_eq!(dfs_paths_via_stack, dfs_paths_via_iter);
4701
4702            let dfs_paths_via_traversal = self
4703                .entries(true)
4704                .map(|e| e.path.as_ref())
4705                .collect::<Vec<_>>();
4706            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4707
4708            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4709                let ignore_parent_path =
4710                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4711                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4712                assert!(self
4713                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4714                    .is_some());
4715            }
4716        }
4717
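            // Flatten the snapshot into (path, inode, is_ignored) tuples, sorted by
            // path, for comparison between snapshots.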
4718        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4719            let mut paths = Vec::new();
4720            for entry in self.entries_by_path.cursor::<()>() {
4721                if include_ignored || !entry.is_ignored {
4722                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4723                }
4724            }
4725            paths.sort_by(|a, b| a.0.cmp(b.0));
4726            paths
4727        }
4728    }
4729
4730    mod git_tests {
4731        use super::*;
4732        use pretty_assertions::assert_eq;
4733
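            // Renaming a repository's work directory should carry its git statuses
            // over to the new paths.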
4734        #[gpui::test]
4735        async fn test_rename_work_directory(cx: &mut TestAppContext) {
4736            let root = temp_tree(json!({
4737                "projects": {
4738                    "project1": {
4739                        "a": "",
4740                        "b": "",
4741                    }
4742                },
4743
4744            }));
4745            let root_path = root.path();
4746
4747            let http_client = FakeHttpClient::with_404_response();
4748            let client = cx.read(|cx| Client::new(http_client, cx));
4749            let tree = Worktree::local(
4750                client,
4751                root_path,
4752                true,
4753                Arc::new(RealFs),
4754                Default::default(),
4755                &mut cx.to_async(),
4756            )
4757            .await
4758            .unwrap();
4759
4760            let repo = git_init(&root_path.join("projects/project1"));
4761            git_add("a", &repo);
4762            git_commit("init", &repo);
4763            std::fs::write(root_path.join("projects/project1/a"), "aa").ok();
4764
4765            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4766                .await;
4767
4768            tree.flush_fs_events(cx).await;
4769
4770            cx.read(|cx| {
4771                let tree = tree.read(cx);
4772                let (work_dir, repo) = tree.repositories().next().unwrap();
4773                assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
4774                assert_eq!(
4775                    repo.status_for_file(tree, Path::new("projects/project1/a")),
4776                    Some(GitFileStatus::Modified)
4777                );
4778                assert_eq!(
4779                    repo.status_for_file(tree, Path::new("projects/project1/b")),
4780                    Some(GitFileStatus::Added)
4781                );
4782            });
4783
4784            std::fs::rename(
4785                root_path.join("projects/project1"),
4786                root_path.join("projects/project2"),
4787            )
4788            .ok();
4789            tree.flush_fs_events(cx).await;
4790
4791            cx.read(|cx| {
4792                let tree = tree.read(cx);
4793                let (work_dir, repo) = tree.repositories().next().unwrap();
4794                assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
4795                assert_eq!(
4796                    repo.status_for_file(tree, Path::new("projects/project2/a")),
4797                    Some(GitFileStatus::Modified)
4798                );
4799                assert_eq!(
4800                    repo.status_for_file(tree, Path::new("projects/project2/b")),
4801                    Some(GitFileStatus::Added)
4802                );
4803            });
4804        }
4805
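            // With nested repositories, a path should resolve to the innermost work
            // directory that contains it; changes inside .git should emit repository
            // update events, and deleting .git should drop the repository entirely.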
4806        #[gpui::test]
4807        async fn test_git_repository_for_path(cx: &mut TestAppContext) {
4808            let root = temp_tree(json!({
4809                "c.txt": "",
4810                "dir1": {
4811                    ".git": {},
4812                    "deps": {
4813                        "dep1": {
4814                            ".git": {},
4815                            "src": {
4816                                "a.txt": ""
4817                            }
4818                        }
4819                    },
4820                    "src": {
4821                        "b.txt": ""
4822                    }
4823                },
4824            }));
4825
4826            let http_client = FakeHttpClient::with_404_response();
4827            let client = cx.read(|cx| Client::new(http_client, cx));
4828            let tree = Worktree::local(
4829                client,
4830                root.path(),
4831                true,
4832                Arc::new(RealFs),
4833                Default::default(),
4834                &mut cx.to_async(),
4835            )
4836            .await
4837            .unwrap();
4838
4839            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4840                .await;
4841            tree.flush_fs_events(cx).await;
4842
4843            tree.read_with(cx, |tree, _cx| {
4844                let tree = tree.as_local().unwrap();
4845
4846                assert!(tree.repository_for_path("c.txt".as_ref()).is_none());
4847
4848                let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
4849                assert_eq!(
4850                    entry
4851                        .work_directory(tree)
4852                        .map(|directory| directory.as_ref().to_owned()),
4853                    Some(Path::new("dir1").to_owned())
4854                );
4855
4856                let entry = tree
4857                    .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
4858                    .unwrap();
4859                assert_eq!(
4860                    entry
4861                        .work_directory(tree)
4862                        .map(|directory| directory.as_ref().to_owned()),
4863                    Some(Path::new("dir1/deps/dep1").to_owned())
4864                );
4865
4866                let entries = tree.files(false, 0);
4867
4868                let paths_with_repos = tree
4869                    .entries_with_repositories(entries)
4870                    .map(|(entry, repo)| {
4871                        (
4872                            entry.path.as_ref(),
4873                            repo.and_then(|repo| {
4874                                repo.work_directory(&tree)
4875                                    .map(|work_directory| work_directory.0.to_path_buf())
4876                            }),
4877                        )
4878                    })
4879                    .collect::<Vec<_>>();
4880
4881                assert_eq!(
4882                    paths_with_repos,
4883                    &[
4884                        (Path::new("c.txt"), None),
4885                        (
4886                            Path::new("dir1/deps/dep1/src/a.txt"),
4887                            Some(Path::new("dir1/deps/dep1").into())
4888                        ),
4889                        (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
4890                    ]
4891                );
4892            });
4893
4894            let repo_update_events = Arc::new(Mutex::new(vec![]));
4895            tree.update(cx, |_, cx| {
4896                let repo_update_events = repo_update_events.clone();
4897                cx.subscribe(&tree, move |_, _, event, _| {
4898                    if let Event::UpdatedGitRepositories(update) = event {
4899                        repo_update_events.lock().push(update.clone());
4900                    }
4901                })
4902                .detach();
4903            });
4904
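                // A write inside .git should surface as an update event keyed by that
                // repository's work directory.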
4905            std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
4906            tree.flush_fs_events(cx).await;
4907
4908            assert_eq!(
4909                repo_update_events.lock()[0]
4910                    .keys()
4911                    .cloned()
4912                    .collect::<Vec<Arc<Path>>>(),
4913                vec![Path::new("dir1").into()]
4914            );
4915
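                // Removing the .git directory should remove the repository, so paths
                // inside it no longer resolve to one.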
4916            std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
4917            tree.flush_fs_events(cx).await;
4918
4919            tree.read_with(cx, |tree, _cx| {
4920                let tree = tree.as_local().unwrap();
4921
4922                assert!(tree
4923                    .repository_for_path("dir1/src/b.txt".as_ref())
4924                    .is_none());
4925            });
4926        }
4927
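            // End-to-end status tracking: statuses should update as files are modified,
            // staged, committed, reset, stashed, ignored, created in new directories,
            // and moved by directory renames.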
4928        #[gpui::test]
4929        async fn test_git_status(cx: &mut TestAppContext) {
4930            const IGNORE_RULE: &str = "**/target";
4931
4932            let root = temp_tree(json!({
4933                "project": {
4934                    "a.txt": "a",
4935                    "b.txt": "bb",
4936                    "c": {
4937                        "d": {
4938                            "e.txt": "eee"
4939                        }
4940                    },
4941                    "f.txt": "ffff",
4942                    "target": {
4943                        "build_file": "???"
4944                    },
4945                    ".gitignore": IGNORE_RULE
4946                },
4947
4948            }));
4949
4950            let http_client = FakeHttpClient::with_404_response();
4951            let client = cx.read(|cx| Client::new(http_client, cx));
4952            let tree = Worktree::local(
4953                client,
4954                root.path(),
4955                true,
4956                Arc::new(RealFs),
4957                Default::default(),
4958                &mut cx.to_async(),
4959            )
4960            .await
4961            .unwrap();
4962
4963            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4964                .await;
4965
4966            const A_TXT: &str = "a.txt";
4967            const B_TXT: &str = "b.txt";
4968            const E_TXT: &str = "c/d/e.txt";
4969            const F_TXT: &str = "f.txt";
4970            const DOTGITIGNORE: &str = ".gitignore";
4971            const BUILD_FILE: &str = "target/build_file";
4972
4973            let work_dir = root.path().join("project");
4974            let mut repo = git_init(work_dir.as_path());
4975            repo.add_ignore_rule(IGNORE_RULE).unwrap();
4976            git_add(Path::new(A_TXT), &repo);
4977            git_add(Path::new(E_TXT), &repo);
4978            git_add(Path::new(DOTGITIGNORE), &repo);
4979            git_commit("Initial commit", &repo);
4980
4981            std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
4982
4983            tree.flush_fs_events(cx).await;
4984
4985            // Check that the right git state is observed on startup
4986            tree.read_with(cx, |tree, _cx| {
4987                let snapshot = tree.snapshot();
4988                assert_eq!(snapshot.repository_entries.iter().count(), 1);
4989                let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4990                assert_eq!(dir.0.as_ref(), Path::new("project"));
4991
4992                assert_eq!(repo.statuses.iter().count(), 3);
4993                assert_eq!(
4994                    repo.statuses.get(&Path::new(A_TXT).into()),
4995                    Some(&GitFileStatus::Modified)
4996                );
4997                assert_eq!(
4998                    repo.statuses.get(&Path::new(B_TXT).into()),
4999                    Some(&GitFileStatus::Added)
5000                );
5001                assert_eq!(
5002                    repo.statuses.get(&Path::new(F_TXT).into()),
5003                    Some(&GitFileStatus::Added)
5004                );
5005            });
5006
5007            git_add(Path::new(A_TXT), &repo);
5008            git_add(Path::new(B_TXT), &repo);
5009            git_commit("Committing modified and added", &repo);
5010            tree.flush_fs_events(cx).await;
5011
5012            // Check that repo only changes are tracked
5013            tree.read_with(cx, |tree, _cx| {
5014                let snapshot = tree.snapshot();
5015                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5016
5017                assert_eq!(repo.statuses.iter().count(), 1);
5018                assert_eq!(
5019                    repo.statuses.get(&Path::new(F_TXT).into()),
5020                    Some(&GitFileStatus::Added)
5021                );
5022            });
5023
5024            git_reset(0, &repo);
5025            git_remove_index(Path::new(B_TXT), &repo);
5026            git_stash(&mut repo);
5027            std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
5028            std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
5029            tree.flush_fs_events(cx).await;
5030
5031            // Check that more complex repo changes are tracked
5032            tree.read_with(cx, |tree, _cx| {
5033                let snapshot = tree.snapshot();
5034                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5035
5036                assert_eq!(repo.statuses.iter().count(), 3);
5037                assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
5038                assert_eq!(
5039                    repo.statuses.get(&Path::new(B_TXT).into()),
5040                    Some(&GitFileStatus::Added)
5041                );
5042                assert_eq!(
5043                    repo.statuses.get(&Path::new(E_TXT).into()),
5044                    Some(&GitFileStatus::Modified)
5045                );
5046                assert_eq!(
5047                    repo.statuses.get(&Path::new(F_TXT).into()),
5048                    Some(&GitFileStatus::Added)
5049                );
5050            });
5051
5052            std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
5053            std::fs::remove_dir_all(work_dir.join("c")).unwrap();
5054            std::fs::write(
5055                work_dir.join(DOTGITIGNORE),
5056                [IGNORE_RULE, "f.txt"].join("\n"),
5057            )
5058            .unwrap();
5059
5060            git_add(Path::new(DOTGITIGNORE), &repo);
5061            git_commit("Committing modified git ignore", &repo);
5062
5063            tree.flush_fs_events(cx).await;
5064
5065            // Check that non-repo behavior is tracked
5066            tree.read_with(cx, |tree, _cx| {
5067                let snapshot = tree.snapshot();
5068                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5069
5070                assert_eq!(repo.statuses.iter().count(), 0);
5071            });
5072
5073            let mut renamed_dir_name = "first_directory/second_directory";
5074            const RENAMED_FILE: &str = "rf.txt";
5075
5076            std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
5077            std::fs::write(
5078                work_dir.join(renamed_dir_name).join(RENAMED_FILE),
5079                "new-contents",
5080            )
5081            .unwrap();
5082
5083            tree.flush_fs_events(cx).await;
5084
5085            tree.read_with(cx, |tree, _cx| {
5086                let snapshot = tree.snapshot();
5087                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5088
5089                assert_eq!(repo.statuses.iter().count(), 1);
5090                assert_eq!(
5091                    repo.statuses
5092                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5093                    Some(&GitFileStatus::Added)
5094                );
5095            });
5096
5097            renamed_dir_name = "new_first_directory/second_directory";
5098
5099            std::fs::rename(
5100                work_dir.join("first_directory"),
5101                work_dir.join("new_first_directory"),
5102            )
5103            .unwrap();
5104
5105            tree.flush_fs_events(cx).await;
5106
5107            tree.read_with(cx, |tree, _cx| {
5108                let snapshot = tree.snapshot();
5109                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5110
5111                assert_eq!(repo.statuses.iter().count(), 1);
5112                assert_eq!(
5113                    repo.statuses
5114                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5115                    Some(&GitFileStatus::Added)
5116                );
5117            });
5118        }
5119
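            // Thin wrappers around git2 used to set up repository state in these tests.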
5120        #[track_caller]
5121        fn git_init(path: &Path) -> git2::Repository {
5122            git2::Repository::init(path).expect("Failed to initialize git repository")
5123        }
5124
5125        #[track_caller]
5126        fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5127            let path = path.as_ref();
5128            let mut index = repo.index().expect("Failed to get index");
5129            index.add_path(path).expect("Failed to add path to index");
5130            index.write().expect("Failed to write index");
5131        }
5132
5133        #[track_caller]
5134        fn git_remove_index(path: &Path, repo: &git2::Repository) {
5135            let mut index = repo.index().expect("Failed to get index");
5136            index.remove_path(path).expect("Failed to remove path from index");
5137            index.write().expect("Failed to write index");
5138        }
5139
5140        #[track_caller]
5141        fn git_commit(msg: &'static str, repo: &git2::Repository) {
5142            use git2::Signature;
5143
5144            let signature = Signature::now("test", "test@zed.dev").unwrap();
5145            let oid = repo.index().unwrap().write_tree().unwrap();
5146            let tree = repo.find_tree(oid).unwrap();
5147            if let Ok(head) = repo.head() {
5148                let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5149
5150                let parent_commit = parent_obj.as_commit().unwrap();
5151
5152                repo.commit(
5153                    Some("HEAD"),
5154                    &signature,
5155                    &signature,
5156                    msg,
5157                    &tree,
5158                    &[parent_commit],
5159                )
5160                .expect("Failed to commit with parent");
5161            } else {
5162                repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5163                    .expect("Failed to commit");
5164            }
5165        }
5166
5167        #[track_caller]
5168        fn git_stash(repo: &mut git2::Repository) {
5169            use git2::Signature;
5170
5171            let signature = Signature::now("test", "test@zed.dev").unwrap();
5172            repo.stash_save(&signature, "N/A", None)
5173                .expect("Failed to stash");
5174        }
5175
5176        #[track_caller]
5177        fn git_reset(offset: usize, repo: &git2::Repository) {
5178            let head = repo.head().expect("Couldn't get repo head");
5179            let object = head.peel(git2::ObjectType::Commit).unwrap();
5180            let commit = object.as_commit().unwrap();
5181            let new_head = commit
5182                .parents()
5183                .nth(offset)
5184                .expect("Not enough history");
5185            repo.reset(new_head.as_object(), git2::ResetType::Soft, None)
5186                .expect("Could not reset");
5191        }
5192
5193        #[allow(dead_code)]
5194        #[track_caller]
5195        fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5196            repo.statuses(None)
5197                .unwrap()
5198                .iter()
5199                .map(|status| (status.path().unwrap().to_string(), status.status()))
5200                .collect()
5201        }
5202    }
5203}