worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TakeUntilExt, TryFutureExt};
  59
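/// Identifies a worktree within the current application instance. For local
/// worktrees this is derived from the worktree's gpui model id; for remote
/// worktrees it comes from the metadata sent by the host.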
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  61pub struct WorktreeId(usize);
  62
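/// A directory tree tracked by the project: either a `Local` worktree backed by
/// this machine's filesystem, or a `Remote` replica of a collaborator's worktree.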
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
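/// A worktree backed by the local filesystem. A background scanner task keeps
/// `snapshot` up to date, and snapshot updates are streamed to collaborators
/// while the worktree is shared.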
  68pub struct LocalWorktree {
  69    snapshot: LocalSnapshot,
  70    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
  71    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
  72    _background_scanner_task: Task<()>,
  73    share: Option<ShareState>,
  74    diagnostics: HashMap<
  75        Arc<Path>,
  76        Vec<(
  77            LanguageServerId,
  78            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
  79        )>,
  80    >,
  81    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  82    client: Arc<Client>,
  83    fs: Arc<dyn Fs>,
  84    visible: bool,
  85}
  86
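/// A replica of a worktree hosted by another collaborator. Its snapshot is kept
/// current by applying `proto::UpdateWorktree` messages received from the host.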
  87pub struct RemoteWorktree {
  88    snapshot: Snapshot,
  89    background_snapshot: Arc<Mutex<Snapshot>>,
  90    project_id: u64,
  91    client: Arc<Client>,
  92    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
  93    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
  94    replica_id: ReplicaId,
  95    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  96    visible: bool,
  97    disconnected: bool,
  98}
  99
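/// The state of a worktree's entries at a point in time, along with the git
/// repositories discovered within it.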
 100#[derive(Clone)]
 101pub struct Snapshot {
 102    id: WorktreeId,
 103    abs_path: Arc<Path>,
 104    root_name: String,
 105    root_char_bag: CharBag,
 106    entries_by_path: SumTree<Entry>,
 107    entries_by_id: SumTree<PathEntry>,
 108    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 109
 110    /// A number that increases every time the worktree begins scanning
 111    /// a set of paths from the filesystem. This scanning could be caused
 112    /// by some operation performed on the worktree, such as reading or
 113    /// writing a file, or by an event reported by the filesystem.
 114    scan_id: usize,
 115
 116    /// The latest scan id that has completed, and whose preceding scans
 117    /// have all completed. The current `scan_id` could be more than one
 118    /// greater than the `completed_scan_id` if operations are performed
 119    /// on the worktree while it is processing a file-system event.
 120    completed_scan_id: usize,
 121}
 122
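/// A git repository discovered within the worktree: the entry for its working
/// directory, the checked-out branch, and the git statuses of paths within it.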
 123#[derive(Clone, Debug, PartialEq, Eq)]
 124pub struct RepositoryEntry {
 125    pub(crate) work_directory: WorkDirectoryEntry,
 126    pub(crate) branch: Option<Arc<str>>,
 127    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
 128}
 129
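/// Converts a wire-format `proto::GitStatus` value into a `GitFileStatus`,
/// returning `None` if the integer does not correspond to a known status.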
 130fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
 131    proto::GitStatus::from_i32(git_status).map(|status| match status {
 132        proto::GitStatus::Added => GitFileStatus::Added,
 133        proto::GitStatus::Modified => GitFileStatus::Modified,
 134        proto::GitStatus::Conflict => GitFileStatus::Conflict,
 135    })
 136}
 137
 138impl RepositoryEntry {
 139    pub fn branch(&self) -> Option<Arc<str>> {
 140        self.branch.clone()
 141    }
 142
 143    pub fn work_directory_id(&self) -> ProjectEntryId {
 144        *self.work_directory
 145    }
 146
 147    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 148        snapshot
 149            .entry_for_id(self.work_directory_id())
 150            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 151    }
 152
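    /// Returns the combined git status for everything recorded at or beneath
    /// `path` (relative to the worktree root): a conflict anywhere below the
    /// path wins over a modification, which wins over an addition.
    ///
    /// A minimal usage sketch (not part of the original source); it assumes a
    /// `snapshot: &Snapshot` and a `repo: &RepositoryEntry` for the repository
    /// containing the file are already in hand:
    ///
    /// ```ignore
    /// let path = Path::new("src/main.rs");
    /// match repo.status_for_path(snapshot, path) {
    ///     Some(GitFileStatus::Conflict) => println!("conflicted"),
    ///     Some(GitFileStatus::Modified) => println!("modified"),
    ///     Some(GitFileStatus::Added) => println!("added"),
    ///     None => println!("no status recorded for this path"),
    /// }
    /// ```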
 153    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 154        self.work_directory
 155            .relativize(snapshot, path)
 156            .and_then(|repo_path| {
 157                self.statuses
 158                    .iter_from(&repo_path)
 159                    .take_while(|(key, _)| key.starts_with(&repo_path))
 160                    // Short circut once we've found the highest level
 161                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
 162                    .map(|(_, status)| status)
 163                    .reduce(
 164                        |status_first, status_second| match (status_first, status_second) {
 165                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
 166                                &GitFileStatus::Conflict
 167                            }
 168                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
 169                                &GitFileStatus::Modified
 170                            }
 171                            _ => &GitFileStatus::Added,
 172                        },
 173                    )
 174                    .copied()
 175            })
 176    }
 177
 178    #[cfg(any(test, feature = "test-support"))]
 179    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 180        self.work_directory
 181            .relativize(snapshot, path)
  182            .and_then(|repo_path| self.statuses.get(&repo_path))
 183            .cloned()
 184    }
 185
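    /// Builds a `proto::RepositoryEntry` describing how `self` differs from an
    /// earlier version `other`: statuses that were added or changed are listed in
    /// `updated_statuses`, and statuses present only in `other` are listed in
    /// `removed_repo_paths`.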
 186    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
 187        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
 188        let mut removed_statuses: Vec<String> = Vec::new();
 189
 190        let mut self_statuses = self.statuses.iter().peekable();
 191        let mut other_statuses = other.statuses.iter().peekable();
 192        loop {
 193            match (self_statuses.peek(), other_statuses.peek()) {
 194                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
 195                    match Ord::cmp(self_repo_path, other_repo_path) {
 196                        Ordering::Less => {
 197                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
 198                            self_statuses.next();
 199                        }
 200                        Ordering::Equal => {
 201                            if self_status != other_status {
 202                                updated_statuses
 203                                    .push(make_status_entry(self_repo_path, self_status));
 204                            }
 205
 206                            self_statuses.next();
 207                            other_statuses.next();
 208                        }
 209                        Ordering::Greater => {
 210                            removed_statuses.push(make_repo_path(other_repo_path));
 211                            other_statuses.next();
 212                        }
 213                    }
 214                }
 215                (Some((self_repo_path, self_status)), None) => {
 216                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
 217                    self_statuses.next();
 218                }
 219                (None, Some((other_repo_path, _))) => {
 220                    removed_statuses.push(make_repo_path(other_repo_path));
 221                    other_statuses.next();
 222                }
 223                (None, None) => break,
 224            }
 225        }
 226
 227        proto::RepositoryEntry {
 228            work_directory_id: self.work_directory_id().to_proto(),
 229            branch: self.branch.as_ref().map(|str| str.to_string()),
 230            removed_repo_paths: removed_statuses,
 231            updated_statuses,
 232        }
 233    }
 234}
 235
 236fn make_repo_path(path: &RepoPath) -> String {
 237    path.as_os_str().to_string_lossy().to_string()
 238}
 239
 240fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
 241    proto::StatusEntry {
 242        repo_path: make_repo_path(path),
 243        status: match status {
 244            GitFileStatus::Added => proto::GitStatus::Added.into(),
 245            GitFileStatus::Modified => proto::GitStatus::Modified.into(),
 246            GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
 247        },
 248    }
 249}
 250
 251impl From<&RepositoryEntry> for proto::RepositoryEntry {
 252    fn from(value: &RepositoryEntry) -> Self {
 253        proto::RepositoryEntry {
 254            work_directory_id: value.work_directory.to_proto(),
 255            branch: value.branch.as_ref().map(|str| str.to_string()),
 256            updated_statuses: value
 257                .statuses
 258                .iter()
 259                .map(|(repo_path, status)| make_status_entry(repo_path, status))
 260                .collect(),
 261            removed_repo_paths: Default::default(),
 262        }
 263    }
 264}
 265
  266/// This path corresponds to the 'content path': the worktree-relative path of the folder that contains the .git (i.e. the repository's working directory).
 267#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 268pub struct RepositoryWorkDirectory(Arc<Path>);
 269
 270impl Default for RepositoryWorkDirectory {
 271    fn default() -> Self {
 272        RepositoryWorkDirectory(Arc::from(Path::new("")))
 273    }
 274}
 275
 276impl AsRef<Path> for RepositoryWorkDirectory {
 277    fn as_ref(&self) -> &Path {
 278        self.0.as_ref()
 279    }
 280}
 281
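/// The project entry id of a repository's working directory. `relativize`
/// converts worktree-relative paths into paths relative to that directory.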
 282#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 283pub struct WorkDirectoryEntry(ProjectEntryId);
 284
 285impl WorkDirectoryEntry {
 286    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 287        worktree.entry_for_id(self.0).and_then(|entry| {
 288            path.strip_prefix(&entry.path)
 289                .ok()
 290                .map(move |path| path.into())
 291        })
 292    }
 293}
 294
 295impl Deref for WorkDirectoryEntry {
 296    type Target = ProjectEntryId;
 297
 298    fn deref(&self) -> &Self::Target {
 299        &self.0
 300    }
 301}
 302
  303impl From<ProjectEntryId> for WorkDirectoryEntry {
 304    fn from(value: ProjectEntryId) -> Self {
 305        WorkDirectoryEntry(value)
 306    }
 307}
 308
 309#[derive(Debug, Clone)]
 310pub struct LocalSnapshot {
 311    snapshot: Snapshot,
 312    /// All of the gitignore files in the worktree, indexed by their relative path.
 313    /// The boolean indicates whether the gitignore needs to be updated.
 314    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
  316    /// All of the git repositories in the worktree, indexed by the project entry
  317    /// id of their working directory (the folder that contains the .git).
 317    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 318}
 319
 320pub struct LocalMutableSnapshot {
 321    snapshot: LocalSnapshot,
 322    /// The ids of all of the entries that were removed from the snapshot
 323    /// as part of the current update. These entry ids may be re-used
 324    /// if the same inode is discovered at a new path, or if the given
 325    /// path is re-created after being deleted.
 326    removed_entry_ids: HashMap<u64, ProjectEntryId>,
 327}
 328
 329#[derive(Debug, Clone)]
 330pub struct LocalRepositoryEntry {
 331    pub(crate) scan_id: usize,
 332    pub(crate) full_scan_id: usize,
 333    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
 334    /// Path to the actual .git folder.
 335    /// Note: if .git is a file, this points to the folder indicated by the .git file
 336    pub(crate) git_dir_path: Arc<Path>,
 337}
 338
 339impl LocalRepositoryEntry {
  340    // Note that the given `path` should be relative to the worktree root.
 341    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 342        path.starts_with(self.git_dir_path.as_ref())
 343    }
 344}
 345
 346impl Deref for LocalSnapshot {
 347    type Target = Snapshot;
 348
 349    fn deref(&self) -> &Self::Target {
 350        &self.snapshot
 351    }
 352}
 353
 354impl DerefMut for LocalSnapshot {
 355    fn deref_mut(&mut self) -> &mut Self::Target {
 356        &mut self.snapshot
 357    }
 358}
 359
 360impl Deref for LocalMutableSnapshot {
 361    type Target = LocalSnapshot;
 362
 363    fn deref(&self) -> &Self::Target {
 364        &self.snapshot
 365    }
 366}
 367
 368impl DerefMut for LocalMutableSnapshot {
 369    fn deref_mut(&mut self) -> &mut Self::Target {
 370        &mut self.snapshot
 371    }
 372}
 373
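/// Progress reports sent from the background scanner to the worktree model on
/// the foreground thread.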
 374enum ScanState {
 375    Started,
 376    Updated {
 377        snapshot: LocalSnapshot,
 378        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
 379        barrier: Option<barrier::Sender>,
 380        scanning: bool,
 381    },
 382}
 383
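/// State kept while a local worktree is shared with collaborators: the channel
/// used to publish new snapshots and the task that forwards them to the server.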
 384struct ShareState {
 385    project_id: u64,
 386    snapshots_tx: watch::Sender<LocalSnapshot>,
 387    resume_updates: watch::Sender<()>,
 388    _maintain_remote_snapshot: Task<Option<()>>,
 389}
 390
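/// Events emitted by a `Worktree` model when its entries or git repositories change.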
 391pub enum Event {
 392    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
 393    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
 394}
 395
 396impl Entity for Worktree {
 397    type Event = Event;
 398}
 399
 400impl Worktree {
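    /// Creates a local worktree rooted at `path`: stats the root entry, builds an
    /// initial snapshot, and spawns the background scanner that keeps it in sync
    /// with the filesystem.
    ///
    /// A minimal usage sketch (not part of the original source); it assumes a
    /// `client`, an `fs` implementation, and an async gpui context `cx` are
    /// already available to the caller:
    ///
    /// ```ignore
    /// let worktree = Worktree::local(
    ///     client.clone(),
    ///     Path::new("/path/to/project"),
    ///     true, // visible
    ///     fs.clone(),
    ///     Arc::new(AtomicUsize::new(0)),
    ///     &mut cx,
    /// )
    /// .await?;
    /// ```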
 401    pub async fn local(
 402        client: Arc<Client>,
 403        path: impl Into<Arc<Path>>,
 404        visible: bool,
 405        fs: Arc<dyn Fs>,
 406        next_entry_id: Arc<AtomicUsize>,
 407        cx: &mut AsyncAppContext,
 408    ) -> Result<ModelHandle<Self>> {
 409        // After determining whether the root entry is a file or a directory, populate the
 410        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
 411        let abs_path = path.into();
 412        let metadata = fs
 413            .metadata(&abs_path)
 414            .await
 415            .context("failed to stat worktree path")?;
 416
 417        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
 418            let root_name = abs_path
 419                .file_name()
 420                .map_or(String::new(), |f| f.to_string_lossy().to_string());
 421
 422            let mut snapshot = LocalSnapshot {
 423                ignores_by_parent_abs_path: Default::default(),
 424                git_repositories: Default::default(),
 425                snapshot: Snapshot {
 426                    id: WorktreeId::from_usize(cx.model_id()),
 427                    abs_path: abs_path.clone(),
 428                    root_name: root_name.clone(),
 429                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
 430                    entries_by_path: Default::default(),
 431                    entries_by_id: Default::default(),
 432                    repository_entries: Default::default(),
 433                    scan_id: 1,
 434                    completed_scan_id: 0,
 435                },
 436            };
 437
 438            if let Some(metadata) = metadata {
 439                snapshot.insert_entry(
 440                    Entry::new(
 441                        Arc::from(Path::new("")),
 442                        &metadata,
 443                        &next_entry_id,
 444                        snapshot.root_char_bag,
 445                    ),
 446                    fs.as_ref(),
 447                );
 448            }
 449
 450            let (path_changes_tx, path_changes_rx) = channel::unbounded();
 451            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 452
 453            cx.spawn_weak(|this, mut cx| async move {
 454                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
 455                    this.update(&mut cx, |this, cx| {
 456                        let this = this.as_local_mut().unwrap();
 457                        match state {
 458                            ScanState::Started => {
 459                                *this.is_scanning.0.borrow_mut() = true;
 460                            }
 461                            ScanState::Updated {
 462                                snapshot,
 463                                changes,
 464                                barrier,
 465                                scanning,
 466                            } => {
 467                                *this.is_scanning.0.borrow_mut() = scanning;
 468                                this.set_snapshot(snapshot, cx);
 469                                cx.emit(Event::UpdatedEntries(changes));
 470                                drop(barrier);
 471                            }
 472                        }
 473                        cx.notify();
 474                    });
 475                }
 476            })
 477            .detach();
 478
 479            let background_scanner_task = cx.background().spawn({
 480                let fs = fs.clone();
 481                let snapshot = snapshot.clone();
 482                let background = cx.background().clone();
 483                async move {
 484                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 485                    BackgroundScanner::new(
 486                        snapshot,
 487                        next_entry_id,
 488                        fs,
 489                        scan_states_tx,
 490                        background,
 491                        path_changes_rx,
 492                    )
 493                    .run(events)
 494                    .await;
 495                }
 496            });
 497
 498            Worktree::Local(LocalWorktree {
 499                snapshot,
 500                is_scanning: watch::channel_with(true),
 501                share: None,
 502                path_changes_tx,
 503                _background_scanner_task: background_scanner_task,
 504                diagnostics: Default::default(),
 505                diagnostic_summaries: Default::default(),
 506                client,
 507                fs,
 508                visible,
 509            })
 510        }))
 511    }
 512
 513    pub fn remote(
 514        project_remote_id: u64,
 515        replica_id: ReplicaId,
 516        worktree: proto::WorktreeMetadata,
 517        client: Arc<Client>,
 518        cx: &mut AppContext,
 519    ) -> ModelHandle<Self> {
 520        cx.add_model(|cx: &mut ModelContext<Self>| {
 521            let snapshot = Snapshot {
 522                id: WorktreeId(worktree.id as usize),
 523                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
 524                root_name: worktree.root_name.clone(),
 525                root_char_bag: worktree
 526                    .root_name
 527                    .chars()
 528                    .map(|c| c.to_ascii_lowercase())
 529                    .collect(),
 530                entries_by_path: Default::default(),
 531                entries_by_id: Default::default(),
 532                repository_entries: Default::default(),
 533                scan_id: 1,
 534                completed_scan_id: 0,
 535            };
 536
 537            let (updates_tx, mut updates_rx) = mpsc::unbounded();
 538            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
 539            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 540
 541            cx.background()
 542                .spawn({
 543                    let background_snapshot = background_snapshot.clone();
 544                    async move {
 545                        while let Some(update) = updates_rx.next().await {
 546                            if let Err(error) =
 547                                background_snapshot.lock().apply_remote_update(update)
 548                            {
 549                                log::error!("error applying worktree update: {}", error);
 550                            }
 551                            snapshot_updated_tx.send(()).await.ok();
 552                        }
 553                    }
 554                })
 555                .detach();
 556
 557            cx.spawn_weak(|this, mut cx| async move {
 558                while (snapshot_updated_rx.recv().await).is_some() {
 559                    if let Some(this) = this.upgrade(&cx) {
 560                        this.update(&mut cx, |this, cx| {
 561                            let this = this.as_remote_mut().unwrap();
 562                            this.snapshot = this.background_snapshot.lock().clone();
 563                            cx.emit(Event::UpdatedEntries(Default::default()));
 564                            cx.notify();
 565                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 566                                if this.observed_snapshot(*scan_id) {
 567                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 568                                    let _ = tx.send(());
 569                                } else {
 570                                    break;
 571                                }
 572                            }
 573                        });
 574                    } else {
 575                        break;
 576                    }
 577                }
 578            })
 579            .detach();
 580
 581            Worktree::Remote(RemoteWorktree {
 582                project_id: project_remote_id,
 583                replica_id,
 584                snapshot: snapshot.clone(),
 585                background_snapshot,
 586                updates_tx: Some(updates_tx),
 587                snapshot_subscriptions: Default::default(),
 588                client: client.clone(),
 589                diagnostic_summaries: Default::default(),
 590                visible: worktree.visible,
 591                disconnected: false,
 592            })
 593        })
 594    }
 595
 596    pub fn as_local(&self) -> Option<&LocalWorktree> {
 597        if let Worktree::Local(worktree) = self {
 598            Some(worktree)
 599        } else {
 600            None
 601        }
 602    }
 603
 604    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 605        if let Worktree::Remote(worktree) = self {
 606            Some(worktree)
 607        } else {
 608            None
 609        }
 610    }
 611
 612    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 613        if let Worktree::Local(worktree) = self {
 614            Some(worktree)
 615        } else {
 616            None
 617        }
 618    }
 619
 620    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 621        if let Worktree::Remote(worktree) = self {
 622            Some(worktree)
 623        } else {
 624            None
 625        }
 626    }
 627
 628    pub fn is_local(&self) -> bool {
 629        matches!(self, Worktree::Local(_))
 630    }
 631
 632    pub fn is_remote(&self) -> bool {
 633        !self.is_local()
 634    }
 635
 636    pub fn snapshot(&self) -> Snapshot {
 637        match self {
 638            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 639            Worktree::Remote(worktree) => worktree.snapshot(),
 640        }
 641    }
 642
 643    pub fn scan_id(&self) -> usize {
 644        match self {
 645            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 646            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 647        }
 648    }
 649
 650    pub fn completed_scan_id(&self) -> usize {
 651        match self {
 652            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 653            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 654        }
 655    }
 656
 657    pub fn is_visible(&self) -> bool {
 658        match self {
 659            Worktree::Local(worktree) => worktree.visible,
 660            Worktree::Remote(worktree) => worktree.visible,
 661        }
 662    }
 663
 664    pub fn replica_id(&self) -> ReplicaId {
 665        match self {
 666            Worktree::Local(_) => 0,
 667            Worktree::Remote(worktree) => worktree.replica_id,
 668        }
 669    }
 670
 671    pub fn diagnostic_summaries(
 672        &self,
 673    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 674        match self {
 675            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 676            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 677        }
 678        .iter()
 679        .flat_map(|(path, summaries)| {
 680            summaries
 681                .iter()
 682                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 683        })
 684    }
 685
 686    pub fn abs_path(&self) -> Arc<Path> {
 687        match self {
 688            Worktree::Local(worktree) => worktree.abs_path.clone(),
 689            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 690        }
 691    }
 692}
 693
 694impl LocalWorktree {
 695    pub fn contains_abs_path(&self, path: &Path) -> bool {
 696        path.starts_with(&self.abs_path)
 697    }
 698
 699    fn absolutize(&self, path: &Path) -> PathBuf {
 700        if path.file_name().is_some() {
 701            self.abs_path.join(path)
 702        } else {
 703            self.abs_path.to_path_buf()
 704        }
 705    }
 706
 707    pub(crate) fn load_buffer(
 708        &mut self,
 709        id: u64,
 710        path: &Path,
 711        cx: &mut ModelContext<Worktree>,
 712    ) -> Task<Result<ModelHandle<Buffer>>> {
 713        let path = Arc::from(path);
 714        cx.spawn(move |this, mut cx| async move {
 715            let (file, contents, diff_base) = this
 716                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
 717                .await?;
 718            let text_buffer = cx
 719                .background()
 720                .spawn(async move { text::Buffer::new(0, id, contents) })
 721                .await;
 722            Ok(cx.add_model(|cx| {
 723                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
 724                buffer.git_diff_recalc(cx);
 725                buffer
 726            }))
 727        })
 728    }
 729
 730    pub fn diagnostics_for_path(
 731        &self,
 732        path: &Path,
 733    ) -> Vec<(
 734        LanguageServerId,
 735        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 736    )> {
 737        self.diagnostics.get(path).cloned().unwrap_or_default()
 738    }
 739
 740    pub fn update_diagnostics(
 741        &mut self,
 742        server_id: LanguageServerId,
 743        worktree_path: Arc<Path>,
 744        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 745        _: &mut ModelContext<Worktree>,
 746    ) -> Result<bool> {
 747        let summaries_by_server_id = self
 748            .diagnostic_summaries
 749            .entry(worktree_path.clone())
 750            .or_default();
 751
 752        let old_summary = summaries_by_server_id
 753            .remove(&server_id)
 754            .unwrap_or_default();
 755
 756        let new_summary = DiagnosticSummary::new(&diagnostics);
 757        if new_summary.is_empty() {
 758            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
 759                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 760                    diagnostics_by_server_id.remove(ix);
 761                }
 762                if diagnostics_by_server_id.is_empty() {
 763                    self.diagnostics.remove(&worktree_path);
 764                }
 765            }
 766        } else {
 767            summaries_by_server_id.insert(server_id, new_summary);
 768            let diagnostics_by_server_id =
 769                self.diagnostics.entry(worktree_path.clone()).or_default();
 770            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 771                Ok(ix) => {
 772                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
 773                }
 774                Err(ix) => {
 775                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
 776                }
 777            }
 778        }
 779
 780        if !old_summary.is_empty() || !new_summary.is_empty() {
 781            if let Some(share) = self.share.as_ref() {
 782                self.client
 783                    .send(proto::UpdateDiagnosticSummary {
 784                        project_id: share.project_id,
 785                        worktree_id: self.id().to_proto(),
 786                        summary: Some(proto::DiagnosticSummary {
 787                            path: worktree_path.to_string_lossy().to_string(),
 788                            language_server_id: server_id.0 as u64,
 789                            error_count: new_summary.error_count as u32,
 790                            warning_count: new_summary.warning_count as u32,
 791                        }),
 792                    })
 793                    .log_err();
 794            }
 795        }
 796
 797        Ok(!old_summary.is_empty() || !new_summary.is_empty())
 798    }
 799
 800    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
 801        let updated_repos =
 802            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
 803        self.snapshot = new_snapshot;
 804
 805        if let Some(share) = self.share.as_mut() {
 806            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
 807        }
 808
 809        if !updated_repos.is_empty() {
 810            cx.emit(Event::UpdatedGitRepositories(updated_repos));
 811        }
 812    }
 813
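    /// Compares the old and new repository maps (both sorted by entry id) and
    /// returns the repositories that were added, removed, or rescanned, keyed by
    /// the path of their working directory entry.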
 814    fn changed_repos(
 815        &self,
 816        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 817        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 818    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
 819        let mut diff = HashMap::default();
 820        let mut old_repos = old_repos.iter().peekable();
 821        let mut new_repos = new_repos.iter().peekable();
 822        loop {
 823            match (old_repos.peek(), new_repos.peek()) {
 824                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
 825                    match Ord::cmp(old_entry_id, new_entry_id) {
 826                        Ordering::Less => {
 827                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
 828                                diff.insert(entry.path.clone(), (*old_repo).clone());
 829                            }
 830                            old_repos.next();
 831                        }
 832                        Ordering::Equal => {
 833                            if old_repo.scan_id != new_repo.scan_id {
 834                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
 835                                    diff.insert(entry.path.clone(), (*new_repo).clone());
 836                                }
 837                            }
 838
 839                            old_repos.next();
 840                            new_repos.next();
 841                        }
 842                        Ordering::Greater => {
 843                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
 844                                diff.insert(entry.path.clone(), (*new_repo).clone());
 845                            }
 846                            new_repos.next();
 847                        }
 848                    }
 849                }
 850                (Some((old_entry_id, old_repo)), None) => {
 851                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
 852                        diff.insert(entry.path.clone(), (*old_repo).clone());
 853                    }
 854                    old_repos.next();
 855                }
 856                (None, Some((new_entry_id, new_repo))) => {
 857                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
 858                        diff.insert(entry.path.clone(), (*new_repo).clone());
 859                    }
 860                    new_repos.next();
 861                }
 862                (None, None) => break,
 863            }
 864        }
 865        diff
 866    }
 867
 868    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 869        let mut is_scanning_rx = self.is_scanning.1.clone();
 870        async move {
 871            let mut is_scanning = is_scanning_rx.borrow().clone();
 872            while is_scanning {
 873                if let Some(value) = is_scanning_rx.recv().await {
 874                    is_scanning = value;
 875                } else {
 876                    break;
 877                }
 878            }
 879        }
 880    }
 881
 882    pub fn snapshot(&self) -> LocalSnapshot {
 883        self.snapshot.clone()
 884    }
 885
 886    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 887        proto::WorktreeMetadata {
 888            id: self.id().to_proto(),
 889            root_name: self.root_name().to_string(),
 890            visible: self.visible,
 891            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 892        }
 893    }
 894
 895    fn load(
 896        &self,
 897        path: &Path,
 898        cx: &mut ModelContext<Worktree>,
 899    ) -> Task<Result<(File, String, Option<String>)>> {
 900        let handle = cx.handle();
 901        let path = Arc::from(path);
 902        let abs_path = self.absolutize(&path);
 903        let fs = self.fs.clone();
 904        let snapshot = self.snapshot();
 905
 906        let mut index_task = None;
 907
 908        if let Some(repo) = snapshot.repository_for_path(&path) {
 909            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
 910            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
 911                let repo = repo.repo_ptr.to_owned();
 912                index_task = Some(
 913                    cx.background()
 914                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
 915                );
 916            }
 917        }
 918
 919        cx.spawn(|this, mut cx| async move {
 920            let text = fs.load(&abs_path).await?;
 921
 922            let diff_base = if let Some(index_task) = index_task {
 923                index_task.await
 924            } else {
 925                None
 926            };
 927
 928            // Eagerly populate the snapshot with an updated entry for the loaded file
 929            let entry = this
 930                .update(&mut cx, |this, cx| {
 931                    this.as_local().unwrap().refresh_entry(path, None, cx)
 932                })
 933                .await?;
 934
 935            Ok((
 936                File {
 937                    entry_id: entry.id,
 938                    worktree: handle,
 939                    path: entry.path,
 940                    mtime: entry.mtime,
 941                    is_local: true,
 942                    is_deleted: false,
 943                },
 944                text,
 945                diff_base,
 946            ))
 947        })
 948    }
 949
 950    pub fn save_buffer(
 951        &self,
 952        buffer_handle: ModelHandle<Buffer>,
 953        path: Arc<Path>,
 954        has_changed_file: bool,
 955        cx: &mut ModelContext<Worktree>,
 956    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
 957        let handle = cx.handle();
 958        let buffer = buffer_handle.read(cx);
 959
 960        let rpc = self.client.clone();
 961        let buffer_id = buffer.remote_id();
 962        let project_id = self.share.as_ref().map(|share| share.project_id);
 963
 964        let text = buffer.as_rope().clone();
 965        let fingerprint = text.fingerprint();
 966        let version = buffer.version();
 967        let save = self.write_file(path, text, buffer.line_ending(), cx);
 968
 969        cx.as_mut().spawn(|mut cx| async move {
 970            let entry = save.await?;
 971
 972            if has_changed_file {
 973                let new_file = Arc::new(File {
 974                    entry_id: entry.id,
 975                    worktree: handle,
 976                    path: entry.path,
 977                    mtime: entry.mtime,
 978                    is_local: true,
 979                    is_deleted: false,
 980                });
 981
 982                if let Some(project_id) = project_id {
 983                    rpc.send(proto::UpdateBufferFile {
 984                        project_id,
 985                        buffer_id,
 986                        file: Some(new_file.to_proto()),
 987                    })
 988                    .log_err();
 989                }
 990
  991                buffer_handle.update(&mut cx, |buffer, cx| {
  992                    buffer.file_updated(new_file, cx).detach();
  993                });
 996            }
 997
 998            if let Some(project_id) = project_id {
 999                rpc.send(proto::BufferSaved {
1000                    project_id,
1001                    buffer_id,
1002                    version: serialize_version(&version),
1003                    mtime: Some(entry.mtime.into()),
1004                    fingerprint: serialize_fingerprint(fingerprint),
1005                })?;
1006            }
1007
1008            buffer_handle.update(&mut cx, |buffer, cx| {
1009                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1010            });
1011
1012            Ok((version, fingerprint, entry.mtime))
1013        })
1014    }
1015
1016    pub fn create_entry(
1017        &self,
1018        path: impl Into<Arc<Path>>,
1019        is_dir: bool,
1020        cx: &mut ModelContext<Worktree>,
1021    ) -> Task<Result<Entry>> {
1022        let path = path.into();
1023        let abs_path = self.absolutize(&path);
1024        let fs = self.fs.clone();
1025        let write = cx.background().spawn(async move {
1026            if is_dir {
1027                fs.create_dir(&abs_path).await
1028            } else {
1029                fs.save(&abs_path, &Default::default(), Default::default())
1030                    .await
1031            }
1032        });
1033
1034        cx.spawn(|this, mut cx| async move {
1035            write.await?;
1036            this.update(&mut cx, |this, cx| {
1037                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1038            })
1039            .await
1040        })
1041    }
1042
1043    pub fn write_file(
1044        &self,
1045        path: impl Into<Arc<Path>>,
1046        text: Rope,
1047        line_ending: LineEnding,
1048        cx: &mut ModelContext<Worktree>,
1049    ) -> Task<Result<Entry>> {
1050        let path = path.into();
1051        let abs_path = self.absolutize(&path);
1052        let fs = self.fs.clone();
1053        let write = cx
1054            .background()
1055            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
1056
1057        cx.spawn(|this, mut cx| async move {
1058            write.await?;
1059            this.update(&mut cx, |this, cx| {
1060                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
1061            })
1062            .await
1063        })
1064    }
1065
1066    pub fn delete_entry(
1067        &self,
1068        entry_id: ProjectEntryId,
1069        cx: &mut ModelContext<Worktree>,
1070    ) -> Option<Task<Result<()>>> {
1071        let entry = self.entry_for_id(entry_id)?.clone();
1072        let abs_path = self.abs_path.clone();
1073        let fs = self.fs.clone();
1074
1075        let delete = cx.background().spawn(async move {
1076            let mut abs_path = fs.canonicalize(&abs_path).await?;
1077            if entry.path.file_name().is_some() {
1078                abs_path = abs_path.join(&entry.path);
1079            }
1080            if entry.is_file() {
1081                fs.remove_file(&abs_path, Default::default()).await?;
1082            } else {
1083                fs.remove_dir(
1084                    &abs_path,
1085                    RemoveOptions {
1086                        recursive: true,
1087                        ignore_if_not_exists: false,
1088                    },
1089                )
1090                .await?;
1091            }
1092            anyhow::Ok(abs_path)
1093        });
1094
1095        Some(cx.spawn(|this, mut cx| async move {
1096            let abs_path = delete.await?;
1097            let (tx, mut rx) = barrier::channel();
1098            this.update(&mut cx, |this, _| {
1099                this.as_local_mut()
1100                    .unwrap()
1101                    .path_changes_tx
1102                    .try_send((vec![abs_path], tx))
1103            })?;
1104            rx.recv().await;
1105            Ok(())
1106        }))
1107    }
1108
1109    pub fn rename_entry(
1110        &self,
1111        entry_id: ProjectEntryId,
1112        new_path: impl Into<Arc<Path>>,
1113        cx: &mut ModelContext<Worktree>,
1114    ) -> Option<Task<Result<Entry>>> {
1115        let old_path = self.entry_for_id(entry_id)?.path.clone();
1116        let new_path = new_path.into();
1117        let abs_old_path = self.absolutize(&old_path);
1118        let abs_new_path = self.absolutize(&new_path);
1119        let fs = self.fs.clone();
1120        let rename = cx.background().spawn(async move {
1121            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1122                .await
1123        });
1124
1125        Some(cx.spawn(|this, mut cx| async move {
1126            rename.await?;
1127            this.update(&mut cx, |this, cx| {
1128                this.as_local_mut()
1129                    .unwrap()
1130                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1131            })
1132            .await
1133        }))
1134    }
1135
1136    pub fn copy_entry(
1137        &self,
1138        entry_id: ProjectEntryId,
1139        new_path: impl Into<Arc<Path>>,
1140        cx: &mut ModelContext<Worktree>,
1141    ) -> Option<Task<Result<Entry>>> {
1142        let old_path = self.entry_for_id(entry_id)?.path.clone();
1143        let new_path = new_path.into();
1144        let abs_old_path = self.absolutize(&old_path);
1145        let abs_new_path = self.absolutize(&new_path);
1146        let fs = self.fs.clone();
1147        let copy = cx.background().spawn(async move {
1148            copy_recursive(
1149                fs.as_ref(),
1150                &abs_old_path,
1151                &abs_new_path,
1152                Default::default(),
1153            )
1154            .await
1155        });
1156
1157        Some(cx.spawn(|this, mut cx| async move {
1158            copy.await?;
1159            this.update(&mut cx, |this, cx| {
1160                this.as_local_mut()
1161                    .unwrap()
1162                    .refresh_entry(new_path.clone(), None, cx)
1163            })
1164            .await
1165        }))
1166    }
1167
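    /// Asks the background scanner to rescan `path` (and `old_path`, if given),
    /// waits for that scan to complete, and returns the refreshed entry.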
1168    fn refresh_entry(
1169        &self,
1170        path: Arc<Path>,
1171        old_path: Option<Arc<Path>>,
1172        cx: &mut ModelContext<Worktree>,
1173    ) -> Task<Result<Entry>> {
1174        let fs = self.fs.clone();
1175        let abs_root_path = self.abs_path.clone();
1176        let path_changes_tx = self.path_changes_tx.clone();
1177        cx.spawn_weak(move |this, mut cx| async move {
1178            let abs_path = fs.canonicalize(&abs_root_path).await?;
1179            let mut paths = Vec::with_capacity(2);
1180            paths.push(if path.file_name().is_some() {
1181                abs_path.join(&path)
1182            } else {
1183                abs_path.clone()
1184            });
1185            if let Some(old_path) = old_path {
1186                paths.push(if old_path.file_name().is_some() {
1187                    abs_path.join(&old_path)
1188                } else {
1189                    abs_path.clone()
1190                });
1191            }
1192
1193            let (tx, mut rx) = barrier::channel();
1194            path_changes_tx.try_send((paths, tx))?;
1195            rx.recv().await;
1196            this.upgrade(&cx)
1197                .ok_or_else(|| anyhow!("worktree was dropped"))?
1198                .update(&mut cx, |this, _| {
1199                    this.entry_for_path(path)
1200                        .cloned()
1201                        .ok_or_else(|| anyhow!("failed to read path after update"))
1202                })
1203        })
1204    }
1205
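    /// Starts sharing this worktree with collaborators in the given project:
    /// sends the current diagnostic summaries and spawns a task that streams
    /// snapshot updates to the server in chunks. Calling it again while already
    /// shared simply resumes updates.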
1206    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1207        let (share_tx, share_rx) = oneshot::channel();
1208
1209        if let Some(share) = self.share.as_mut() {
1210            let _ = share_tx.send(());
1211            *share.resume_updates.borrow_mut() = ();
1212        } else {
1213            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1214            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1215            let worktree_id = cx.model_id() as u64;
1216
1217            for (path, summaries) in &self.diagnostic_summaries {
1218                for (&server_id, summary) in summaries {
1219                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1220                        project_id,
1221                        worktree_id,
1222                        summary: Some(summary.to_proto(server_id, &path)),
1223                    }) {
1224                        return Task::ready(Err(e));
1225                    }
1226                }
1227            }
1228
1229            let _maintain_remote_snapshot = cx.background().spawn({
1230                let client = self.client.clone();
1231                async move {
1232                    let mut share_tx = Some(share_tx);
1233                    let mut prev_snapshot = LocalSnapshot {
1234                        ignores_by_parent_abs_path: Default::default(),
1235                        git_repositories: Default::default(),
1236                        snapshot: Snapshot {
1237                            id: WorktreeId(worktree_id as usize),
1238                            abs_path: Path::new("").into(),
1239                            root_name: Default::default(),
1240                            root_char_bag: Default::default(),
1241                            entries_by_path: Default::default(),
1242                            entries_by_id: Default::default(),
1243                            repository_entries: Default::default(),
1244                            scan_id: 0,
1245                            completed_scan_id: 0,
1246                        },
1247                    };
1248                    while let Some(snapshot) = snapshots_rx.recv().await {
1249                        #[cfg(any(test, feature = "test-support"))]
1250                        const MAX_CHUNK_SIZE: usize = 2;
1251                        #[cfg(not(any(test, feature = "test-support")))]
1252                        const MAX_CHUNK_SIZE: usize = 256;
1253
1254                        let update =
1255                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1256                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1257                            let _ = resume_updates_rx.try_recv();
1258                            while let Err(error) = client.request(update.clone()).await {
1259                                log::error!("failed to send worktree update: {}", error);
1260                                log::info!("waiting to resume updates");
1261                                if resume_updates_rx.next().await.is_none() {
1262                                    return Ok(());
1263                                }
1264                            }
1265                        }
1266
1267                        if let Some(share_tx) = share_tx.take() {
1268                            let _ = share_tx.send(());
1269                        }
1270
1271                        prev_snapshot = snapshot;
1272                    }
1273
1274                    Ok::<_, anyhow::Error>(())
1275                }
1276                .log_err()
1277            });
1278
1279            self.share = Some(ShareState {
1280                project_id,
1281                snapshots_tx,
1282                resume_updates: resume_updates_tx,
1283                _maintain_remote_snapshot,
1284            });
1285        }
1286
1287        cx.foreground()
1288            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1289    }
1290
1291    pub fn unshare(&mut self) {
1292        self.share.take();
1293    }
1294
1295    pub fn is_shared(&self) -> bool {
1296        self.share.is_some()
1297    }
1298}
1299
1300impl RemoteWorktree {
1301    fn snapshot(&self) -> Snapshot {
1302        self.snapshot.clone()
1303    }
1304
1305    pub fn disconnected_from_host(&mut self) {
1306        self.updates_tx.take();
1307        self.snapshot_subscriptions.clear();
1308        self.disconnected = true;
1309    }
1310
1311    pub fn save_buffer(
1312        &self,
1313        buffer_handle: ModelHandle<Buffer>,
1314        cx: &mut ModelContext<Worktree>,
1315    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1316        let buffer = buffer_handle.read(cx);
1317        let buffer_id = buffer.remote_id();
1318        let version = buffer.version();
1319        let rpc = self.client.clone();
1320        let project_id = self.project_id;
1321        cx.as_mut().spawn(|mut cx| async move {
1322            let response = rpc
1323                .request(proto::SaveBuffer {
1324                    project_id,
1325                    buffer_id,
1326                    version: serialize_version(&version),
1327                })
1328                .await?;
1329            let version = deserialize_version(&response.version);
1330            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1331            let mtime = response
1332                .mtime
1333                .ok_or_else(|| anyhow!("missing mtime"))?
1334                .into();
1335
1336            buffer_handle.update(&mut cx, |buffer, cx| {
1337                buffer.did_save(version.clone(), fingerprint, mtime, cx);
1338            });
1339
1340            Ok((version, fingerprint, mtime))
1341        })
1342    }
1343
1344    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1345        if let Some(updates_tx) = &self.updates_tx {
1346            updates_tx
1347                .unbounded_send(update)
1348                .expect("consumer runs to completion");
1349        }
1350    }
1351
1352    fn observed_snapshot(&self, scan_id: usize) -> bool {
1353        self.completed_scan_id >= scan_id
1354    }
1355
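    /// Returns a future that resolves once the worktree has observed a snapshot
    /// with at least the given `scan_id`, or fails if the host has disconnected.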
1356    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1357        let (tx, rx) = oneshot::channel();
1358        if self.observed_snapshot(scan_id) {
1359            let _ = tx.send(());
1360        } else if self.disconnected {
1361            drop(tx);
1362        } else {
1363            match self
1364                .snapshot_subscriptions
1365                .binary_search_by_key(&scan_id, |probe| probe.0)
1366            {
1367                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1368            }
1369        }
1370
1371        async move {
1372            rx.await?;
1373            Ok(())
1374        }
1375    }
1376
1377    pub fn update_diagnostic_summary(
1378        &mut self,
1379        path: Arc<Path>,
1380        summary: &proto::DiagnosticSummary,
1381    ) {
1382        let server_id = LanguageServerId(summary.language_server_id as usize);
1383        let summary = DiagnosticSummary {
1384            error_count: summary.error_count as usize,
1385            warning_count: summary.warning_count as usize,
1386        };
1387
1388        if summary.is_empty() {
1389            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1390                summaries.remove(&server_id);
1391                if summaries.is_empty() {
1392                    self.diagnostic_summaries.remove(&path);
1393                }
1394            }
1395        } else {
1396            self.diagnostic_summaries
1397                .entry(path)
1398                .or_default()
1399                .insert(server_id, summary);
1400        }
1401    }
1402
1403    pub fn insert_entry(
1404        &mut self,
1405        entry: proto::Entry,
1406        scan_id: usize,
1407        cx: &mut ModelContext<Worktree>,
1408    ) -> Task<Result<Entry>> {
1409        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1410        cx.spawn(|this, mut cx| async move {
1411            wait_for_snapshot.await?;
1412            this.update(&mut cx, |worktree, _| {
1413                let worktree = worktree.as_remote_mut().unwrap();
1414                let mut snapshot = worktree.background_snapshot.lock();
1415                let entry = snapshot.insert_entry(entry);
1416                worktree.snapshot = snapshot.clone();
1417                entry
1418            })
1419        })
1420    }
1421
1422    pub(crate) fn delete_entry(
1423        &mut self,
1424        id: ProjectEntryId,
1425        scan_id: usize,
1426        cx: &mut ModelContext<Worktree>,
1427    ) -> Task<Result<()>> {
1428        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1429        cx.spawn(|this, mut cx| async move {
1430            wait_for_snapshot.await?;
1431            this.update(&mut cx, |worktree, _| {
1432                let worktree = worktree.as_remote_mut().unwrap();
1433                let mut snapshot = worktree.background_snapshot.lock();
1434                snapshot.delete_entry(id);
1435                worktree.snapshot = snapshot.clone();
1436            });
1437            Ok(())
1438        })
1439    }
1440}
1441
1442impl Snapshot {
1443    pub fn id(&self) -> WorktreeId {
1444        self.id
1445    }
1446
1447    pub fn abs_path(&self) -> &Arc<Path> {
1448        &self.abs_path
1449    }
1450
1451    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1452        self.entries_by_id.get(&entry_id, &()).is_some()
1453    }
1454
1455    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1456        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1457        let old_entry = self.entries_by_id.insert_or_replace(
1458            PathEntry {
1459                id: entry.id,
1460                path: entry.path.clone(),
1461                is_ignored: entry.is_ignored,
1462                scan_id: 0,
1463            },
1464            &(),
1465        );
1466        if let Some(old_entry) = old_entry {
1467            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1468        }
1469        self.entries_by_path.insert_or_replace(entry.clone(), &());
1470        Ok(entry)
1471    }
1472
1473    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1474        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1475        self.entries_by_path = {
1476            let mut cursor = self.entries_by_path.cursor();
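                // Rebuild the path-ordered tree without the removed entry and its
                // descendants: keep everything before the removed path, skip over the
                // removed subtree (deleting its id-index records as we go), then
                // re-append the untouched suffix.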
1477            let mut new_entries_by_path =
1478                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1479            while let Some(entry) = cursor.item() {
1480                if entry.path.starts_with(&removed_entry.path) {
1481                    self.entries_by_id.remove(&entry.id, &());
1482                    cursor.next(&());
1483                } else {
1484                    break;
1485                }
1486            }
1487            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1488            new_entries_by_path
1489        };
1490
1491        Some(removed_entry.path)
1492    }
1493
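        // A hedged sketch (not from the original source) of the update shape that
        // `apply_remote_update` consumes; only the listed entries and repositories
        // change, and `is_last_update` marks the end of a scan:
        //
        //     let update = proto::UpdateWorktree {
        //         project_id,
        //         worktree_id,
        //         abs_path: "/path/to/root".into(),
        //         root_name: "root".into(),
        //         updated_entries: vec![/* proto::Entry values */],
        //         removed_entries: vec![/* removed entry ids */],
        //         updated_repositories: vec![],
        //         removed_repositories: vec![],
        //         scan_id: 1,
        //         is_last_update: true,
        //     };
        //     snapshot.apply_remote_update(update)?;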
1494    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1495        let mut entries_by_path_edits = Vec::new();
1496        let mut entries_by_id_edits = Vec::new();
1497        for entry_id in update.removed_entries {
1498            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1499                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1500                entries_by_id_edits.push(Edit::Remove(entry.id));
1501            }
1502        }
1503
1504        for entry in update.updated_entries {
1505            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1506            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1507                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1508            }
1509            entries_by_id_edits.push(Edit::Insert(PathEntry {
1510                id: entry.id,
1511                path: entry.path.clone(),
1512                is_ignored: entry.is_ignored,
1513                scan_id: 0,
1514            }));
1515            entries_by_path_edits.push(Edit::Insert(entry));
1516        }
1517
1518        self.entries_by_path.edit(entries_by_path_edits, &());
1519        self.entries_by_id.edit(entries_by_id_edits, &());
1520
1521        update.removed_repositories.sort_unstable();
1522        self.repository_entries.retain(|_, entry| {
1523            update
1524                .removed_repositories
1525                .binary_search(&entry.work_directory.to_proto())
1526                .is_err()
1527        });
1532
1533        for repository in update.updated_repositories {
1534            let work_directory_entry: WorkDirectoryEntry =
1535                ProjectEntryId::from_proto(repository.work_directory_id).into();
1536
1537            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
1538                let mut statuses = TreeMap::default();
1539                for status_entry in repository.updated_statuses {
1540                    let Some(git_file_status) = read_git_status(status_entry.status) else {
1541                        continue;
1542                    };
1543
1544                    let repo_path = RepoPath::new(status_entry.repo_path.into());
1545                    statuses.insert(repo_path, git_file_status);
1546                }
1547
1548                let work_directory = RepositoryWorkDirectory(entry.path.clone());
1549                if self.repository_entries.get(&work_directory).is_some() {
1550                    self.repository_entries.update(&work_directory, |repo| {
1551                        repo.branch = repository.branch.map(Into::into);
1552                        repo.statuses.insert_tree(statuses);
1553
1554                        for repo_path in repository.removed_repo_paths {
1555                            let repo_path = RepoPath::new(repo_path.into());
1556                            repo.statuses.remove(&repo_path);
1557                        }
1558                    });
1559                } else {
1560                    self.repository_entries.insert(
1561                        work_directory,
1562                        RepositoryEntry {
1563                            work_directory: work_directory_entry,
1564                            branch: repository.branch.map(Into::into),
1565                            statuses,
1566                        },
1567                    )
1568                }
1569            } else {
1570                log::error!("no work directory entry for repository {:?}", repository)
1571            }
1572        }
1573
1574        self.scan_id = update.scan_id as usize;
1575        if update.is_last_update {
1576            self.completed_scan_id = update.scan_id as usize;
1577        }
1578
1579        Ok(())
1580    }
1581
1582    pub fn file_count(&self) -> usize {
1583        self.entries_by_path.summary().file_count
1584    }
1585
1586    pub fn visible_file_count(&self) -> usize {
1587        self.entries_by_path.summary().visible_file_count
1588    }
1589
1590    fn traverse_from_offset(
1591        &self,
1592        include_dirs: bool,
1593        include_ignored: bool,
1594        start_offset: usize,
1595    ) -> Traversal {
1596        let mut cursor = self.entries_by_path.cursor();
1597        cursor.seek(
1598            &TraversalTarget::Count {
1599                count: start_offset,
1600                include_dirs,
1601                include_ignored,
1602            },
1603            Bias::Right,
1604            &(),
1605        );
1606        Traversal {
1607            cursor,
1608            include_dirs,
1609            include_ignored,
1610        }
1611    }
1612
1613    fn traverse_from_path(
1614        &self,
1615        include_dirs: bool,
1616        include_ignored: bool,
1617        path: &Path,
1618    ) -> Traversal {
1619        let mut cursor = self.entries_by_path.cursor();
1620        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1621        Traversal {
1622            cursor,
1623            include_dirs,
1624            include_ignored,
1625        }
1626    }
1627
1628    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1629        self.traverse_from_offset(false, include_ignored, start)
1630    }
1631
1632    pub fn entries(&self, include_ignored: bool) -> Traversal {
1633        self.traverse_from_offset(true, include_ignored, 0)
1634    }
1635
1636    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
1637        self.repository_entries
1638            .iter()
1639            .map(|(path, entry)| (&path.0, entry))
1640    }
1641
1642    /// Get the repository entry whose work directory is exactly the given path.
1643    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
1644        self.repository_entries.get(&RepositoryWorkDirectory(path.into())).cloned()
1645    }
1646
1647    /// Get the repository whose work directory contains the given path.
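        /// If repositories are nested, the entry with the longest matching work
        /// directory wins; for example, with work directories `""` and `"vendor/lib"`,
        /// the path `vendor/lib/src/a.rs` resolves to the `vendor/lib` repository.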
1648    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
1649        let mut max_len = 0;
1650        let mut current_candidate = None;
1651        for (work_directory, repo) in self.repository_entries.iter() {
1652            if path.starts_with(&work_directory.0) {
1653                if work_directory.0.as_os_str().len() >= max_len {
1654                    current_candidate = Some(repo);
1655                    max_len = work_directory.0.as_os_str().len();
1656                } else {
1657                    break;
1658                }
1659            }
1660        }
1661
1662        current_candidate.cloned()
1663    }
1664
1665    /// Given an ordered iterator of entries, returns an iterator of those entries,
1666    /// along with their containing git repository.
1667    pub fn entries_with_repositories<'a>(
1668        &'a self,
1669        entries: impl 'a + Iterator<Item = &'a Entry>,
1670    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
1671        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
1672        let mut repositories = self.repositories().peekable();
1673        entries.map(move |entry| {
1674            while let Some((repo_path, _)) = containing_repos.last() {
1675                if !entry.path.starts_with(repo_path) {
1676                    containing_repos.pop();
1677                } else {
1678                    break;
1679                }
1680            }
1681            while let Some((repo_path, _)) = repositories.peek() {
1682                if entry.path.starts_with(repo_path) {
1683                    containing_repos.push(repositories.next().unwrap());
1684                } else {
1685                    break;
1686                }
1687            }
1688            let repo = containing_repos.last().map(|(_, repo)| *repo);
1689            (entry, repo)
1690        })
1691    }
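        // A hedged usage sketch for `entries_with_repositories` (not from the original
        // source), assuming `snapshot` is a `&Snapshot`:
        //
        //     for (entry, repo) in snapshot.entries_with_repositories(snapshot.entries(false)) {
        //         println!("{:?} (in repo: {})", entry.path, repo.is_some());
        //     }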
1692
1693    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1694        let empty_path = Path::new("");
1695        self.entries_by_path
1696            .cursor::<()>()
1697            .filter(move |entry| entry.path.as_ref() != empty_path)
1698            .map(|entry| &entry.path)
1699    }
1700
1701    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1702        let mut cursor = self.entries_by_path.cursor();
1703        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1704        let traversal = Traversal {
1705            cursor,
1706            include_dirs: true,
1707            include_ignored: true,
1708        };
1709        ChildEntriesIter {
1710            traversal,
1711            parent_path,
1712        }
1713    }
1714
1715    fn descendent_entries<'a>(
1716        &'a self,
1717        include_dirs: bool,
1718        include_ignored: bool,
1719        parent_path: &'a Path,
1720    ) -> DescendentEntriesIter<'a> {
1721        let mut cursor = self.entries_by_path.cursor();
1722        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
1723        let mut traversal = Traversal {
1724            cursor,
1725            include_dirs,
1726            include_ignored,
1727        };
1728
1729        if traversal.end_offset() == traversal.start_offset() {
1730            traversal.advance();
1731        }
1732
1733        DescendentEntriesIter {
1734            traversal,
1735            parent_path,
1736        }
1737    }
1738
1739    pub fn root_entry(&self) -> Option<&Entry> {
1740        self.entry_for_path("")
1741    }
1742
1743    pub fn root_name(&self) -> &str {
1744        &self.root_name
1745    }
1746
1747    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1748        self.repository_entries
1749            .get(&RepositoryWorkDirectory(Path::new("").into()))
1750            .cloned()
1751    }
1752
1753    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1754        self.repository_entries.values()
1755    }
1756
1757    pub fn scan_id(&self) -> usize {
1758        self.scan_id
1759    }
1760
1761    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1762        let path = path.as_ref();
1763        self.traverse_from_path(true, true, path)
1764            .entry()
1765            .and_then(|entry| {
1766                if entry.path.as_ref() == path {
1767                    Some(entry)
1768                } else {
1769                    None
1770                }
1771            })
1772    }
1773
1774    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1775        let entry = self.entries_by_id.get(&id, &())?;
1776        self.entry_for_path(&entry.path)
1777    }
1778
1779    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1780        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1781    }
1782}
1783
1784impl LocalSnapshot {
1785    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1786        self.git_repositories.get(&repo.work_directory.0)
1787    }
1788
1789    pub(crate) fn repo_for_metadata(
1790        &self,
1791        path: &Path,
1792    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
1793        self.git_repositories
1794            .iter()
1795            .find(|(_, repo)| repo.in_dot_git(path))
1796    }
1797
1798    #[cfg(test)]
1799    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1800        let root_name = self.root_name.clone();
1801        proto::UpdateWorktree {
1802            project_id,
1803            worktree_id: self.id().to_proto(),
1804            abs_path: self.abs_path().to_string_lossy().into(),
1805            root_name,
1806            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1807            removed_entries: Default::default(),
1808            scan_id: self.scan_id as u64,
1809            is_last_update: true,
1810            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1811            removed_repositories: Default::default(),
1812        }
1813    }
1814
1815    pub(crate) fn build_update(
1816        &self,
1817        other: &Self,
1818        project_id: u64,
1819        worktree_id: u64,
1820        include_ignored: bool,
1821    ) -> proto::UpdateWorktree {
1822        let mut updated_entries = Vec::new();
1823        let mut removed_entries = Vec::new();
1824        let mut self_entries = self
1825            .entries_by_id
1826            .cursor::<()>()
1827            .filter(|e| include_ignored || !e.is_ignored)
1828            .peekable();
1829        let mut other_entries = other
1830            .entries_by_id
1831            .cursor::<()>()
1832            .filter(|e| include_ignored || !e.is_ignored)
1833            .peekable();
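            // Merge-join the two id-ordered cursors: ids present only in `self` become
            // updated entries, ids present only in `other` become removals, and ids
            // present in both are re-sent only when their scan ids differ.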
1834        loop {
1835            match (self_entries.peek(), other_entries.peek()) {
1836                (Some(self_entry), Some(other_entry)) => {
1837                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1838                        Ordering::Less => {
1839                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1840                            updated_entries.push(entry);
1841                            self_entries.next();
1842                        }
1843                        Ordering::Equal => {
1844                            if self_entry.scan_id != other_entry.scan_id {
1845                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1846                                updated_entries.push(entry);
1847                            }
1848
1849                            self_entries.next();
1850                            other_entries.next();
1851                        }
1852                        Ordering::Greater => {
1853                            removed_entries.push(other_entry.id.to_proto());
1854                            other_entries.next();
1855                        }
1856                    }
1857                }
1858                (Some(self_entry), None) => {
1859                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1860                    updated_entries.push(entry);
1861                    self_entries.next();
1862                }
1863                (None, Some(other_entry)) => {
1864                    removed_entries.push(other_entry.id.to_proto());
1865                    other_entries.next();
1866                }
1867                (None, None) => break,
1868            }
1869        }
1870
1871        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1872        let mut removed_repositories = Vec::new();
1873        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1874        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1875        loop {
1876            match (self_repos.peek(), other_repos.peek()) {
1877                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1878                    match Ord::cmp(self_work_dir, other_work_dir) {
1879                        Ordering::Less => {
1880                            updated_repositories.push((*self_repo).into());
1881                            self_repos.next();
1882                        }
1883                        Ordering::Equal => {
1884                            if self_repo != other_repo {
1885                                updated_repositories.push(self_repo.build_update(other_repo));
1886                            }
1887
1888                            self_repos.next();
1889                            other_repos.next();
1890                        }
1891                        Ordering::Greater => {
1892                            removed_repositories.push(other_repo.work_directory.to_proto());
1893                            other_repos.next();
1894                        }
1895                    }
1896                }
1897                (Some((_, self_repo)), None) => {
1898                    updated_repositories.push((*self_repo).into());
1899                    self_repos.next();
1900                }
1901                (None, Some((_, other_repo))) => {
1902                    removed_repositories.push(other_repo.work_directory.to_proto());
1903                    other_repos.next();
1904                }
1905                (None, None) => break,
1906            }
1907        }
1908
1909        proto::UpdateWorktree {
1910            project_id,
1911            worktree_id,
1912            abs_path: self.abs_path().to_string_lossy().into(),
1913            root_name: self.root_name().to_string(),
1914            updated_entries,
1915            removed_entries,
1916            scan_id: self.scan_id as u64,
1917            is_last_update: self.completed_scan_id == self.scan_id,
1918            updated_repositories,
1919            removed_repositories,
1920        }
1921    }
1922
1923    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1924        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1925            let abs_path = self.abs_path.join(&entry.path);
1926            match smol::block_on(build_gitignore(&abs_path, fs)) {
1927                Ok(ignore) => {
1928                    self.ignores_by_parent_abs_path
1929                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
1930                }
1931                Err(error) => {
1932                    log::error!(
1933                        "error loading .gitignore file {:?} - {:?}",
1934                        &entry.path,
1935                        error
1936                    );
1937                }
1938            }
1939        }
1940
1941        if entry.kind == EntryKind::PendingDir {
1942            if let Some(existing_entry) =
1943                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1944            {
1945                entry.kind = existing_entry.kind;
1946            }
1947        }
1948
1949        let scan_id = self.scan_id;
1950        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1951        if let Some(removed) = removed {
1952            if removed.id != entry.id {
1953                self.entries_by_id.remove(&removed.id, &());
1954            }
1955        }
1956        self.entries_by_id.insert_or_replace(
1957            PathEntry {
1958                id: entry.id,
1959                path: entry.path.clone(),
1960                is_ignored: entry.is_ignored,
1961                scan_id,
1962            },
1963            &(),
1964        );
1965
1966        entry
1967    }
1968
1969    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
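            // `parent_path` is the worktree-relative path of a `.git` directory, so its
            // parent is the repository's working directory.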
1970        let abs_path = self.abs_path.join(&parent_path);
1971        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1972
1973        // Guard against repositories inside the repository metadata
1974        if work_dir
1975            .components()
1976            .any(|component| component.as_os_str() == *DOT_GIT)
1977        {
1978            return None;
1979        }
1981
1982        let work_dir_id = self
1983            .entry_for_path(work_dir.clone())
1984            .map(|entry| entry.id)?;
1985
1986        if self.git_repositories.get(&work_dir_id).is_none() {
1987            let repo = fs.open_repo(abs_path.as_path())?;
1988            let work_directory = RepositoryWorkDirectory(work_dir.clone());
1989            let scan_id = self.scan_id;
1990
1991            let repo_lock = repo.lock();
1992
1993            self.repository_entries.insert(
1994                work_directory,
1995                RepositoryEntry {
1996                    work_directory: work_dir_id.into(),
1997                    branch: repo_lock.branch_name().map(Into::into),
1998                    statuses: repo_lock.statuses().unwrap_or_default(),
1999                },
2000            );
2001            drop(repo_lock);
2002
2003            self.git_repositories.insert(
2004                work_dir_id,
2005                LocalRepositoryEntry {
2006                    scan_id,
2007                    full_scan_id: scan_id,
2008                    repo_ptr: repo,
2009                    git_dir_path: parent_path.clone(),
2010                },
2011            )
2012        }
2013
2014        Some(())
2015    }
2016
2017    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
2018        let mut inodes = TreeSet::default();
2019        for ancestor in path.ancestors().skip(1) {
2020            if let Some(entry) = self.entry_for_path(ancestor) {
2021                inodes.insert(entry.inode);
2022            }
2023        }
2024        inodes
2025    }
2026
2027    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2028        let mut new_ignores = Vec::new();
2029        for ancestor in abs_path.ancestors().skip(1) {
2030            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
2031                new_ignores.push((ancestor, Some(ignore.clone())));
2032            } else {
2033                new_ignores.push((ancestor, None));
2034            }
2035        }
2036
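            // Apply the collected ignore files from the outermost ancestor inward; if an
            // ancestor directory is itself ignored, everything beneath it is ignored and
            // the stack collapses to `IgnoreStack::all()`.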
2037        let mut ignore_stack = IgnoreStack::none();
2038        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
2039            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
2040                ignore_stack = IgnoreStack::all();
2041                break;
2042            } else if let Some(ignore) = ignore {
2043                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
2044            }
2045        }
2046
2047        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
2048            ignore_stack = IgnoreStack::all();
2049        }
2050
2051        ignore_stack
2052    }
2053}
2054
2055impl LocalMutableSnapshot {
2056    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2057        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2058            entry.id = removed_entry_id;
2059        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2060            entry.id = existing_entry.id;
2061        }
2062    }
2063
2064    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2065        self.reuse_entry_id(&mut entry);
2066        self.snapshot.insert_entry(entry, fs)
2067    }
2068
2069    fn populate_dir(
2070        &mut self,
2071        parent_path: Arc<Path>,
2072        entries: impl IntoIterator<Item = Entry>,
2073        ignore: Option<Arc<Gitignore>>,
2074        fs: &dyn Fs,
2075    ) {
2076        let mut parent_entry = if let Some(parent_entry) =
2077            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
2078        {
2079            parent_entry.clone()
2080        } else {
2081            log::warn!(
2082                "populating a directory {:?} that has been removed",
2083                parent_path
2084            );
2085            return;
2086        };
2087
2088        match parent_entry.kind {
2089            EntryKind::PendingDir => {
2090                parent_entry.kind = EntryKind::Dir;
2091            }
2092            EntryKind::Dir => {}
2093            _ => return,
2094        }
2095
2096        if let Some(ignore) = ignore {
2097            let abs_parent_path = self.abs_path.join(&parent_path).into();
2098            self.ignores_by_parent_abs_path
2099                .insert(abs_parent_path, (ignore, false));
2100        }
2101
2102        if parent_path.file_name() == Some(&DOT_GIT) {
2103            self.build_repo(parent_path, fs);
2104        }
2105
2106        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2107        let mut entries_by_id_edits = Vec::new();
2108
2109        for mut entry in entries {
2110            self.reuse_entry_id(&mut entry);
2111            entries_by_id_edits.push(Edit::Insert(PathEntry {
2112                id: entry.id,
2113                path: entry.path.clone(),
2114                is_ignored: entry.is_ignored,
2115                scan_id: self.scan_id,
2116            }));
2117            entries_by_path_edits.push(Edit::Insert(entry));
2118        }
2119
2120        self.entries_by_path.edit(entries_by_path_edits, &());
2121        self.entries_by_id.edit(entries_by_id_edits, &());
2122    }
2123
2124    fn remove_path(&mut self, path: &Path) {
2125        let mut new_entries;
2126        let removed_entries;
2127        {
2128            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
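                // Split the path-ordered tree into the prefix before `path`, the subtree
                // rooted at `path` (captured as `removed_entries`), and the untouched
                // suffix, then stitch the prefix and suffix back together.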
2129            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2130            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2131            new_entries.push_tree(cursor.suffix(&()), &());
2132        }
2133        self.entries_by_path = new_entries;
2134
2135        let mut entries_by_id_edits = Vec::new();
2136        for entry in removed_entries.cursor::<()>() {
2137            let removed_entry_id = self
2138                .removed_entry_ids
2139                .entry(entry.inode)
2140                .or_insert(entry.id);
2141            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2142            entries_by_id_edits.push(Edit::Remove(entry.id));
2143        }
2144        self.entries_by_id.edit(entries_by_id_edits, &());
2145
2146        if path.file_name() == Some(&GITIGNORE) {
2147            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
2148            if let Some((_, needs_update)) = self
2149                .ignores_by_parent_abs_path
2150                .get_mut(abs_parent_path.as_path())
2151            {
2152                *needs_update = true;
2153            }
2154        }
2155    }
2156}
2157
2158async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2159    let contents = fs.load(abs_path).await?;
2160    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2161    let mut builder = GitignoreBuilder::new(parent);
2162    for line in contents.lines() {
2163        builder.add_line(Some(abs_path.into()), line)?;
2164    }
2165    Ok(builder.build()?)
2166}
2167
2168impl WorktreeId {
2169    pub fn from_usize(handle_id: usize) -> Self {
2170        Self(handle_id)
2171    }
2172
2173    pub(crate) fn from_proto(id: u64) -> Self {
2174        Self(id as usize)
2175    }
2176
2177    pub fn to_proto(&self) -> u64 {
2178        self.0 as u64
2179    }
2180
2181    pub fn to_usize(&self) -> usize {
2182        self.0
2183    }
2184}
2185
2186impl fmt::Display for WorktreeId {
2187    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2188        self.0.fmt(f)
2189    }
2190}
2191
2192impl Deref for Worktree {
2193    type Target = Snapshot;
2194
2195    fn deref(&self) -> &Self::Target {
2196        match self {
2197            Worktree::Local(worktree) => &worktree.snapshot,
2198            Worktree::Remote(worktree) => &worktree.snapshot,
2199        }
2200    }
2201}
2202
2203impl Deref for LocalWorktree {
2204    type Target = LocalSnapshot;
2205
2206    fn deref(&self) -> &Self::Target {
2207        &self.snapshot
2208    }
2209}
2210
2211impl Deref for RemoteWorktree {
2212    type Target = Snapshot;
2213
2214    fn deref(&self) -> &Self::Target {
2215        &self.snapshot
2216    }
2217}
2218
2219impl fmt::Debug for LocalWorktree {
2220    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2221        self.snapshot.fmt(f)
2222    }
2223}
2224
2225impl fmt::Debug for Snapshot {
2226    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2227        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2228        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2229
2230        impl<'a> fmt::Debug for EntriesByPath<'a> {
2231            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2232                f.debug_map()
2233                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2234                    .finish()
2235            }
2236        }
2237
2238        impl<'a> fmt::Debug for EntriesById<'a> {
2239            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2240                f.debug_list().entries(self.0.iter()).finish()
2241            }
2242        }
2243
2244        f.debug_struct("Snapshot")
2245            .field("id", &self.id)
2246            .field("root_name", &self.root_name)
2247            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2248            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2249            .finish()
2250    }
2251}
2252
2253#[derive(Clone, PartialEq)]
2254pub struct File {
2255    pub worktree: ModelHandle<Worktree>,
2256    pub path: Arc<Path>,
2257    pub mtime: SystemTime,
2258    pub(crate) entry_id: ProjectEntryId,
2259    pub(crate) is_local: bool,
2260    pub(crate) is_deleted: bool,
2261}
2262
2263impl language::File for File {
2264    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2265        if self.is_local {
2266            Some(self)
2267        } else {
2268            None
2269        }
2270    }
2271
2272    fn mtime(&self) -> SystemTime {
2273        self.mtime
2274    }
2275
2276    fn path(&self) -> &Arc<Path> {
2277        &self.path
2278    }
2279
2280    fn full_path(&self, cx: &AppContext) -> PathBuf {
2281        let mut full_path = PathBuf::new();
2282        let worktree = self.worktree.read(cx);
2283
2284        if worktree.is_visible() {
2285            full_path.push(worktree.root_name());
2286        } else {
2287            let path = worktree.abs_path();
2288
2289            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2290                full_path.push("~");
2291                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2292            } else {
2293                full_path.push(path)
2294            }
2295        }
2296
2297        if self.path.components().next().is_some() {
2298            full_path.push(&self.path);
2299        }
2300
2301        full_path
2302    }
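        // Note on `full_path`: for a visible worktree the result is rooted at the
        // worktree's name (e.g. `zed/src/main.rs`); for a non-visible local worktree it
        // is the absolute path, with the home directory abbreviated to `~`.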
2303
2304    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2305    /// of its worktree, then this method will return the name of the worktree itself.
2306    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2307        self.path
2308            .file_name()
2309            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2310    }
2311
2312    fn is_deleted(&self) -> bool {
2313        self.is_deleted
2314    }
2315
2316    fn as_any(&self) -> &dyn Any {
2317        self
2318    }
2319
2320    fn to_proto(&self) -> rpc::proto::File {
2321        rpc::proto::File {
2322            worktree_id: self.worktree.id() as u64,
2323            entry_id: self.entry_id.to_proto(),
2324            path: self.path.to_string_lossy().into(),
2325            mtime: Some(self.mtime.into()),
2326            is_deleted: self.is_deleted,
2327        }
2328    }
2329}
2330
2331impl language::LocalFile for File {
2332    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2333        self.worktree
2334            .read(cx)
2335            .as_local()
2336            .unwrap()
2337            .abs_path
2338            .join(&self.path)
2339    }
2340
2341    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2342        let worktree = self.worktree.read(cx).as_local().unwrap();
2343        let abs_path = worktree.absolutize(&self.path);
2344        let fs = worktree.fs.clone();
2345        cx.background()
2346            .spawn(async move { fs.load(&abs_path).await })
2347    }
2348
2349    fn buffer_reloaded(
2350        &self,
2351        buffer_id: u64,
2352        version: &clock::Global,
2353        fingerprint: RopeFingerprint,
2354        line_ending: LineEnding,
2355        mtime: SystemTime,
2356        cx: &mut AppContext,
2357    ) {
2358        let worktree = self.worktree.read(cx).as_local().unwrap();
2359        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2360            worktree
2361                .client
2362                .send(proto::BufferReloaded {
2363                    project_id,
2364                    buffer_id,
2365                    version: serialize_version(version),
2366                    mtime: Some(mtime.into()),
2367                    fingerprint: serialize_fingerprint(fingerprint),
2368                    line_ending: serialize_line_ending(line_ending) as i32,
2369                })
2370                .log_err();
2371        }
2372    }
2373}
2374
2375impl File {
2376    pub fn from_proto(
2377        proto: rpc::proto::File,
2378        worktree: ModelHandle<Worktree>,
2379        cx: &AppContext,
2380    ) -> Result<Self> {
2381        let worktree_id = worktree
2382            .read(cx)
2383            .as_remote()
2384            .ok_or_else(|| anyhow!("not remote"))?
2385            .id();
2386
2387        if worktree_id.to_proto() != proto.worktree_id {
2388            return Err(anyhow!("worktree id does not match file"));
2389        }
2390
2391        Ok(Self {
2392            worktree,
2393            path: Path::new(&proto.path).into(),
2394            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2395            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2396            is_local: false,
2397            is_deleted: proto.is_deleted,
2398        })
2399    }
2400
2401    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2402        file.and_then(|f| f.as_any().downcast_ref())
2403    }
2404
2405    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2406        self.worktree.read(cx).id()
2407    }
2408
2409    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2410        if self.is_deleted {
2411            None
2412        } else {
2413            Some(self.entry_id)
2414        }
2415    }
2416}
2417
2418#[derive(Clone, Debug, PartialEq, Eq)]
2419pub struct Entry {
2420    pub id: ProjectEntryId,
2421    pub kind: EntryKind,
2422    pub path: Arc<Path>,
2423    pub inode: u64,
2424    pub mtime: SystemTime,
2425    pub is_symlink: bool,
2426    pub is_ignored: bool,
2427}
2428
2429#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2430pub enum EntryKind {
2431    PendingDir,
2432    Dir,
2433    File(CharBag),
2434}
2435
2436#[derive(Clone, Copy, Debug)]
2437pub enum PathChange {
2438    Added,
2439    Removed,
2440    Updated,
2441    AddedOrUpdated,
2442}
2443
2444impl Entry {
2445    fn new(
2446        path: Arc<Path>,
2447        metadata: &fs::Metadata,
2448        next_entry_id: &AtomicUsize,
2449        root_char_bag: CharBag,
2450    ) -> Self {
2451        Self {
2452            id: ProjectEntryId::new(next_entry_id),
2453            kind: if metadata.is_dir {
2454                EntryKind::PendingDir
2455            } else {
2456                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2457            },
2458            path,
2459            inode: metadata.inode,
2460            mtime: metadata.mtime,
2461            is_symlink: metadata.is_symlink,
2462            is_ignored: false,
2463        }
2464    }
2465
2466    pub fn is_dir(&self) -> bool {
2467        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2468    }
2469
2470    pub fn is_file(&self) -> bool {
2471        matches!(self.kind, EntryKind::File(_))
2472    }
2473}
2474
2475impl sum_tree::Item for Entry {
2476    type Summary = EntrySummary;
2477
2478    fn summary(&self) -> Self::Summary {
2479        let visible_count = if self.is_ignored { 0 } else { 1 };
2480        let file_count;
2481        let visible_file_count;
2482        if self.is_file() {
2483            file_count = 1;
2484            visible_file_count = visible_count;
2485        } else {
2486            file_count = 0;
2487            visible_file_count = 0;
2488        }
2489
2490        EntrySummary {
2491            max_path: self.path.clone(),
2492            count: 1,
2493            visible_count,
2494            file_count,
2495            visible_file_count,
2496        }
2497    }
2498}
2499
2500impl sum_tree::KeyedItem for Entry {
2501    type Key = PathKey;
2502
2503    fn key(&self) -> Self::Key {
2504        PathKey(self.path.clone())
2505    }
2506}
2507
2508#[derive(Clone, Debug)]
2509pub struct EntrySummary {
2510    max_path: Arc<Path>,
2511    count: usize,
2512    visible_count: usize,
2513    file_count: usize,
2514    visible_file_count: usize,
2515}
2516
2517impl Default for EntrySummary {
2518    fn default() -> Self {
2519        Self {
2520            max_path: Arc::from(Path::new("")),
2521            count: 0,
2522            visible_count: 0,
2523            file_count: 0,
2524            visible_file_count: 0,
2525        }
2526    }
2527}
2528
2529impl sum_tree::Summary for EntrySummary {
2530    type Context = ();
2531
2532    fn add_summary(&mut self, rhs: &Self, _: &()) {
2533        self.max_path = rhs.max_path.clone();
2534        self.count += rhs.count;
2535        self.visible_count += rhs.visible_count;
2536        self.file_count += rhs.file_count;
2537        self.visible_file_count += rhs.visible_file_count;
2538    }
2539}
2540
2541#[derive(Clone, Debug)]
2542struct PathEntry {
2543    id: ProjectEntryId,
2544    path: Arc<Path>,
2545    is_ignored: bool,
2546    scan_id: usize,
2547}
2548
2549impl sum_tree::Item for PathEntry {
2550    type Summary = PathEntrySummary;
2551
2552    fn summary(&self) -> Self::Summary {
2553        PathEntrySummary { max_id: self.id }
2554    }
2555}
2556
2557impl sum_tree::KeyedItem for PathEntry {
2558    type Key = ProjectEntryId;
2559
2560    fn key(&self) -> Self::Key {
2561        self.id
2562    }
2563}
2564
2565#[derive(Clone, Debug, Default)]
2566struct PathEntrySummary {
2567    max_id: ProjectEntryId,
2568}
2569
2570impl sum_tree::Summary for PathEntrySummary {
2571    type Context = ();
2572
2573    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2574        self.max_id = summary.max_id;
2575    }
2576}
2577
2578impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2579    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2580        *self = summary.max_id;
2581    }
2582}
2583
2584#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2585pub struct PathKey(Arc<Path>);
2586
2587impl Default for PathKey {
2588    fn default() -> Self {
2589        Self(Path::new("").into())
2590    }
2591}
2592
2593impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2594    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2595        self.0 = summary.max_path.clone();
2596    }
2597}
2598
2599struct BackgroundScanner {
2600    snapshot: Mutex<LocalMutableSnapshot>,
2601    fs: Arc<dyn Fs>,
2602    status_updates_tx: UnboundedSender<ScanState>,
2603    executor: Arc<executor::Background>,
2604    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2605    prev_state: Mutex<BackgroundScannerState>,
2606    next_entry_id: Arc<AtomicUsize>,
2607    finished_initial_scan: bool,
2608}
2609
2610struct BackgroundScannerState {
2611    snapshot: Snapshot,
2612    event_paths: Vec<Arc<Path>>,
2613}
2614
2615impl BackgroundScanner {
2616    fn new(
2617        snapshot: LocalSnapshot,
2618        next_entry_id: Arc<AtomicUsize>,
2619        fs: Arc<dyn Fs>,
2620        status_updates_tx: UnboundedSender<ScanState>,
2621        executor: Arc<executor::Background>,
2622        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2623    ) -> Self {
2624        Self {
2625            fs,
2626            status_updates_tx,
2627            executor,
2628            refresh_requests_rx,
2629            next_entry_id,
2630            prev_state: Mutex::new(BackgroundScannerState {
2631                snapshot: snapshot.snapshot.clone(),
2632                event_paths: Default::default(),
2633            }),
2634            snapshot: Mutex::new(LocalMutableSnapshot {
2635                snapshot,
2636                removed_entry_ids: Default::default(),
2637            }),
2638            finished_initial_scan: false,
2639        }
2640    }
2641
2642    async fn run(
2643        &mut self,
2644        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2645    ) {
2646        use futures::FutureExt as _;
2647
2648        let (root_abs_path, root_inode) = {
2649            let snapshot = self.snapshot.lock();
2650            (
2651                snapshot.abs_path.clone(),
2652                snapshot.root_entry().map(|e| e.inode),
2653            )
2654        };
2655
2656        // Populate ignores above the root.
2657        let ignore_stack;
2658        for ancestor in root_abs_path.ancestors().skip(1) {
2659            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2660            {
2661                self.snapshot
2662                    .lock()
2663                    .ignores_by_parent_abs_path
2664                    .insert(ancestor.into(), (ignore.into(), false));
2665            }
2666        }
2667        {
2668            let mut snapshot = self.snapshot.lock();
2669            snapshot.scan_id += 1;
2670            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2671            if ignore_stack.is_all() {
2672                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2673                    root_entry.is_ignored = true;
2674                    snapshot.insert_entry(root_entry, self.fs.as_ref());
2675                }
2676            }
2677        };
2678
2679        // Perform an initial scan of the directory.
2680        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2681        smol::block_on(scan_job_tx.send(ScanJob {
2682            abs_path: root_abs_path,
2683            path: Arc::from(Path::new("")),
2684            ignore_stack,
2685            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2686            scan_queue: scan_job_tx.clone(),
2687        }))
2688        .unwrap();
2689        drop(scan_job_tx);
2690        self.scan_dirs(true, scan_job_rx).await;
2691        {
2692            let mut snapshot = self.snapshot.lock();
2693            snapshot.completed_scan_id = snapshot.scan_id;
2694        }
2695        self.send_status_update(false, None);
2696
2697        // Process any FS events that occurred while performing the initial scan.
2698        // For these events, the reported changes cannot be as precise, because we
2699        // didn't have the previous state loaded yet.
2700        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2701            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2702            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2703                paths.extend(more_events.into_iter().map(|e| e.path));
2704            }
2705            self.process_events(paths).await;
2706        }
2707
2708        self.finished_initial_scan = true;
2709
2710        // Continue processing events until the worktree is dropped.
2711        loop {
2712            select_biased! {
2713                // Process any path refresh requests from the worktree. Prioritize
2714                // these before handling changes reported by the filesystem.
2715                request = self.refresh_requests_rx.recv().fuse() => {
2716                    let Ok((paths, barrier)) = request else { break };
2717                    if !self.process_refresh_request(paths.clone(), barrier).await {
2718                        return;
2719                    }
2720                }
2721
2722                events = events_rx.next().fuse() => {
2723                    let Some(events) = events else { break };
2724                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2725                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2726                        paths.extend(more_events.into_iter().map(|e| e.path));
2727                    }
2728                    self.process_events(paths.clone()).await;
2729                }
2730            }
2731        }
2732    }
2733
2734    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2735        if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
2736            paths.sort_unstable();
2737            util::extend_sorted(
2738                &mut self.prev_state.lock().event_paths,
2739                paths,
2740                usize::MAX,
2741                Ord::cmp,
2742            );
2743        }
2744        self.send_status_update(false, Some(barrier))
2745    }
2746
2747    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2748        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2749        let paths = self
2750            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2751            .await;
2752        if let Some(paths) = &paths {
2753            util::extend_sorted(
2754                &mut self.prev_state.lock().event_paths,
2755                paths.iter().cloned(),
2756                usize::MAX,
2757                Ord::cmp,
2758            );
2759        }
2760        drop(scan_job_tx);
2761        self.scan_dirs(false, scan_job_rx).await;
2762
2763        self.update_ignore_statuses().await;
2764
2765        let mut snapshot = self.snapshot.lock();
2766
2767        if let Some(paths) = paths {
2768            for path in paths {
2769                self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
2770            }
2771        }
2772
2773        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2774        git_repositories.retain(|work_directory_id, _| {
2775            snapshot
2776                .entry_for_id(*work_directory_id)
2777                .map_or(false, |entry| {
2778                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2779                })
2780        });
2781        snapshot.git_repositories = git_repositories;
2782
2783        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2784        git_repository_entries.retain(|_, entry| {
2785            snapshot
2786                .git_repositories
2787                .get(&entry.work_directory.0)
2788                .is_some()
2789        });
2790        snapshot.snapshot.repository_entries = git_repository_entries;
2791        snapshot.completed_scan_id = snapshot.scan_id;
2792        drop(snapshot);
2793
2794        self.send_status_update(false, None);
2795        self.prev_state.lock().event_paths.clear();
2796    }
2797
2798    async fn scan_dirs(
2799        &self,
2800        enable_progress_updates: bool,
2801        scan_jobs_rx: channel::Receiver<ScanJob>,
2802    ) {
2803        use futures::FutureExt as _;
2804
2805        if self
2806            .status_updates_tx
2807            .unbounded_send(ScanState::Started)
2808            .is_err()
2809        {
2810            return;
2811        }
2812
2813        let progress_update_count = AtomicUsize::new(0);
2814        self.executor
2815            .scoped(|scope| {
2816                for _ in 0..self.executor.num_cpus() {
2817                    scope.spawn(async {
2818                        let mut last_progress_update_count = 0;
2819                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2820                        futures::pin_mut!(progress_update_timer);
2821
2822                        loop {
2823                            select_biased! {
2824                                // Process any path refresh requests before moving on to process
2825                                // the scan queue, so that user operations are prioritized.
2826                                request = self.refresh_requests_rx.recv().fuse() => {
2827                                    let Ok((paths, barrier)) = request else { break };
2828                                    if !self.process_refresh_request(paths, barrier).await {
2829                                        return;
2830                                    }
2831                                }
2832
2833                                // Send periodic progress updates to the worktree. Use an atomic counter
2834                                // to ensure that only one of the workers sends a progress update after
2835                                // the update interval elapses.
2836                                _ = progress_update_timer => {
2837                                    match progress_update_count.compare_exchange(
2838                                        last_progress_update_count,
2839                                        last_progress_update_count + 1,
2840                                        SeqCst,
2841                                        SeqCst
2842                                    ) {
2843                                        Ok(_) => {
2844                                            last_progress_update_count += 1;
2845                                            self.send_status_update(true, None);
2846                                        }
2847                                        Err(count) => {
2848                                            last_progress_update_count = count;
2849                                        }
2850                                    }
2851                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2852                                }
2853
2854                                // Recursively load directories from the file system.
2855                                job = scan_jobs_rx.recv().fuse() => {
2856                                    let Ok(job) = job else { break };
2857                                    if let Err(err) = self.scan_dir(&job).await {
2858                                        if job.path.as_ref() != Path::new("") {
2859                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2860                                        }
2861                                    }
2862                                }
2863                            }
2864                        }
2865                    })
2866                }
2867            })
2868            .await;
2869    }
2870
2871    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
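            // Compute the change set against the snapshot we last reported, then remember
            // the current snapshot as the baseline for the next update.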
2872        let mut prev_state = self.prev_state.lock();
2873        let new_snapshot = self.snapshot.lock().clone();
2874        let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());
2875
2876        let changes = self.build_change_set(
2877            &old_snapshot,
2878            &new_snapshot.snapshot,
2879            &prev_state.event_paths,
2880        );
2881
2882        self.status_updates_tx
2883            .unbounded_send(ScanState::Updated {
2884                snapshot: new_snapshot,
2885                changes,
2886                scanning,
2887                barrier,
2888            })
2889            .is_ok()
2890    }
2891
2892    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2893        let mut new_entries: Vec<Entry> = Vec::new();
2894        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
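            // `new_jobs` keeps one slot per child directory (`None` when a recursive
            // symlink is skipped), so the `.gitignore` fixup below can pair each directory
            // entry with its scan job by position.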
2895        let mut ignore_stack = job.ignore_stack.clone();
2896        let mut new_ignore = None;
2897        let (root_abs_path, root_char_bag, next_entry_id) = {
2898            let snapshot = self.snapshot.lock();
2899            (
2900                snapshot.abs_path().clone(),
2901                snapshot.root_char_bag,
2902                self.next_entry_id.clone(),
2903            )
2904        };
2905        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2906        while let Some(child_abs_path) = child_paths.next().await {
2907            let child_abs_path: Arc<Path> = match child_abs_path {
2908                Ok(child_abs_path) => child_abs_path.into(),
2909                Err(error) => {
2910                    log::error!("error processing entry {:?}", error);
2911                    continue;
2912                }
2913            };
2914
2915            let child_name = child_abs_path.file_name().unwrap();
2916            let child_path: Arc<Path> = job.path.join(child_name).into();
2917            let child_metadata = match self.fs.metadata(&child_abs_path).await {
2918                Ok(Some(metadata)) => metadata,
2919                Ok(None) => continue,
2920                Err(err) => {
2921                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
2922                    continue;
2923                }
2924            };
2925
2926            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2927            if child_name == *GITIGNORE {
2928                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2929                    Ok(ignore) => {
2930                        let ignore = Arc::new(ignore);
2931                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2932                        new_ignore = Some(ignore);
2933                    }
2934                    Err(error) => {
2935                        log::error!(
2936                            "error loading .gitignore file {:?} - {:?}",
2937                            child_name,
2938                            error
2939                        );
2940                    }
2941                }
2942
2943                // Update the ignore status of any child entries we've already processed to reflect
2944                // the ignore file in the current directory. Because `.gitignore` starts with a `.`,
2945                // it sorts near the beginning of the directory, so such entries should rarely be
2946                // numerous. Update the ignore stack associated with any new jobs as well.
2947                let mut new_jobs = new_jobs.iter_mut();
2948                for entry in &mut new_entries {
2949                    let entry_abs_path = root_abs_path.join(&entry.path);
2950                    entry.is_ignored =
2951                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2952
2953                    if entry.is_dir() {
2954                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2955                            job.ignore_stack = if entry.is_ignored {
2956                                IgnoreStack::all()
2957                            } else {
2958                                ignore_stack.clone()
2959                            };
2960                        }
2961                    }
2962                }
2963            }
2964
2965            let mut child_entry = Entry::new(
2966                child_path.clone(),
2967                &child_metadata,
2968                &next_entry_id,
2969                root_char_bag,
2970            );
2971
2972            if child_entry.is_dir() {
2973                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2974                child_entry.is_ignored = is_ignored;
2975
2976                // Avoid recursing indefinitely (and eventually crashing) when a symlink cycle leads back to an ancestor directory
2977                if !job.ancestor_inodes.contains(&child_entry.inode) {
2978                    let mut ancestor_inodes = job.ancestor_inodes.clone();
2979                    ancestor_inodes.insert(child_entry.inode);
2980
2981                    new_jobs.push(Some(ScanJob {
2982                        abs_path: child_abs_path,
2983                        path: child_path,
2984                        ignore_stack: if is_ignored {
2985                            IgnoreStack::all()
2986                        } else {
2987                            ignore_stack.clone()
2988                        },
2989                        ancestor_inodes,
2990                        scan_queue: job.scan_queue.clone(),
2991                    }));
2992                } else {
2993                    new_jobs.push(None);
2994                }
2995            } else {
2996                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2997            }
2998
2999            new_entries.push(child_entry);
3000        }
3001
3002        self.snapshot.lock().populate_dir(
3003            job.path.clone(),
3004            new_entries,
3005            new_ignore,
3006            self.fs.as_ref(),
3007        );
3008
3009        for new_job in new_jobs {
3010            if let Some(new_job) = new_job {
3011                job.scan_queue.send(new_job).await.unwrap();
3012            }
3013        }
3014
3015        Ok(())
3016    }
3017
3018    async fn reload_entries_for_paths(
3019        &self,
3020        mut abs_paths: Vec<PathBuf>,
3021        scan_queue_tx: Option<Sender<ScanJob>>,
3022    ) -> Option<Vec<Arc<Path>>> {
3023        let doing_recursive_update = scan_queue_tx.is_some();
3024
3025        abs_paths.sort_unstable();
3026        abs_paths.dedup_by(|a, b| a.starts_with(&b));
3027
3028        let root_abs_path = self.snapshot.lock().abs_path.clone();
3029        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
3030        let metadata = futures::future::join_all(
3031            abs_paths
3032                .iter()
3033                .map(|abs_path| self.fs.metadata(&abs_path))
3034                .collect::<Vec<_>>(),
3035        )
3036        .await;
3037
3038        let mut snapshot = self.snapshot.lock();
3039        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
3040        snapshot.scan_id += 1;
3041        if is_idle && !doing_recursive_update {
3042            snapshot.completed_scan_id = snapshot.scan_id;
3043        }
3044
3045        // Remove any entries for paths that no longer exist or are being recursively
3046        // refreshed. Do this before adding any new entries, so that renames can be
3047        // detected regardless of the order of the paths.
3048        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
3049        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
3050            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
3051                if matches!(metadata, Ok(None)) || doing_recursive_update {
3052                    snapshot.remove_path(path);
3053                }
3054                event_paths.push(path.into());
3055            } else {
3056                log::error!(
3057                    "unexpected event {:?} for root path {:?}",
3058                    abs_path,
3059                    root_canonical_path
3060                );
3061            }
3062        }
3063
3064        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
3065            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
3066
3067            match metadata {
3068                Ok(Some(metadata)) => {
3069                    let ignore_stack =
3070                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
3071                    let mut fs_entry = Entry::new(
3072                        path.clone(),
3073                        &metadata,
3074                        self.next_entry_id.as_ref(),
3075                        snapshot.root_char_bag,
3076                    );
3077                    fs_entry.is_ignored = ignore_stack.is_all();
3078                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
3079
3080                    if let Some(scan_queue_tx) = &scan_queue_tx {
3081                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
3082                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
3083                            ancestor_inodes.insert(metadata.inode);
3084                            smol::block_on(scan_queue_tx.send(ScanJob {
3085                                abs_path,
3086                                path,
3087                                ignore_stack,
3088                                ancestor_inodes,
3089                                scan_queue: scan_queue_tx.clone(),
3090                            }))
3091                            .unwrap();
3092                        }
3093                    }
3094                }
3095                Ok(None) => {
3096                    self.remove_repo_path(&path, &mut snapshot);
3097                }
3098                Err(err) => {
3099                    // TODO - create a special 'error' entry in the entries tree to mark this
3100                    log::error!("error reading file on event {:?}", err);
3101                }
3102            }
3103        }
3104
3105        Some(event_paths)
3106    }
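    // A minimal illustration (not part of the original source) of the path de-duplication at
    // the top of `reload_entries_for_paths`: after sorting, `dedup_by` drops any path that is
    // a descendant of the previously retained path, so a directory and its children collapse
    // into a single refresh root.
    //
    //     let mut paths = vec![
    //         PathBuf::from("/root/a/b"),
    //         PathBuf::from("/root/a"),
    //         PathBuf::from("/root/c"),
    //     ];
    //     paths.sort_unstable();
    //     paths.dedup_by(|a, b| a.starts_with(&b));
    //     assert_eq!(paths, [PathBuf::from("/root/a"), PathBuf::from("/root/c")]);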
3107
3108    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
3109        if !path
3110            .components()
3111            .any(|component| component.as_os_str() == *DOT_GIT)
3112        {
3113            let scan_id = snapshot.scan_id;
3114
3115            if let Some(repository) = snapshot.repository_for_work_directory(path) {
3116                let entry = repository.work_directory.0;
3117                snapshot.git_repositories.remove(&entry);
3118                snapshot
3119                    .snapshot
3120                    .repository_entries
3121                    .remove(&RepositoryWorkDirectory(path.into()));
3122                return Some(());
3123            }
3124
3125            let repo = snapshot.repository_for_path(&path)?;
3126
3127            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
3128
3129            let work_dir = repo.work_directory(snapshot)?;
3130            let work_dir_id = repo.work_directory;
3131
3132            snapshot
3133                .git_repositories
3134                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3135
3136            snapshot.repository_entries.update(&work_dir, |entry| {
3137                entry
3138                    .statuses
3139                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
3140            });
3141        }
3142
3143        Some(())
3144    }
3145
3146    fn reload_repo_for_file_path(
3147        &self,
3148        path: &Path,
3149        snapshot: &mut LocalSnapshot,
3150        fs: &dyn Fs,
3151    ) -> Option<()> {
3152        let scan_id = snapshot.scan_id;
3153
3154        if path
3155            .components()
3156            .any(|component| component.as_os_str() == *DOT_GIT)
3157        {
3158            let (entry_id, repo_ptr) = {
3159                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
3160                    let dot_git_dir = path.ancestors()
3161                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
3162                        .next()?;
3163
3164                    snapshot.build_repo(dot_git_dir.into(), fs);
3165                    return None;
3166                };
3167                if repo.full_scan_id == scan_id {
3168                    return None;
3169                }
3170                (*entry_id, repo.repo_ptr.to_owned())
3171            };
3172
3173            let work_dir = snapshot
3174                .entry_for_id(entry_id)
3175                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
3176
3177            let repo = repo_ptr.lock();
3178            repo.reload_index();
3179            let branch = repo.branch_name();
3180            let statuses = repo.statuses().unwrap_or_default();
3181
3182            snapshot.git_repositories.update(&entry_id, |entry| {
3183                entry.scan_id = scan_id;
3184                entry.full_scan_id = scan_id;
3185            });
3186
3187            snapshot.repository_entries.update(&work_dir, |entry| {
3188                entry.branch = branch.map(Into::into);
3189                entry.statuses = statuses;
3190            });
3191        } else {
3192            if snapshot
3193                .entry_for_path(&path)
3194                .map(|entry| entry.is_ignored)
3195                .unwrap_or(false)
3196            {
3197                self.remove_repo_path(&path, snapshot);
3198                return None;
3199            }
3200
3201            let repo = snapshot.repository_for_path(&path)?;
3202
3203            let work_dir = repo.work_directory(snapshot)?;
3204            let work_dir_id = repo.work_directory.clone();
3205
3206            snapshot
3207                .git_repositories
3208                .update(&work_dir_id, |entry| entry.scan_id = scan_id);
3209
3210            let local_repo = snapshot.get_local_repo(&repo)?.to_owned();
3211
3212            // Short-circuit if we've already scanned everything
3213            if local_repo.full_scan_id == scan_id {
3214                return None;
3215            }
3216
3217            let mut repository = snapshot.repository_entries.remove(&work_dir)?;
3218
3219            for entry in snapshot.descendent_entries(false, false, path) {
3220                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
3221                    continue;
3222                };
3223
3224                let status = local_repo.repo_ptr.lock().status(&repo_path);
3225                if let Some(status) = status {
3226                    repository.statuses.insert(repo_path.clone(), status);
3227                } else {
3228                    repository.statuses.remove(&repo_path);
3229                }
3230            }
3231
3232            snapshot.repository_entries.insert(work_dir, repository)
3233        }
3234
3235        Some(())
3236    }
3237
3238    async fn update_ignore_statuses(&self) {
3239        use futures::FutureExt as _;
3240
3241        let mut snapshot = self.snapshot.lock().clone();
3242        let mut ignores_to_update = Vec::new();
3243        let mut ignores_to_delete = Vec::new();
3244        let abs_path = snapshot.abs_path.clone();
3245        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
3246            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
3247                if *needs_update {
3248                    *needs_update = false;
3249                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
3250                        ignores_to_update.push(parent_abs_path.clone());
3251                    }
3252                }
3253
3254                let ignore_path = parent_path.join(&*GITIGNORE);
3255                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
3256                    ignores_to_delete.push(parent_abs_path.clone());
3257                }
3258            }
3259        }
3260
3261        for parent_abs_path in ignores_to_delete {
3262            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
3263            self.snapshot
3264                .lock()
3265                .ignores_by_parent_abs_path
3266                .remove(&parent_abs_path);
3267        }
3268
3269        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
3270        ignores_to_update.sort_unstable();
3271        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
3272        while let Some(parent_abs_path) = ignores_to_update.next() {
3273            while ignores_to_update
3274                .peek()
3275                .map_or(false, |p| p.starts_with(&parent_abs_path))
3276            {
3277                ignores_to_update.next().unwrap();
3278            }
3279
3280            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
3281            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
3282                abs_path: parent_abs_path,
3283                ignore_stack,
3284                ignore_queue: ignore_queue_tx.clone(),
3285            }))
3286            .unwrap();
3287        }
3288        drop(ignore_queue_tx);
3289
3290        self.executor
3291            .scoped(|scope| {
3292                for _ in 0..self.executor.num_cpus() {
3293                    scope.spawn(async {
3294                        loop {
3295                            select_biased! {
3296                                // Process any path refresh requests before moving on to process
3297                                // the queue of ignore statuses.
3298                                request = self.refresh_requests_rx.recv().fuse() => {
3299                                    let Ok((paths, barrier)) = request else { break };
3300                                    if !self.process_refresh_request(paths, barrier).await {
3301                                        return;
3302                                    }
3303                                }
3304
3305                                // Recursively process directories whose ignores have changed.
3306                                job = ignore_queue_rx.recv().fuse() => {
3307                                    let Ok(job) = job else { break };
3308                                    self.update_ignore_status(job, &snapshot).await;
3309                                }
3310                            }
3311                        }
3312                    });
3313                }
3314            })
3315            .await;
3316    }
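    // An illustration (not from the original source) of the prefix pruning in the loop above:
    // because `ignores_to_update` is sorted, any path that starts with the previously dequeued
    // path is a descendant and will be revisited by that ancestor's `UpdateIgnoreStatusJob`,
    // so for a sorted list like ["/root/a", "/root/a/b", "/root/c"] only "/root/a" and
    // "/root/c" are enqueued.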
3317
3318    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
3319        let mut ignore_stack = job.ignore_stack;
3320        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
3321            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3322        }
3323
3324        let mut entries_by_id_edits = Vec::new();
3325        let mut entries_by_path_edits = Vec::new();
3326        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
3327        for mut entry in snapshot.child_entries(path).cloned() {
3328            let was_ignored = entry.is_ignored;
3329            let abs_path = snapshot.abs_path().join(&entry.path);
3330            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
3331            if entry.is_dir() {
3332                let child_ignore_stack = if entry.is_ignored {
3333                    IgnoreStack::all()
3334                } else {
3335                    ignore_stack.clone()
3336                };
3337                job.ignore_queue
3338                    .send(UpdateIgnoreStatusJob {
3339                        abs_path: abs_path.into(),
3340                        ignore_stack: child_ignore_stack,
3341                        ignore_queue: job.ignore_queue.clone(),
3342                    })
3343                    .await
3344                    .unwrap();
3345            }
3346
3347            if entry.is_ignored != was_ignored {
3348                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
3349                path_entry.scan_id = snapshot.scan_id;
3350                path_entry.is_ignored = entry.is_ignored;
3351                entries_by_id_edits.push(Edit::Insert(path_entry));
3352                entries_by_path_edits.push(Edit::Insert(entry));
3353            }
3354        }
3355
3356        let mut snapshot = self.snapshot.lock();
3357        snapshot.entries_by_path.edit(entries_by_path_edits, &());
3358        snapshot.entries_by_id.edit(entries_by_id_edits, &());
3359    }
3360
3361    fn build_change_set(
3362        &self,
3363        old_snapshot: &Snapshot,
3364        new_snapshot: &Snapshot,
3365        event_paths: &[Arc<Path>],
3366    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
3367        use PathChange::{Added, AddedOrUpdated, Removed, Updated};
3368
3369        let mut changes = HashMap::default();
3370        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3371        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3372        let received_before_initialized = !self.finished_initial_scan;
3373
3374        for path in event_paths {
3375            let path = PathKey(path.clone());
3376            old_paths.seek(&path, Bias::Left, &());
3377            new_paths.seek(&path, Bias::Left, &());
3378
3379            loop {
3380                match (old_paths.item(), new_paths.item()) {
3381                    (Some(old_entry), Some(new_entry)) => {
3382                        if old_entry.path > path.0
3383                            && new_entry.path > path.0
3384                            && !old_entry.path.starts_with(&path.0)
3385                            && !new_entry.path.starts_with(&path.0)
3386                        {
3387                            break;
3388                        }
3389
3390                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3391                            Ordering::Less => {
3392                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3393                                old_paths.next(&());
3394                            }
3395                            Ordering::Equal => {
3396                                if received_before_initialized {
3397                                    // If the worktree was not fully initialized when this event was generated,
3398                                    // we can't know whether this entry was added during the scan or whether
3399                                    // it was merely updated.
3400                                    changes.insert(
3401                                        (new_entry.path.clone(), new_entry.id),
3402                                        AddedOrUpdated,
3403                                    );
3404                                } else if old_entry.mtime != new_entry.mtime {
3405                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
3406                                }
3407                                old_paths.next(&());
3408                                new_paths.next(&());
3409                            }
3410                            Ordering::Greater => {
3411                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
3412                                new_paths.next(&());
3413                            }
3414                        }
3415                    }
3416                    (Some(old_entry), None) => {
3417                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
3418                        old_paths.next(&());
3419                    }
3420                    (None, Some(new_entry)) => {
3421                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
3422                        new_paths.next(&());
3423                    }
3424                    (None, None) => break,
3425                }
3426            }
3427        }
3428
3429        changes
3430    }
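    // A sketch (not in the original source) of how the map returned by `build_change_set` is
    // shaped. Given an old snapshot containing `a.txt` and `b.txt`, and a new snapshot in which
    // `a.txt` was modified, `b.txt` was removed, and `c.txt` was created, the result would be,
    // keyed by (path, entry id):
    //
    //     ("a.txt", id_a) => Updated   (or AddedOrUpdated before the initial scan completes)
    //     ("b.txt", id_b) => Removed
    //     ("c.txt", id_c) => Added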
3431
3432    async fn progress_timer(&self, running: bool) {
3433        if !running {
3434            return futures::future::pending().await;
3435        }
3436
3437        #[cfg(any(test, feature = "test-support"))]
3438        if self.fs.is_fake() {
3439            return self.executor.simulate_random_delay().await;
3440        }
3441
3442        smol::Timer::after(Duration::from_millis(100)).await;
3443    }
3444}
3445
3446fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3447    let mut result = root_char_bag;
3448    result.extend(
3449        path.to_string_lossy()
3450            .chars()
3451            .map(|c| c.to_ascii_lowercase()),
3452    );
3453    result
3454}
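// A minimal usage sketch (not part of the original source), assuming `CharBag` implements
// `Default` in addition to the `Extend<char>` that the body above relies on:
//
//     let mut root_char_bag = CharBag::default();
//     root_char_bag.extend("root".chars());
//     let bag = char_bag_for_path(root_char_bag, Path::new("src/MAIN.rs"));
//     // `bag` now also contains the lowercased characters of "src/main.rs", letting fuzzy
//     // matching cheaply reject queries whose characters cannot possibly appear in the path.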
3455
3456struct ScanJob {
3457    abs_path: Arc<Path>,
3458    path: Arc<Path>,
3459    ignore_stack: Arc<IgnoreStack>,
3460    scan_queue: Sender<ScanJob>,
3461    ancestor_inodes: TreeSet<u64>,
3462}
3463
3464struct UpdateIgnoreStatusJob {
3465    abs_path: Arc<Path>,
3466    ignore_stack: Arc<IgnoreStack>,
3467    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3468}
3469
3470pub trait WorktreeHandle {
3471    #[cfg(any(test, feature = "test-support"))]
3472    fn flush_fs_events<'a>(
3473        &self,
3474        cx: &'a gpui::TestAppContext,
3475    ) -> futures::future::LocalBoxFuture<'a, ()>;
3476}
3477
3478impl WorktreeHandle for ModelHandle<Worktree> {
3479    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
3480    // occurred before the worktree was constructed. These events can cause the worktree to perform
3481    // extra directory scans and emit extra scan-state notifications.
3482    //
3483    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3484    // to ensure that all redundant FS events have already been processed.
3485    #[cfg(any(test, feature = "test-support"))]
3486    fn flush_fs_events<'a>(
3487        &self,
3488        cx: &'a gpui::TestAppContext,
3489    ) -> futures::future::LocalBoxFuture<'a, ()> {
3490        use smol::future::FutureExt;
3491
3492        let filename = "fs-event-sentinel";
3493        let tree = self.clone();
3494        let (fs, root_path) = self.read_with(cx, |tree, _| {
3495            let tree = tree.as_local().unwrap();
3496            (tree.fs.clone(), tree.abs_path().clone())
3497        });
3498
3499        async move {
3500            fs.create_file(&root_path.join(filename), Default::default())
3501                .await
3502                .unwrap();
3503            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3504                .await;
3505
3506            fs.remove_file(&root_path.join(filename), Default::default())
3507                .await
3508                .unwrap();
3509            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3510                .await;
3511
3512            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3513                .await;
3514        }
3515        .boxed_local()
3516    }
3517}
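// In the tests below, `flush_fs_events` is awaited right after performing real filesystem
// mutations, e.g. `tree.flush_fs_events(cx).await;`, so that assertions observe a settled
// snapshot rather than one that is still catching up on events.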
3518
3519#[derive(Clone, Debug)]
3520struct TraversalProgress<'a> {
3521    max_path: &'a Path,
3522    count: usize,
3523    visible_count: usize,
3524    file_count: usize,
3525    visible_file_count: usize,
3526}
3527
3528impl<'a> TraversalProgress<'a> {
3529    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3530        match (include_ignored, include_dirs) {
3531            (true, true) => self.count,
3532            (true, false) => self.file_count,
3533            (false, true) => self.visible_count,
3534            (false, false) => self.visible_file_count,
3535        }
3536    }
3537}
3538
3539impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3540    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3541        self.max_path = summary.max_path.as_ref();
3542        self.count += summary.count;
3543        self.visible_count += summary.visible_count;
3544        self.file_count += summary.file_count;
3545        self.visible_file_count += summary.visible_file_count;
3546    }
3547}
3548
3549impl<'a> Default for TraversalProgress<'a> {
3550    fn default() -> Self {
3551        Self {
3552            max_path: Path::new(""),
3553            count: 0,
3554            visible_count: 0,
3555            file_count: 0,
3556            visible_file_count: 0,
3557        }
3558    }
3559}
3560
3561pub struct Traversal<'a> {
3562    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3563    include_ignored: bool,
3564    include_dirs: bool,
3565}
3566
3567impl<'a> Traversal<'a> {
3568    pub fn advance(&mut self) -> bool {
3569        self.cursor.seek_forward(
3570            &TraversalTarget::Count {
3571                count: self.end_offset() + 1,
3572                include_dirs: self.include_dirs,
3573                include_ignored: self.include_ignored,
3574            },
3575            Bias::Left,
3576            &(),
3577        )
3578    }
3579
3580    pub fn advance_to_sibling(&mut self) -> bool {
3581        while let Some(entry) = self.cursor.item() {
3582            self.cursor.seek_forward(
3583                &TraversalTarget::PathSuccessor(&entry.path),
3584                Bias::Left,
3585                &(),
3586            );
3587            if let Some(entry) = self.cursor.item() {
3588                if (self.include_dirs || !entry.is_dir())
3589                    && (self.include_ignored || !entry.is_ignored)
3590                {
3591                    return true;
3592                }
3593            }
3594        }
3595        false
3596    }
3597
3598    pub fn entry(&self) -> Option<&'a Entry> {
3599        self.cursor.item()
3600    }
3601
3602    pub fn start_offset(&self) -> usize {
3603        self.cursor
3604            .start()
3605            .count(self.include_dirs, self.include_ignored)
3606    }
3607
3608    pub fn end_offset(&self) -> usize {
3609        self.cursor
3610            .end(&())
3611            .count(self.include_dirs, self.include_ignored)
3612    }
3613}
3614
3615impl<'a> Iterator for Traversal<'a> {
3616    type Item = &'a Entry;
3617
3618    fn next(&mut self) -> Option<Self::Item> {
3619        if let Some(item) = self.entry() {
3620            self.advance();
3621            Some(item)
3622        } else {
3623            None
3624        }
3625    }
3626}
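// Illustrative sketch (not from the original source): a `Traversal` walks entries in path
// order and can be consumed as a plain iterator, as the snapshot accessors used in the tests
// below do (e.g. `entries(include_ignored)`):
//
//     for entry in snapshot.entries(false) {
//         // Ignored entries are skipped because `include_ignored` is false.
//         println!("{:?}", entry.path);
//     }
//
// `advance_to_sibling` is what `ChildEntriesIter` (below) uses to skip over a directory's
// contents and jump to the next entry at the same level.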
3627
3628#[derive(Debug)]
3629enum TraversalTarget<'a> {
3630    Path(&'a Path),
3631    PathSuccessor(&'a Path),
3632    Count {
3633        count: usize,
3634        include_ignored: bool,
3635        include_dirs: bool,
3636    },
3637}
3638
3639impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3640    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3641        match self {
3642            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3643            TraversalTarget::PathSuccessor(path) => {
3644                if !cursor_location.max_path.starts_with(path) {
3645                    Ordering::Equal
3646                } else {
3647                    Ordering::Greater
3648                }
3649            }
3650            TraversalTarget::Count {
3651                count,
3652                include_dirs,
3653                include_ignored,
3654            } => Ord::cmp(
3655                count,
3656                &cursor_location.count(*include_dirs, *include_ignored),
3657            ),
3658        }
3659    }
3660}
3661
3662struct ChildEntriesIter<'a> {
3663    parent_path: &'a Path,
3664    traversal: Traversal<'a>,
3665}
3666
3667impl<'a> Iterator for ChildEntriesIter<'a> {
3668    type Item = &'a Entry;
3669
3670    fn next(&mut self) -> Option<Self::Item> {
3671        if let Some(item) = self.traversal.entry() {
3672            if item.path.starts_with(&self.parent_path) {
3673                self.traversal.advance_to_sibling();
3674                return Some(item);
3675            }
3676        }
3677        None
3678    }
3679}
3680
3681struct DescendentEntriesIter<'a> {
3682    parent_path: &'a Path,
3683    traversal: Traversal<'a>,
3684}
3685
3686impl<'a> Iterator for DescendentEntriesIter<'a> {
3687    type Item = &'a Entry;
3688
3689    fn next(&mut self) -> Option<Self::Item> {
3690        if let Some(item) = self.traversal.entry() {
3691            if item.path.starts_with(&self.parent_path) {
3692                self.traversal.advance();
3693                return Some(item);
3694            }
3695        }
3696        None
3697    }
3698}
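// An illustrative contrast (not part of the original source) between the two iterators above,
// using the snapshot accessors exercised elsewhere in this file: `child_entries` yields only a
// directory's immediate children (it advances to the next sibling after each item), while
// `descendent_entries` walks the entire subtree:
//
//     let children: Vec<_> = snapshot.child_entries(Path::new("b")).collect();
//     let subtree: Vec<_> = snapshot
//         .descendent_entries(true, false, Path::new("b"))
//         .collect();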
3699
3700impl<'a> From<&'a Entry> for proto::Entry {
3701    fn from(entry: &'a Entry) -> Self {
3702        Self {
3703            id: entry.id.to_proto(),
3704            is_dir: entry.is_dir(),
3705            path: entry.path.to_string_lossy().into(),
3706            inode: entry.inode,
3707            mtime: Some(entry.mtime.into()),
3708            is_symlink: entry.is_symlink,
3709            is_ignored: entry.is_ignored,
3710        }
3711    }
3712}
3713
3714impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3715    type Error = anyhow::Error;
3716
3717    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3718        if let Some(mtime) = entry.mtime {
3719            let kind = if entry.is_dir {
3720                EntryKind::Dir
3721            } else {
3722                let mut char_bag = *root_char_bag;
3723                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3724                EntryKind::File(char_bag)
3725            };
3726            let path: Arc<Path> = PathBuf::from(entry.path).into();
3727            Ok(Entry {
3728                id: ProjectEntryId::from_proto(entry.id),
3729                kind,
3730                path,
3731                inode: entry.inode,
3732                mtime: mtime.into(),
3733                is_symlink: entry.is_symlink,
3734                is_ignored: entry.is_ignored,
3735            })
3736        } else {
3737            Err(anyhow!(
3738                "missing mtime in remote worktree entry {:?}",
3739                entry.path
3740            ))
3741        }
3742    }
3743}
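// A hedged sketch (not in the original source) of the round trip these two conversions provide
// when worktree state is synced to a remote peer: a local `Entry` is serialized to a
// `proto::Entry`, sent over the wire, and reconstructed against the receiving worktree's root
// `CharBag`:
//
//     let message = proto::Entry::from(&entry);
//     let remote_entry = Entry::try_from((&root_char_bag, message))?;
//     assert_eq!(remote_entry.path, entry.path);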
3744
3745#[cfg(test)]
3746mod tests {
3747    use super::*;
3748    use fs::{FakeFs, RealFs};
3749    use gpui::{executor::Deterministic, TestAppContext};
3750    use pretty_assertions::assert_eq;
3751    use rand::prelude::*;
3752    use serde_json::json;
3753    use std::{env, fmt::Write};
3754    use util::{http::FakeHttpClient, test::temp_tree};
3755
3756    #[gpui::test]
3757    async fn test_traversal(cx: &mut TestAppContext) {
3758        let fs = FakeFs::new(cx.background());
3759        fs.insert_tree(
3760            "/root",
3761            json!({
3762               ".gitignore": "a/b\n",
3763               "a": {
3764                   "b": "",
3765                   "c": "",
3766               }
3767            }),
3768        )
3769        .await;
3770
3771        let http_client = FakeHttpClient::with_404_response();
3772        let client = cx.read(|cx| Client::new(http_client, cx));
3773
3774        let tree = Worktree::local(
3775            client,
3776            Path::new("/root"),
3777            true,
3778            fs,
3779            Default::default(),
3780            &mut cx.to_async(),
3781        )
3782        .await
3783        .unwrap();
3784        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3785            .await;
3786
3787        tree.read_with(cx, |tree, _| {
3788            assert_eq!(
3789                tree.entries(false)
3790                    .map(|entry| entry.path.as_ref())
3791                    .collect::<Vec<_>>(),
3792                vec![
3793                    Path::new(""),
3794                    Path::new(".gitignore"),
3795                    Path::new("a"),
3796                    Path::new("a/c"),
3797                ]
3798            );
3799            assert_eq!(
3800                tree.entries(true)
3801                    .map(|entry| entry.path.as_ref())
3802                    .collect::<Vec<_>>(),
3803                vec![
3804                    Path::new(""),
3805                    Path::new(".gitignore"),
3806                    Path::new("a"),
3807                    Path::new("a/b"),
3808                    Path::new("a/c"),
3809                ]
3810            );
3811        })
3812    }
3813
3814    #[gpui::test]
3815    async fn test_descendent_entries(cx: &mut TestAppContext) {
3816        let fs = FakeFs::new(cx.background());
3817        fs.insert_tree(
3818            "/root",
3819            json!({
3820                "a": "",
3821                "b": {
3822                   "c": {
3823                       "d": ""
3824                   },
3825                   "e": {}
3826                },
3827                "f": "",
3828                "g": {
3829                    "h": {}
3830                },
3831                "i": {
3832                    "j": {
3833                        "k": ""
3834                    },
3835                    "l": {
3836
3837                    }
3838                },
3839                ".gitignore": "i/j\n",
3840            }),
3841        )
3842        .await;
3843
3844        let http_client = FakeHttpClient::with_404_response();
3845        let client = cx.read(|cx| Client::new(http_client, cx));
3846
3847        let tree = Worktree::local(
3848            client,
3849            Path::new("/root"),
3850            true,
3851            fs,
3852            Default::default(),
3853            &mut cx.to_async(),
3854        )
3855        .await
3856        .unwrap();
3857        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3858            .await;
3859
3860        tree.read_with(cx, |tree, _| {
3861            assert_eq!(
3862                tree.descendent_entries(false, false, Path::new("b"))
3863                    .map(|entry| entry.path.as_ref())
3864                    .collect::<Vec<_>>(),
3865                vec![Path::new("b/c/d"),]
3866            );
3867            assert_eq!(
3868                tree.descendent_entries(true, false, Path::new("b"))
3869                    .map(|entry| entry.path.as_ref())
3870                    .collect::<Vec<_>>(),
3871                vec![
3872                    Path::new("b"),
3873                    Path::new("b/c"),
3874                    Path::new("b/c/d"),
3875                    Path::new("b/e"),
3876                ]
3877            );
3878
3879            assert_eq!(
3880                tree.descendent_entries(false, false, Path::new("g"))
3881                    .map(|entry| entry.path.as_ref())
3882                    .collect::<Vec<_>>(),
3883                Vec::<PathBuf>::new()
3884            );
3885            assert_eq!(
3886                tree.descendent_entries(true, false, Path::new("g"))
3887                    .map(|entry| entry.path.as_ref())
3888                    .collect::<Vec<_>>(),
3889                vec![Path::new("g"), Path::new("g/h"),]
3890            );
3891
3892            assert_eq!(
3893                tree.descendent_entries(false, false, Path::new("i"))
3894                    .map(|entry| entry.path.as_ref())
3895                    .collect::<Vec<_>>(),
3896                Vec::<PathBuf>::new()
3897            );
3898            assert_eq!(
3899                tree.descendent_entries(false, true, Path::new("i"))
3900                    .map(|entry| entry.path.as_ref())
3901                    .collect::<Vec<_>>(),
3902                vec![Path::new("i/j/k")]
3903            );
3904            assert_eq!(
3905                tree.descendent_entries(true, false, Path::new("i"))
3906                    .map(|entry| entry.path.as_ref())
3907                    .collect::<Vec<_>>(),
3908                vec![Path::new("i"), Path::new("i/l"),]
3909            );
3910        })
3911    }
3912
3913    #[gpui::test(iterations = 10)]
3914    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3915        let fs = FakeFs::new(cx.background());
3916        fs.insert_tree(
3917            "/root",
3918            json!({
3919                "lib": {
3920                    "a": {
3921                        "a.txt": ""
3922                    },
3923                    "b": {
3924                        "b.txt": ""
3925                    }
3926                }
3927            }),
3928        )
3929        .await;
3930        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3931        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3932
3933        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3934        let tree = Worktree::local(
3935            client,
3936            Path::new("/root"),
3937            true,
3938            fs.clone(),
3939            Default::default(),
3940            &mut cx.to_async(),
3941        )
3942        .await
3943        .unwrap();
3944
3945        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3946            .await;
3947
3948        tree.read_with(cx, |tree, _| {
3949            assert_eq!(
3950                tree.entries(false)
3951                    .map(|entry| entry.path.as_ref())
3952                    .collect::<Vec<_>>(),
3953                vec![
3954                    Path::new(""),
3955                    Path::new("lib"),
3956                    Path::new("lib/a"),
3957                    Path::new("lib/a/a.txt"),
3958                    Path::new("lib/a/lib"),
3959                    Path::new("lib/b"),
3960                    Path::new("lib/b/b.txt"),
3961                    Path::new("lib/b/lib"),
3962                ]
3963            );
3964        });
3965
3966        fs.rename(
3967            Path::new("/root/lib/a/lib"),
3968            Path::new("/root/lib/a/lib-2"),
3969            Default::default(),
3970        )
3971        .await
3972        .unwrap();
3973        executor.run_until_parked();
3974        tree.read_with(cx, |tree, _| {
3975            assert_eq!(
3976                tree.entries(false)
3977                    .map(|entry| entry.path.as_ref())
3978                    .collect::<Vec<_>>(),
3979                vec![
3980                    Path::new(""),
3981                    Path::new("lib"),
3982                    Path::new("lib/a"),
3983                    Path::new("lib/a/a.txt"),
3984                    Path::new("lib/a/lib-2"),
3985                    Path::new("lib/b"),
3986                    Path::new("lib/b/b.txt"),
3987                    Path::new("lib/b/lib"),
3988                ]
3989            );
3990        });
3991    }
3992
3993    #[gpui::test]
3994    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3995        // .gitignores are handled explicitly by Zed and do not use the git
3996        // machinery that the git_tests module checks
3997        let parent_dir = temp_tree(json!({
3998            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3999            "tree": {
4000                ".git": {},
4001                ".gitignore": "ignored-dir\n",
4002                "tracked-dir": {
4003                    "tracked-file1": "",
4004                    "ancestor-ignored-file1": "",
4005                },
4006                "ignored-dir": {
4007                    "ignored-file1": ""
4008                }
4009            }
4010        }));
4011        let dir = parent_dir.path().join("tree");
4012
4013        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4014
4015        let tree = Worktree::local(
4016            client,
4017            dir.as_path(),
4018            true,
4019            Arc::new(RealFs),
4020            Default::default(),
4021            &mut cx.to_async(),
4022        )
4023        .await
4024        .unwrap();
4025        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4026            .await;
4027        tree.flush_fs_events(cx).await;
4028        cx.read(|cx| {
4029            let tree = tree.read(cx);
4030            assert!(
4031                !tree
4032                    .entry_for_path("tracked-dir/tracked-file1")
4033                    .unwrap()
4034                    .is_ignored
4035            );
4036            assert!(
4037                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
4038                    .unwrap()
4039                    .is_ignored
4040            );
4041            assert!(
4042                tree.entry_for_path("ignored-dir/ignored-file1")
4043                    .unwrap()
4044                    .is_ignored
4045            );
4046        });
4047
4048        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
4049        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
4050        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
4051        tree.flush_fs_events(cx).await;
4052        cx.read(|cx| {
4053            let tree = tree.read(cx);
4054            assert!(
4055                !tree
4056                    .entry_for_path("tracked-dir/tracked-file2")
4057                    .unwrap()
4058                    .is_ignored
4059            );
4060            assert!(
4061                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
4062                    .unwrap()
4063                    .is_ignored
4064            );
4065            assert!(
4066                tree.entry_for_path("ignored-dir/ignored-file2")
4067                    .unwrap()
4068                    .is_ignored
4069            );
4070            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
4071        });
4072    }
4073
4074    #[gpui::test]
4075    async fn test_write_file(cx: &mut TestAppContext) {
4076        let dir = temp_tree(json!({
4077            ".git": {},
4078            ".gitignore": "ignored-dir\n",
4079            "tracked-dir": {},
4080            "ignored-dir": {}
4081        }));
4082
4083        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4084
4085        let tree = Worktree::local(
4086            client,
4087            dir.path(),
4088            true,
4089            Arc::new(RealFs),
4090            Default::default(),
4091            &mut cx.to_async(),
4092        )
4093        .await
4094        .unwrap();
4095        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4096            .await;
4097        tree.flush_fs_events(cx).await;
4098
4099        tree.update(cx, |tree, cx| {
4100            tree.as_local().unwrap().write_file(
4101                Path::new("tracked-dir/file.txt"),
4102                "hello".into(),
4103                Default::default(),
4104                cx,
4105            )
4106        })
4107        .await
4108        .unwrap();
4109        tree.update(cx, |tree, cx| {
4110            tree.as_local().unwrap().write_file(
4111                Path::new("ignored-dir/file.txt"),
4112                "world".into(),
4113                Default::default(),
4114                cx,
4115            )
4116        })
4117        .await
4118        .unwrap();
4119
4120        tree.read_with(cx, |tree, _| {
4121            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
4122            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
4123            assert!(!tracked.is_ignored);
4124            assert!(ignored.is_ignored);
4125        });
4126    }
4127
4128    #[gpui::test(iterations = 30)]
4129    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
4130        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4131
4132        let fs = FakeFs::new(cx.background());
4133        fs.insert_tree(
4134            "/root",
4135            json!({
4136                "b": {},
4137                "c": {},
4138                "d": {},
4139            }),
4140        )
4141        .await;
4142
4143        let tree = Worktree::local(
4144            client,
4145            "/root".as_ref(),
4146            true,
4147            fs,
4148            Default::default(),
4149            &mut cx.to_async(),
4150        )
4151        .await
4152        .unwrap();
4153
4154        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4155
4156        let entry = tree
4157            .update(cx, |tree, cx| {
4158                tree.as_local_mut()
4159                    .unwrap()
4160                    .create_entry("a/e".as_ref(), true, cx)
4161            })
4162            .await
4163            .unwrap();
4164        assert!(entry.is_dir());
4165
4166        cx.foreground().run_until_parked();
4167        tree.read_with(cx, |tree, _| {
4168            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
4169        });
4170
4171        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4172        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
4173        snapshot1.apply_remote_update(update).unwrap();
4174        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
4175    }
4176
4177    #[gpui::test(iterations = 100)]
4178    async fn test_random_worktree_operations_during_initial_scan(
4179        cx: &mut TestAppContext,
4180        mut rng: StdRng,
4181    ) {
4182        let operations = env::var("OPERATIONS")
4183            .map(|o| o.parse().unwrap())
4184            .unwrap_or(5);
4185        let initial_entries = env::var("INITIAL_ENTRIES")
4186            .map(|o| o.parse().unwrap())
4187            .unwrap_or(20);
4188
4189        let root_dir = Path::new("/test");
4190        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4191        fs.as_fake().insert_tree(root_dir, json!({})).await;
4192        for _ in 0..initial_entries {
4193            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4194        }
4195        log::info!("generated initial tree");
4196
4197        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4198        let worktree = Worktree::local(
4199            client.clone(),
4200            root_dir,
4201            true,
4202            fs.clone(),
4203            Default::default(),
4204            &mut cx.to_async(),
4205        )
4206        .await
4207        .unwrap();
4208
4209        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
4210
4211        for _ in 0..operations {
4212            worktree
4213                .update(cx, |worktree, cx| {
4214                    randomly_mutate_worktree(worktree, &mut rng, cx)
4215                })
4216                .await
4217                .log_err();
4218            worktree.read_with(cx, |tree, _| {
4219                tree.as_local().unwrap().snapshot.check_invariants()
4220            });
4221
4222            if rng.gen_bool(0.6) {
4223                let new_snapshot =
4224                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4225                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4226                snapshot.apply_remote_update(update.clone()).unwrap();
4227                assert_eq!(
4228                    snapshot.to_vec(true),
4229                    new_snapshot.to_vec(true),
4230                    "incorrect snapshot after update {:?}",
4231                    update
4232                );
4233            }
4234        }
4235
4236        worktree
4237            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4238            .await;
4239        worktree.read_with(cx, |tree, _| {
4240            tree.as_local().unwrap().snapshot.check_invariants()
4241        });
4242
4243        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4244        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
4245        snapshot.apply_remote_update(update.clone()).unwrap();
4246        assert_eq!(
4247            snapshot.to_vec(true),
4248            new_snapshot.to_vec(true),
4249            "incorrect snapshot after update {:?}",
4250            update
4251        );
4252    }
4253
4254    #[gpui::test(iterations = 100)]
4255    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4256        let operations = env::var("OPERATIONS")
4257            .map(|o| o.parse().unwrap())
4258            .unwrap_or(40);
4259        let initial_entries = env::var("INITIAL_ENTRIES")
4260            .map(|o| o.parse().unwrap())
4261            .unwrap_or(20);
4262
4263        let root_dir = Path::new("/test");
4264        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4265        fs.as_fake().insert_tree(root_dir, json!({})).await;
4266        for _ in 0..initial_entries {
4267            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4268        }
4269        log::info!("generated initial tree");
4270
4271        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4272        let worktree = Worktree::local(
4273            client.clone(),
4274            root_dir,
4275            true,
4276            fs.clone(),
4277            Default::default(),
4278            &mut cx.to_async(),
4279        )
4280        .await
4281        .unwrap();
4282
4283        worktree
4284            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4285            .await;
4286
4287        // After the initial scan is complete, the `UpdatedEntries` event can
4288        // be used to follow along with all changes to the worktree's snapshot.
4289        worktree.update(cx, |tree, cx| {
4290            let mut paths = tree
4291                .as_local()
4292                .unwrap()
4293                .paths()
4294                .cloned()
4295                .collect::<Vec<_>>();
4296
4297            cx.subscribe(&worktree, move |tree, _, event, _| {
4298                if let Event::UpdatedEntries(changes) = event {
4299                    for ((path, _), change_type) in changes.iter() {
4300                        let path = path.clone();
4301                        let ix = match paths.binary_search(&path) {
4302                            Ok(ix) | Err(ix) => ix,
4303                        };
4304                        match change_type {
4305                            PathChange::Added => {
4306                                assert_ne!(paths.get(ix), Some(&path));
4307                                paths.insert(ix, path);
4308                            }
4309
4310                            PathChange::Removed => {
4311                                assert_eq!(paths.get(ix), Some(&path));
4312                                paths.remove(ix);
4313                            }
4314
4315                            PathChange::Updated => {
4316                                assert_eq!(paths.get(ix), Some(&path));
4317                            }
4318
4319                            PathChange::AddedOrUpdated => {
4320                                if paths[ix] != path {
4321                                    paths.insert(ix, path);
4322                                }
4323                            }
4324                        }
4325                    }
4326
4327                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
4328                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
4329                }
4330            })
4331            .detach();
4332        });
4333
4334        fs.as_fake().pause_events();
4335        let mut snapshots = Vec::new();
4336        let mut mutations_len = operations;
4337        while mutations_len > 1 {
4338            if rng.gen_bool(0.2) {
4339                worktree
4340                    .update(cx, |worktree, cx| {
4341                        randomly_mutate_worktree(worktree, &mut rng, cx)
4342                    })
4343                    .await
4344                    .log_err();
4345            } else {
4346                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4347            }
4348
4349            let buffered_event_count = fs.as_fake().buffered_event_count();
4350            if buffered_event_count > 0 && rng.gen_bool(0.3) {
4351                let len = rng.gen_range(0..=buffered_event_count);
4352                log::info!("flushing {} events", len);
4353                fs.as_fake().flush_events(len);
4354            } else {
4355                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4356                mutations_len -= 1;
4357            }
4358
4359            cx.foreground().run_until_parked();
4360            if rng.gen_bool(0.2) {
4361                log::info!("storing snapshot {}", snapshots.len());
4362                let snapshot =
4363                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4364                snapshots.push(snapshot);
4365            }
4366        }
4367
4368        log::info!("quiescing");
4369        fs.as_fake().flush_events(usize::MAX);
4370        cx.foreground().run_until_parked();
4371        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4372        snapshot.check_invariants();
4373
4374        {
4375            let new_worktree = Worktree::local(
4376                client.clone(),
4377                root_dir,
4378                true,
4379                fs.clone(),
4380                Default::default(),
4381                &mut cx.to_async(),
4382            )
4383            .await
4384            .unwrap();
4385            new_worktree
4386                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4387                .await;
4388            let new_snapshot =
4389                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4390            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4391        }
4392
4393        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4394            let include_ignored = rng.gen::<bool>();
4395            if !include_ignored {
4396                let mut entries_by_path_edits = Vec::new();
4397                let mut entries_by_id_edits = Vec::new();
4398                for entry in prev_snapshot
4399                    .entries_by_id
4400                    .cursor::<()>()
4401                    .filter(|e| e.is_ignored)
4402                {
4403                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4404                    entries_by_id_edits.push(Edit::Remove(entry.id));
4405                }
4406
4407                prev_snapshot
4408                    .entries_by_path
4409                    .edit(entries_by_path_edits, &());
4410                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4411            }
4412
4413            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4414            prev_snapshot.apply_remote_update(update.clone()).unwrap();
4415            assert_eq!(
4416                prev_snapshot.to_vec(include_ignored),
4417                snapshot.to_vec(include_ignored),
4418                "wrong update for snapshot {i}. update: {:?}",
4419                update
4420            );
4421        }
4422    }
4423
4424    fn randomly_mutate_worktree(
4425        worktree: &mut Worktree,
4426        rng: &mut impl Rng,
4427        cx: &mut ModelContext<Worktree>,
4428    ) -> Task<Result<()>> {
4429        log::info!("mutating worktree");
4430        let worktree = worktree.as_local_mut().unwrap();
4431        let snapshot = worktree.snapshot();
4432        let entry = snapshot.entries(false).choose(rng).unwrap();
4433
4434        match rng.gen_range(0_u32..100) {
4435            0..=33 if entry.path.as_ref() != Path::new("") => {
4436                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4437                worktree.delete_entry(entry.id, cx).unwrap()
4438            }
4439            ..=66 if entry.path.as_ref() != Path::new("") => {
4440                let other_entry = snapshot.entries(false).choose(rng).unwrap();
4441                let new_parent_path = if other_entry.is_dir() {
4442                    other_entry.path.clone()
4443                } else {
4444                    other_entry.path.parent().unwrap().into()
4445                };
4446                let mut new_path = new_parent_path.join(gen_name(rng));
4447                if new_path.starts_with(&entry.path) {
4448                    new_path = gen_name(rng).into();
4449                }
4450
4451                log::info!(
4452                    "renaming entry {:?} ({}) to {:?}",
4453                    entry.path,
4454                    entry.id.0,
4455                    new_path
4456                );
4457                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4458                cx.foreground().spawn(async move {
4459                    task.await?;
4460                    Ok(())
4461                })
4462            }
4463            _ => {
4464                let task = if entry.is_dir() {
4465                    let child_path = entry.path.join(gen_name(rng));
4466                    let is_dir = rng.gen_bool(0.3);
4467                    log::info!(
4468                        "creating {} at {:?}",
4469                        if is_dir { "dir" } else { "file" },
4470                        child_path,
4471                    );
4472                    worktree.create_entry(child_path, is_dir, cx)
4473                } else {
4474                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4475                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4476                };
4477                cx.foreground().spawn(async move {
4478                    task.await?;
4479                    Ok(())
4480                })
4481            }
4482        }
4483    }
4484
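        // Mutates the fake filesystem directly: creates new files and directories,
        // writes random `.gitignore` files, or renames/deletes existing paths.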
4485    async fn randomly_mutate_fs(
4486        fs: &Arc<dyn Fs>,
4487        root_path: &Path,
4488        insertion_probability: f64,
4489        rng: &mut impl Rng,
4490    ) {
4491        log::info!("mutating fs");
4492        let mut files = Vec::new();
4493        let mut dirs = Vec::new();
4494        for path in fs.as_fake().paths() {
4495            if path.starts_with(root_path) {
4496                if fs.is_file(&path).await {
4497                    files.push(path);
4498                } else {
4499                    dirs.push(path);
4500                }
4501            }
4502        }
4503
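            // Insert new entries while the tree is nearly empty or with `insertion_probability`;
            // otherwise occasionally write a `.gitignore`, or rename/delete an existing path.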
4504        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4505            let path = dirs.choose(rng).unwrap();
4506            let new_path = path.join(gen_name(rng));
4507
4508            if rng.gen() {
4509                log::info!(
4510                    "creating dir {:?}",
4511                    new_path.strip_prefix(root_path).unwrap()
4512                );
4513                fs.create_dir(&new_path).await.unwrap();
4514            } else {
4515                log::info!(
4516                    "creating file {:?}",
4517                    new_path.strip_prefix(root_path).unwrap()
4518                );
4519                fs.create_file(&new_path, Default::default()).await.unwrap();
4520            }
4521        } else if rng.gen_bool(0.05) {
4522            let ignore_dir_path = dirs.choose(rng).unwrap();
4523            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4524
4525            let subdirs = dirs
4526                .iter()
4527                .filter(|d| d.starts_with(&ignore_dir_path))
4528                .cloned()
4529                .collect::<Vec<_>>();
4530            let subfiles = files
4531                .iter()
4532                .filter(|d| d.starts_with(&ignore_dir_path))
4533                .cloned()
4534                .collect::<Vec<_>>();
4535            let files_to_ignore = {
4536                let len = rng.gen_range(0..=subfiles.len());
4537                subfiles.choose_multiple(rng, len)
4538            };
4539            let dirs_to_ignore = {
4540                let len = rng.gen_range(0..subdirs.len());
4541                subdirs.choose_multiple(rng, len)
4542            };
4543
4544            let mut ignore_contents = String::new();
4545            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4546                writeln!(
4547                    ignore_contents,
4548                    "{}",
4549                    path_to_ignore
4550                        .strip_prefix(&ignore_dir_path)
4551                        .unwrap()
4552                        .to_str()
4553                        .unwrap()
4554                )
4555                .unwrap();
4556            }
4557            log::info!(
4558                "creating gitignore {:?} with contents:\n{}",
4559                ignore_path.strip_prefix(&root_path).unwrap(),
4560                ignore_contents
4561            );
4562            fs.save(
4563                &ignore_path,
4564                &ignore_contents.as_str().into(),
4565                Default::default(),
4566            )
4567            .await
4568            .unwrap();
4569        } else {
4570            let old_path = {
4571                let file_path = files.choose(rng);
4572                let dir_path = dirs[1..].choose(rng);
4573                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4574            };
4575
4576            let is_rename = rng.gen();
4577            if is_rename {
4578                let new_path_parent = dirs
4579                    .iter()
4580                    .filter(|d| !d.starts_with(old_path))
4581                    .choose(rng)
4582                    .unwrap();
4583
4584                let overwrite_existing_dir =
4585                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4586                let new_path = if overwrite_existing_dir {
4587                    fs.remove_dir(
4588                        &new_path_parent,
4589                        RemoveOptions {
4590                            recursive: true,
4591                            ignore_if_not_exists: true,
4592                        },
4593                    )
4594                    .await
4595                    .unwrap();
4596                    new_path_parent.to_path_buf()
4597                } else {
4598                    new_path_parent.join(gen_name(rng))
4599                };
4600
4601                log::info!(
4602                    "renaming {:?} to {}{:?}",
4603                    old_path.strip_prefix(&root_path).unwrap(),
4604                    if overwrite_existing_dir {
4605                        "overwrite "
4606                    } else {
4607                        ""
4608                    },
4609                    new_path.strip_prefix(&root_path).unwrap()
4610                );
4611                fs.rename(
4612                    &old_path,
4613                    &new_path,
4614                    fs::RenameOptions {
4615                        overwrite: true,
4616                        ignore_if_exists: true,
4617                    },
4618                )
4619                .await
4620                .unwrap();
4621            } else if fs.is_file(&old_path).await {
4622                log::info!(
4623                    "deleting file {:?}",
4624                    old_path.strip_prefix(&root_path).unwrap()
4625                );
4626                fs.remove_file(old_path, Default::default()).await.unwrap();
4627            } else {
4628                log::info!(
4629                    "deleting dir {:?}",
4630                    old_path.strip_prefix(&root_path).unwrap()
4631                );
4632                fs.remove_dir(
4633                    &old_path,
4634                    RemoveOptions {
4635                        recursive: true,
4636                        ignore_if_not_exists: true,
4637                    },
4638                )
4639                .await
4640                .unwrap();
4641            }
4642        }
4643    }
4644
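        // Generates a random six-character alphanumeric name.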
4645    fn gen_name(rng: &mut impl Rng) -> String {
4646        (0..6)
4647            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4648            .map(char::from)
4649            .collect()
4650    }
4651
4652    impl LocalSnapshot {
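        // Verifies the internal consistency of the snapshot: the two entry indices agree,
        // the file iterators match the entries, traversal orders are consistent, and every
        // tracked `.gitignore` still has corresponding entries.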
4653        fn check_invariants(&self) {
4654            assert_eq!(
4655                self.entries_by_path
4656                    .cursor::<()>()
4657                    .map(|e| (&e.path, e.id))
4658                    .collect::<Vec<_>>(),
4659                self.entries_by_id
4660                    .cursor::<()>()
4661                    .map(|e| (&e.path, e.id))
4662                    .collect::<collections::BTreeSet<_>>()
4663                    .into_iter()
4664                    .collect::<Vec<_>>(),
4665                "entries_by_path and entries_by_id are inconsistent"
4666            );
4667
4668            let mut files = self.files(true, 0);
4669            let mut visible_files = self.files(false, 0);
4670            for entry in self.entries_by_path.cursor::<()>() {
4671                if entry.is_file() {
4672                    assert_eq!(files.next().unwrap().inode, entry.inode);
4673                    if !entry.is_ignored {
4674                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4675                    }
4676                }
4677            }
4678
4679            assert!(files.next().is_none());
4680            assert!(visible_files.next().is_none());
4681
4682            let mut dfs_paths_via_children = Vec::new();
4683            let mut stack = vec![Path::new("")];
4684            while let Some(path) = stack.pop() {
4685                dfs_paths_via_children.push(path);
4686                let ix = stack.len();
4687                for child_entry in self.child_entries(path) {
4688                    stack.insert(ix, &child_entry.path);
4689                }
4690            }
4691
4692            let dfs_paths_via_iter = self
4693                .entries_by_path
4694                .cursor::<()>()
4695                .map(|e| e.path.as_ref())
4696                .collect::<Vec<_>>();
4697            assert_eq!(dfs_paths_via_children, dfs_paths_via_iter);
4698
4699            let dfs_paths_via_traversal = self
4700                .entries(true)
4701                .map(|e| e.path.as_ref())
4702                .collect::<Vec<_>>();
4703            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4704
4705            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4706                let ignore_parent_path =
4707                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4708                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4709                assert!(self
4710                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4711                    .is_some());
4712            }
4713        }
4714
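            // Flattens the snapshot into `(path, inode, is_ignored)` tuples, sorted by path,
            // for comparisons in tests.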
4715        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4716            let mut paths = Vec::new();
4717            for entry in self.entries_by_path.cursor::<()>() {
4718                if include_ignored || !entry.is_ignored {
4719                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4720                }
4721            }
4722            paths.sort_by(|a, b| a.0.cmp(b.0));
4723            paths
4724        }
4725    }
4726
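        // Tests covering git repository discovery and status tracking within a worktree.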
4727    mod git_tests {
4728        use super::*;
4729        use pretty_assertions::assert_eq;
4730
4731        #[gpui::test]
4732        async fn test_rename_work_directory(cx: &mut TestAppContext) {
4733            let root = temp_tree(json!({
4734                "projects": {
4735                    "project1": {
4736                        "a": "",
4737                        "b": "",
4738                    }
4739                },
4740
4741            }));
4742            let root_path = root.path();
4743
4744            let http_client = FakeHttpClient::with_404_response();
4745            let client = cx.read(|cx| Client::new(http_client, cx));
4746            let tree = Worktree::local(
4747                client,
4748                root_path,
4749                true,
4750                Arc::new(RealFs),
4751                Default::default(),
4752                &mut cx.to_async(),
4753            )
4754            .await
4755            .unwrap();
4756
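            // Initialize a repository in `projects/project1` with `a` committed,
            // then modify `a` on disk and leave `b` untracked.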
4757            let repo = git_init(&root_path.join("projects/project1"));
4758            git_add("a", &repo);
4759            git_commit("init", &repo);
4760            std::fs::write(root_path.join("projects/project1/a"), "aa").ok();
4761
4762            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4763                .await;
4764
4765            tree.flush_fs_events(cx).await;
4766
4767            cx.read(|cx| {
4768                let tree = tree.read(cx);
4769                let (work_dir, repo) = tree.repositories().next().unwrap();
4770                assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
4771                assert_eq!(
4772                    repo.status_for_file(tree, Path::new("projects/project1/a")),
4773                    Some(GitFileStatus::Modified)
4774                );
4775                assert_eq!(
4776                    repo.status_for_file(tree, Path::new("projects/project1/b")),
4777                    Some(GitFileStatus::Added)
4778                );
4779            });
4780            // Rename the repository's work directory on disk.
4781            std::fs::rename(
4782                root_path.join("projects/project1"),
4783                root_path.join("projects/project2"),
4784            )
4785            .ok();
4786            tree.flush_fs_events(cx).await;
4787
4788            cx.read(|cx| {
4789                let tree = tree.read(cx);
4790                let (work_dir, repo) = tree.repositories().next().unwrap();
4791                assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
4792                assert_eq!(
4793                    repo.status_for_file(tree, Path::new("projects/project2/a")),
4794                    Some(GitFileStatus::Modified)
4795                );
4796                assert_eq!(
4797                    repo.status_for_file(tree, Path::new("projects/project2/b")),
4798                    Some(GitFileStatus::Added)
4799                );
4800            });
4801        }
4802
4803        #[gpui::test]
4804        async fn test_git_repository_for_path(cx: &mut TestAppContext) {
4805            let root = temp_tree(json!({
4806                "c.txt": "",
4807                "dir1": {
4808                    ".git": {},
4809                    "deps": {
4810                        "dep1": {
4811                            ".git": {},
4812                            "src": {
4813                                "a.txt": ""
4814                            }
4815                        }
4816                    },
4817                    "src": {
4818                        "b.txt": ""
4819                    }
4820                },
4821            }));
4822
4823            let http_client = FakeHttpClient::with_404_response();
4824            let client = cx.read(|cx| Client::new(http_client, cx));
4825            let tree = Worktree::local(
4826                client,
4827                root.path(),
4828                true,
4829                Arc::new(RealFs),
4830                Default::default(),
4831                &mut cx.to_async(),
4832            )
4833            .await
4834            .unwrap();
4835
4836            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4837                .await;
4838            tree.flush_fs_events(cx).await;
4839
4840            tree.read_with(cx, |tree, _cx| {
4841                let tree = tree.as_local().unwrap();
4842
4843                assert!(tree.repository_for_path("c.txt".as_ref()).is_none());
4844
4845                let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
4846                assert_eq!(
4847                    entry
4848                        .work_directory(tree)
4849                        .map(|directory| directory.as_ref().to_owned()),
4850                    Some(Path::new("dir1").to_owned())
4851                );
4852
4853                let entry = tree.repository_for_path("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
4854                assert_eq!(
4855                    entry
4856                        .work_directory(tree)
4857                        .map(|directory| directory.as_ref().to_owned()),
4858                    Some(Path::new("dir1/deps/dep1").to_owned())
4859                );
4860
4861                let entries = tree.files(false, 0);
4862
4863                let paths_with_repos = tree
4864                    .entries_with_repositories(entries)
4865                    .map(|(entry, repo)| {
4866                        (
4867                            entry.path.as_ref(),
4868                            repo.and_then(|repo| {
4869                                repo.work_directory(&tree)
4870                                    .map(|work_directory| work_directory.0.to_path_buf())
4871                            }),
4872                        )
4873                    })
4874                    .collect::<Vec<_>>();
4875
4876                assert_eq!(
4877                    paths_with_repos,
4878                    &[
4879                        (Path::new("c.txt"), None),
4880                        (
4881                            Path::new("dir1/deps/dep1/src/a.txt"),
4882                            Some(Path::new("dir1/deps/dep1").into())
4883                        ),
4884                        (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
4885                    ]
4886                );
4887            });
4888
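            // Record repository update events so we can assert which work directories
            // are reported as changed.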
4889            let repo_update_events = Arc::new(Mutex::new(vec![]));
4890            tree.update(cx, |_, cx| {
4891                let repo_update_events = repo_update_events.clone();
4892                cx.subscribe(&tree, move |_, _, event, _| {
4893                    if let Event::UpdatedGitRepositories(update) = event {
4894                        repo_update_events.lock().push(update.clone());
4895                    }
4896                })
4897                .detach();
4898            });
4899
4900            std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
4901            tree.flush_fs_events(cx).await;
4902
4903            assert_eq!(
4904                repo_update_events.lock()[0]
4905                    .keys()
4906                    .cloned()
4907                    .collect::<Vec<Arc<Path>>>(),
4908                vec![Path::new("dir1").into()]
4909            );
4910
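            // Removing the `.git` directory should remove the repository from the worktree.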
4911            std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
4912            tree.flush_fs_events(cx).await;
4913
4914            tree.read_with(cx, |tree, _cx| {
4915                let tree = tree.as_local().unwrap();
4916
4917                assert!(tree.repository_for_path("dir1/src/b.txt".as_ref()).is_none());
4918            });
4919        }
4920
4921        #[gpui::test]
4922        async fn test_git_status(cx: &mut TestAppContext) {
4923            const IGNORE_RULE: &str = "**/target";
4924
4925            let root = temp_tree(json!({
4926                "project": {
4927                    "a.txt": "a",
4928                    "b.txt": "bb",
4929                    "c": {
4930                        "d": {
4931                            "e.txt": "eee"
4932                        }
4933                    },
4934                    "f.txt": "ffff",
4935                    "target": {
4936                        "build_file": "???"
4937                    },
4938                    ".gitignore": IGNORE_RULE
4939                },
4940
4941            }));
4942
4943            let http_client = FakeHttpClient::with_404_response();
4944            let client = cx.read(|cx| Client::new(http_client, cx));
4945            let tree = Worktree::local(
4946                client,
4947                root.path(),
4948                true,
4949                Arc::new(RealFs),
4950                Default::default(),
4951                &mut cx.to_async(),
4952            )
4953            .await
4954            .unwrap();
4955
4956            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
4957                .await;
4958
4959            const A_TXT: &str = "a.txt";
4960            const B_TXT: &str = "b.txt";
4961            const E_TXT: &str = "c/d/e.txt";
4962            const F_TXT: &str = "f.txt";
4963            const DOTGITIGNORE: &str = ".gitignore";
4964            const BUILD_FILE: &str = "target/build_file";
4965
4966            let work_dir = root.path().join("project");
4967            let mut repo = git_init(work_dir.as_path());
4968            repo.add_ignore_rule(IGNORE_RULE).unwrap();
4969            git_add(Path::new(A_TXT), &repo);
4970            git_add(Path::new(E_TXT), &repo);
4971            git_add(Path::new(DOTGITIGNORE), &repo);
4972            git_commit("Initial commit", &repo);
4973
4974            std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
4975
4976            tree.flush_fs_events(cx).await;
4977
4978            // Check that the right git state is observed on startup
4979            tree.read_with(cx, |tree, _cx| {
4980                let snapshot = tree.snapshot();
4981                assert_eq!(snapshot.repository_entries.iter().count(), 1);
4982                let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
4983                assert_eq!(dir.0.as_ref(), Path::new("project"));
4984
4985                assert_eq!(repo.statuses.iter().count(), 3);
4986                assert_eq!(
4987                    repo.statuses.get(&Path::new(A_TXT).into()),
4988                    Some(&GitFileStatus::Modified)
4989                );
4990                assert_eq!(
4991                    repo.statuses.get(&Path::new(B_TXT).into()),
4992                    Some(&GitFileStatus::Added)
4993                );
4994                assert_eq!(
4995                    repo.statuses.get(&Path::new(F_TXT).into()),
4996                    Some(&GitFileStatus::Added)
4997                );
4998            });
4999
5000            git_add(Path::new(A_TXT), &repo);
5001            git_add(Path::new(B_TXT), &repo);
5002            git_commit("Committing modified and added", &repo);
5003            tree.flush_fs_events(cx).await;
5004
5005            // Check that changes made only through git (staging and committing) are reflected in the statuses
5006            tree.read_with(cx, |tree, _cx| {
5007                let snapshot = tree.snapshot();
5008                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5009
5010                assert_eq!(repo.statuses.iter().count(), 1);
5011                assert_eq!(
5012                    repo.statuses.get(&Path::new(F_TXT).into()),
5013                    Some(&GitFileStatus::Added)
5014                );
5015            });
5016
5017            git_reset(0, &repo);
5018            git_remove_index(Path::new(B_TXT), &repo);
5019            git_stash(&mut repo);
5020            std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
5021            std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
5022            tree.flush_fs_events(cx).await;
5023
5024            // Check that more complex repo changes are tracked
5025            tree.read_with(cx, |tree, _cx| {
5026                let snapshot = tree.snapshot();
5027                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5028
5029                assert_eq!(repo.statuses.iter().count(), 3);
5030                assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
5031                assert_eq!(
5032                    repo.statuses.get(&Path::new(B_TXT).into()),
5033                    Some(&GitFileStatus::Added)
5034                );
5035                assert_eq!(
5036                    repo.statuses.get(&Path::new(E_TXT).into()),
5037                    Some(&GitFileStatus::Modified)
5038                );
5039                assert_eq!(
5040                    repo.statuses.get(&Path::new(F_TXT).into()),
5041                    Some(&GitFileStatus::Added)
5042                );
5043            });
5044
5045            std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
5046            std::fs::remove_dir_all(work_dir.join("c")).unwrap();
5047            std::fs::write(
5048                work_dir.join(DOTGITIGNORE),
5049                [IGNORE_RULE, "f.txt"].join("\n"),
5050            )
5051            .unwrap();
5052
5053            git_add(Path::new(DOTGITIGNORE), &repo);
5054            git_commit("Committing modified git ignore", &repo);
5055
5056            tree.flush_fs_events(cx).await;
5057
5058            // Check that changes made outside of git (deleting files, updating .gitignore) are reflected in the statuses
5059            tree.read_with(cx, |tree, _cx| {
5060                let snapshot = tree.snapshot();
5061                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5062
5063                assert_eq!(repo.statuses.iter().count(), 0);
5064            });
5065
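            // Files created inside new nested directories should get statuses keyed by
            // their path relative to the repository's work directory.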
5066            let mut renamed_dir_name = "first_directory/second_directory";
5067            const RENAMED_FILE: &str = "rf.txt";
5068
5069            std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
5070            std::fs::write(
5071                work_dir.join(renamed_dir_name).join(RENAMED_FILE),
5072                "new-contents",
5073            )
5074            .unwrap();
5075
5076            tree.flush_fs_events(cx).await;
5077
5078            tree.read_with(cx, |tree, _cx| {
5079                let snapshot = tree.snapshot();
5080                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5081
5082                assert_eq!(repo.statuses.iter().count(), 1);
5083                assert_eq!(
5084                    repo.statuses
5085                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5086                    Some(&GitFileStatus::Added)
5087                );
5088            });
5089
5090            renamed_dir_name = "new_first_directory/second_directory";
5091
5092            std::fs::rename(
5093                work_dir.join("first_directory"),
5094                work_dir.join("new_first_directory"),
5095            )
5096            .unwrap();
5097
5098            tree.flush_fs_events(cx).await;
5099
5100            tree.read_with(cx, |tree, _cx| {
5101                let snapshot = tree.snapshot();
5102                let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
5103
5104                assert_eq!(repo.statuses.iter().count(), 1);
5105                assert_eq!(
5106                    repo.statuses
5107                        .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
5108                    Some(&GitFileStatus::Added)
5109                );
5110            });
5111        }
5112
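        // Helpers that drive a real git repository via git2 for these tests.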
5113        #[track_caller]
5114        fn git_init(path: &Path) -> git2::Repository {
5115            git2::Repository::init(path).expect("Failed to initialize git repository")
5116        }
5117
5118        #[track_caller]
5119        fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5120            let path = path.as_ref();
5121            let mut index = repo.index().expect("Failed to get index");
5122            index.add_path(path).expect("Failed to add file to index");
5123            index.write().expect("Failed to write index");
5124        }
5125
5126        #[track_caller]
5127        fn git_remove_index(path: &Path, repo: &git2::Repository) {
5128            let mut index = repo.index().expect("Failed to get index");
5129            index.remove_path(path).expect("Failed to remove file from index");
5130            index.write().expect("Failed to write index");
5131        }
5132
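        // Commits the current index, creating an initial commit if the repository
        // has no HEAD yet.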
5133        #[track_caller]
5134        fn git_commit(msg: &'static str, repo: &git2::Repository) {
5135            use git2::Signature;
5136
5137            let signature = Signature::now("test", "test@zed.dev").unwrap();
5138            let oid = repo.index().unwrap().write_tree().unwrap();
5139            let tree = repo.find_tree(oid).unwrap();
5140            if let Ok(head) = repo.head() {
5141                let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5142
5143                let parent_commit = parent_obj.as_commit().unwrap();
5144
5145                repo.commit(
5146                    Some("HEAD"),
5147                    &signature,
5148                    &signature,
5149                    msg,
5150                    &tree,
5151                    &[parent_commit],
5152                )
5153                .expect("Failed to commit with parent");
5154            } else {
5155                repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5156                    .expect("Failed to commit");
5157            }
5158        }
5159
5160        #[track_caller]
5161        fn git_stash(repo: &mut git2::Repository) {
5162            use git2::Signature;
5163
5164            let signature = Signature::now("test", "test@zed.dev").unwrap();
5165            repo.stash_save(&signature, "N/A", None)
5166                .expect("Failed to stash");
5167        }
5168
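        // Soft-resets HEAD to the parent commit selected by `offset`, leaving the
        // working tree untouched.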
5169        #[track_caller]
5170        fn git_reset(offset: usize, repo: &git2::Repository) {
5171            let head = repo.head().expect("Couldn't get repo head");
5172            let object = head.peel(git2::ObjectType::Commit).unwrap();
5173            let commit = object.as_commit().unwrap();
5174            let new_head = commit
5175                .parents()
5176                .inspect(|parent| {
5177                    parent.message();
5178                })
5179                .nth(offset)
5181                .expect("Not enough history");
5182            repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5183                .expect("Could not reset");
5184        }
5185
5186        #[allow(dead_code)]
5187        #[track_caller]
5188        fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5189            repo.statuses(None)
5190                .unwrap()
5191                .iter()
5192                .map(|status| (status.path().unwrap().to_string(), status.status()))
5193                .collect()
5194        }
5195    }
5196}