// worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitRepository, GitFileStatus, RepoPath},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TryFutureExt};
  59
/// Identifier for a worktree. Derived from the id of the gpui model that
/// owns the worktree (see `WorktreeId::from_usize(cx.model_id())` below).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
  62
/// A tracked directory tree: either one backed by the local filesystem,
/// or a read-mostly replica of a collaborator's worktree kept in sync
/// over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
  67
/// A worktree backed by the local filesystem. Owns the background scanner
/// task that keeps `snapshot` in sync with filesystem events.
pub struct LocalWorktree {
    // Latest snapshot received from the background scanner.
    snapshot: LocalSnapshot,
    // Requests asking the scanner to refresh specific paths; the paired
    // `barrier::Sender` is dropped when that scan completes, waking waiters.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Whether a scan is in progress; kept as a (sender, receiver) pair so
    // `scan_complete` can await the transition back to `false`.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present only while this worktree is shared with collaborators.
    share: Option<ShareState>,
    // Full diagnostic entries, keyed by worktree-relative path, then listed
    // per language server (kept sorted by server id — see `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Error/warning counts derived from `diagnostics`, per path and server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // NOTE(review): presumably controls whether the worktree appears in the
    // UI — confirm against callers of `is_visible`.
    visible: bool,
}
  86
/// A replica of a collaborator's worktree. Updates arrive as
/// `proto::UpdateWorktree` messages and are applied to `background_snapshot`
/// off the main thread before being copied into `snapshot`.
pub struct RemoteWorktree {
    // Foreground copy of the most recently applied snapshot.
    snapshot: Snapshot,
    // Snapshot mutated by the background task that applies remote updates.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Channel feeding remote updates to the background task; `None` once
    // updates can no longer be accepted.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters resolved once a given scan id has been observed (see the
    // snapshot-update loop in `Worktree::remote`).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
  99
/// An immutable-by-convention view of a worktree's contents at a point in
/// time: its entries (indexed both by path and by id) and the git
/// repositories discovered within it.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on disk.
    abs_path: Arc<Path>,
    // File name of the root, used for fuzzy matching (see `Worktree::local`).
    root_name: String,
    // Lowercased character bag of `root_name`, for fuzzy-match scoring.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Git repositories keyed by their working directory, relative to the root.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
 122
 123impl Snapshot {
 124    pub fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
 125        let mut max_len = 0;
 126        let mut current_candidate = None;
 127        for (work_directory, repo) in (&self.repository_entries).iter() {
 128            if repo.contains(self, path) {
 129                if work_directory.0.as_os_str().len() >= max_len {
 130                    current_candidate = Some(repo);
 131                    max_len = work_directory.0.as_os_str().len();
 132                } else {
 133                    break;
 134                }
 135            }
 136        }
 137
 138        current_candidate.map(|entry| entry.to_owned())
 139    }
 140}
 141
/// A git repository discovered within the worktree: its working-directory
/// entry, current branch (if known), and per-file git statuses.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,
    // Statuses keyed by path relative to the repository's working directory.
    pub(crate) worktree_statuses: TreeMap<RepoPath, GitFileStatus>,
}
 148
 149impl RepositoryEntry {
 150    pub fn branch(&self) -> Option<Arc<str>> {
 151        self.branch.clone()
 152    }
 153
 154    pub fn work_directory_id(&self) -> ProjectEntryId {
 155        *self.work_directory
 156    }
 157
 158    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 159        snapshot
 160            .entry_for_id(self.work_directory_id())
 161            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 162    }
 163
 164    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
 165        self.work_directory.contains(snapshot, path)
 166    }
 167
 168    pub fn status_for(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
 169        self.work_directory
 170            .relativize(snapshot, path)
 171            .and_then(|repo_path| self.worktree_statuses.get(&repo_path))
 172            .cloned()
 173    }
 174}
 175
 176impl From<&RepositoryEntry> for proto::RepositoryEntry {
 177    fn from(value: &RepositoryEntry) -> Self {
 178        proto::RepositoryEntry {
 179            work_directory_id: value.work_directory.to_proto(),
 180            branch: value.branch.as_ref().map(|str| str.to_string()),
 181            // TODO: Status
 182            removed_statuses: Default::default(),
 183            updated_statuses: Default::default(),
 184        }
 185    }
 186}
 187
/// This path corresponds to the 'content path' (the folder that contains the .git),
/// stored relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
 191
 192impl Default for RepositoryWorkDirectory {
 193    fn default() -> Self {
 194        RepositoryWorkDirectory(Arc::from(Path::new("")))
 195    }
 196}
 197
 198impl AsRef<Path> for RepositoryWorkDirectory {
 199    fn as_ref(&self) -> &Path {
 200        self.0.as_ref()
 201    }
 202}
 203
/// Newtype over the project-entry id of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
 206
 207impl WorkDirectoryEntry {
 208    // Note that these paths should be relative to the worktree root.
 209    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
 210        snapshot
 211            .entry_for_id(self.0)
 212            .map(|entry| path.starts_with(&entry.path))
 213            .unwrap_or(false)
 214    }
 215
 216    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 217        worktree.entry_for_id(self.0).and_then(|entry| {
 218            path.strip_prefix(&entry.path)
 219                .ok()
 220                .map(move |path| path.into())
 221        })
 222    }
 223}
 224
/// Dereferences to the underlying `ProjectEntryId`.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
 232
 233impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
 234    fn from(value: ProjectEntryId) -> Self {
 235        WorkDirectoryEntry(value)
 236    }
 237}
 238
/// A `Snapshot` augmented with local-only state: gitignore data, open git
/// repositories, and bookkeeping used by the background scanner.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed `.gitignore` per parent directory (absolute path), paired with
    // the scan id at which it was last seen.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    // Maps a removed entry's stable key to the id it had, so re-created
    // entries can keep their ids.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Shared counter for allocating new entry ids.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
 249
/// A git repository opened on the local filesystem, with the scan ids at
/// which it was last (fully) updated.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    // Scan id of the most recent update touching this repository.
    pub(crate) scan_id: usize,
    // Scan id of the most recent full scan of this repository.
    pub(crate) full_scan_id: usize,
    // Handle to the underlying git repository, shared with background tasks.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
 259
 260impl LocalRepositoryEntry {
 261    // Note that this path should be relative to the worktree root.
 262    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 263        path.starts_with(self.git_dir_path.as_ref())
 264    }
 265}
 266
/// Exposes the shared `Snapshot` fields/methods directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
 274
/// Mutable access to the inner `Snapshot` through the same deref sugar.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
 280
/// Messages sent from the background scanner to the `LocalWorktree` model
/// (see the `spawn_weak` loop in `Worktree::local`).
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        // Paths that changed, with the kind of change.
        changes: HashMap<Arc<Path>, PathChange>,
        // When present, dropping this signals the waiter that requested the scan.
        barrier: Option<barrier::Sender>,
        // Whether scanning is still in progress after this update.
        scanning: bool,
    },
}
 290
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Latest snapshot to be streamed to collaborators (see `set_snapshot`).
    snapshots_tx: watch::Sender<LocalSnapshot>,
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
 297
/// Events emitted by the `Worktree` model.
pub enum Event {
    /// Entries changed on disk (or a remote snapshot was applied).
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repositories were added, removed, or rescanned.
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
 302
/// Registers `Worktree` as a gpui entity emitting `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
 306
 307impl Worktree {
    /// Creates a worktree backed by the local filesystem, rooted at `path`.
    ///
    /// Stats the root up front (failing if it cannot be read), seeds a
    /// snapshot with at most the root entry, then spawns a background
    /// scanner that watches the filesystem and streams `ScanState` updates
    /// back into the model.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // The first scan gets id 1; no scan has completed yet.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // If the root exists on disk, seed the snapshot with its entry.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this model:
            // update the `is_scanning` watch and emit entry-change events.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes whoever requested
                                // this batch of path changes.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    // Watch the root with a 100ms debounce, then run the
                    // scanner until the event stream ends.
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
 420
    /// Creates a remote worktree replicating a collaborator's worktree,
    /// described by `worktree` metadata.
    ///
    /// Spawns a background task that applies incoming `proto::UpdateWorktree`
    /// messages to a shared snapshot, and a foreground task that copies that
    /// snapshot into the model and resolves any scan-id subscriptions.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates to the shared snapshot off the main thread,
            // notifying the foreground task after each one (even on error).
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Copy the freshly updated background snapshot into the model and
            // resolve subscriptions whose scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are kept in order, so stop at the
                            // first scan id that has not yet been observed.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
 503
 504    pub fn as_local(&self) -> Option<&LocalWorktree> {
 505        if let Worktree::Local(worktree) = self {
 506            Some(worktree)
 507        } else {
 508            None
 509        }
 510    }
 511
 512    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 513        if let Worktree::Remote(worktree) = self {
 514            Some(worktree)
 515        } else {
 516            None
 517        }
 518    }
 519
 520    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 521        if let Worktree::Local(worktree) = self {
 522            Some(worktree)
 523        } else {
 524            None
 525        }
 526    }
 527
 528    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 529        if let Worktree::Remote(worktree) = self {
 530            Some(worktree)
 531        } else {
 532            None
 533        }
 534    }
 535
 536    pub fn is_local(&self) -> bool {
 537        matches!(self, Worktree::Local(_))
 538    }
 539
 540    pub fn is_remote(&self) -> bool {
 541        !self.is_local()
 542    }
 543
 544    pub fn snapshot(&self) -> Snapshot {
 545        match self {
 546            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 547            Worktree::Remote(worktree) => worktree.snapshot(),
 548        }
 549    }
 550
 551    pub fn scan_id(&self) -> usize {
 552        match self {
 553            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 554            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 555        }
 556    }
 557
 558    pub fn completed_scan_id(&self) -> usize {
 559        match self {
 560            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 561            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 562        }
 563    }
 564
 565    pub fn is_visible(&self) -> bool {
 566        match self {
 567            Worktree::Local(worktree) => worktree.visible,
 568            Worktree::Remote(worktree) => worktree.visible,
 569        }
 570    }
 571
 572    pub fn replica_id(&self) -> ReplicaId {
 573        match self {
 574            Worktree::Local(_) => 0,
 575            Worktree::Remote(worktree) => worktree.replica_id,
 576        }
 577    }
 578
 579    pub fn diagnostic_summaries(
 580        &self,
 581    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 582        match self {
 583            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 584            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 585        }
 586        .iter()
 587        .flat_map(|(path, summaries)| {
 588            summaries
 589                .iter()
 590                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 591        })
 592    }
 593
 594    pub fn abs_path(&self) -> Arc<Path> {
 595        match self {
 596            Worktree::Local(worktree) => worktree.abs_path.clone(),
 597            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 598        }
 599    }
 600}
 601
 602impl LocalWorktree {
    /// Whether the given absolute path lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
 606
 607    fn absolutize(&self, path: &Path) -> PathBuf {
 608        if path.file_name().is_some() {
 609            self.abs_path.join(path)
 610        } else {
 611            self.abs_path.to_path_buf()
 612        }
 613    }
 614
    /// Loads the file at `path` into a new `Buffer` model with remote id
    /// `id`, recomputing its git diff once built.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Read the file contents along with its git index text (if any)
            // to diff against.
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Construct the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
 637
 638    pub fn diagnostics_for_path(
 639        &self,
 640        path: &Path,
 641    ) -> Vec<(
 642        LanguageServerId,
 643        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 644    )> {
 645        self.diagnostics.get(path).cloned().unwrap_or_default()
 646    }
 647
    /// Replaces the diagnostics reported by `server_id` for `worktree_path`.
    ///
    /// Keeps `diagnostics` and `diagnostic_summaries` in sync (the per-path
    /// vector stays sorted by server id), and, when the worktree is shared,
    /// broadcasts the new summary to collaborators.
    ///
    /// Returns `Ok(true)` if the summary changed in a way observers should
    /// care about (either the old or new summary is non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        // Remove this server's previous summary; it is re-inserted below
        // only if the new diagnostics are non-empty.
        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics left from this server: drop its entry, and the
            // whole path entry once no server has diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // Insert or replace this server's entry, keeping the vector
            // sorted by server id.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        // Only notify collaborators when something observable changed.
        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
 707
 708    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
 709        let updated_repos =
 710            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
 711        self.snapshot = new_snapshot;
 712
 713        if let Some(share) = self.share.as_mut() {
 714            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
 715        }
 716
 717        if !updated_repos.is_empty() {
 718            cx.emit(Event::UpdatedGitRepositories(updated_repos));
 719        }
 720    }
 721
    /// Computes which git repositories differ between two snapshots' repo
    /// maps, keyed by the working directory's worktree-relative path.
    ///
    /// Performs a sorted merge-join over the two maps (both are ordered by
    /// entry id): repositories present on only one side are reported, and
    /// repositories present on both sides are reported only when their
    /// `scan_id` changed. Entries whose id no longer resolves are skipped.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        // Only in the old map: repository was removed.
                        Ordering::Less => {
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        // Present in both: changed only if rescanned.
                        Ordering::Equal => {
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        // Only in the new map: repository was added.
                        Ordering::Greater => {
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                // Old map has leftovers: all removed.
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                // New map has leftovers: all added.
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
 775
 776    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 777        let mut is_scanning_rx = self.is_scanning.1.clone();
 778        async move {
 779            let mut is_scanning = is_scanning_rx.borrow().clone();
 780            while is_scanning {
 781                if let Some(value) = is_scanning_rx.recv().await {
 782                    is_scanning = value;
 783                } else {
 784                    break;
 785                }
 786            }
 787        }
 788    }
 789
    /// A clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
 793
    /// Serializes this worktree's identifying metadata for the wire.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
 802
    /// Reads the file at `path` from disk, returning its `File` handle, its
    /// contents, and — when the file belongs to a git repository — the text
    /// of the file in the git index (`diff_base`).
    ///
    /// Also refreshes the snapshot's entry for the path, so the returned
    /// `File` reflects up-to-date metadata.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // If the path is inside a known git repository, kick off a background
        // read of the index text to use as the diff base.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
 857
 858    pub fn save_buffer(
 859        &self,
 860        buffer_handle: ModelHandle<Buffer>,
 861        path: Arc<Path>,
 862        has_changed_file: bool,
 863        cx: &mut ModelContext<Worktree>,
 864    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
 865        let handle = cx.handle();
 866        let buffer = buffer_handle.read(cx);
 867
 868        let rpc = self.client.clone();
 869        let buffer_id = buffer.remote_id();
 870        let project_id = self.share.as_ref().map(|share| share.project_id);
 871
 872        let text = buffer.as_rope().clone();
 873        let fingerprint = text.fingerprint();
 874        let version = buffer.version();
 875        let save = self.write_file(path, text, buffer.line_ending(), cx);
 876
 877        cx.as_mut().spawn(|mut cx| async move {
 878            let entry = save.await?;
 879
 880            if has_changed_file {
 881                let new_file = Arc::new(File {
 882                    entry_id: entry.id,
 883                    worktree: handle,
 884                    path: entry.path,
 885                    mtime: entry.mtime,
 886                    is_local: true,
 887                    is_deleted: false,
 888                });
 889
 890                if let Some(project_id) = project_id {
 891                    rpc.send(proto::UpdateBufferFile {
 892                        project_id,
 893                        buffer_id,
 894                        file: Some(new_file.to_proto()),
 895                    })
 896                    .log_err();
 897                }
 898
 899                buffer_handle.update(&mut cx, |buffer, cx| {
 900                    if has_changed_file {
 901                        buffer.file_updated(new_file, cx).detach();
 902                    }
 903                });
 904            }
 905
 906            if let Some(project_id) = project_id {
 907                rpc.send(proto::BufferSaved {
 908                    project_id,
 909                    buffer_id,
 910                    version: serialize_version(&version),
 911                    mtime: Some(entry.mtime.into()),
 912                    fingerprint: serialize_fingerprint(fingerprint),
 913                })?;
 914            }
 915
 916            buffer_handle.update(&mut cx, |buffer, cx| {
 917                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
 918            });
 919
 920            Ok((version, fingerprint, entry.mtime))
 921        })
 922    }
 923
 924    pub fn create_entry(
 925        &self,
 926        path: impl Into<Arc<Path>>,
 927        is_dir: bool,
 928        cx: &mut ModelContext<Worktree>,
 929    ) -> Task<Result<Entry>> {
 930        let path = path.into();
 931        let abs_path = self.absolutize(&path);
 932        let fs = self.fs.clone();
 933        let write = cx.background().spawn(async move {
 934            if is_dir {
 935                fs.create_dir(&abs_path).await
 936            } else {
 937                fs.save(&abs_path, &Default::default(), Default::default())
 938                    .await
 939            }
 940        });
 941
 942        cx.spawn(|this, mut cx| async move {
 943            write.await?;
 944            this.update(&mut cx, |this, cx| {
 945                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
 946            })
 947            .await
 948        })
 949    }
 950
 951    pub fn write_file(
 952        &self,
 953        path: impl Into<Arc<Path>>,
 954        text: Rope,
 955        line_ending: LineEnding,
 956        cx: &mut ModelContext<Worktree>,
 957    ) -> Task<Result<Entry>> {
 958        let path = path.into();
 959        let abs_path = self.absolutize(&path);
 960        let fs = self.fs.clone();
 961        let write = cx
 962            .background()
 963            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
 964
 965        cx.spawn(|this, mut cx| async move {
 966            write.await?;
 967            this.update(&mut cx, |this, cx| {
 968                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
 969            })
 970            .await
 971        })
 972    }
 973
 974    pub fn delete_entry(
 975        &self,
 976        entry_id: ProjectEntryId,
 977        cx: &mut ModelContext<Worktree>,
 978    ) -> Option<Task<Result<()>>> {
 979        let entry = self.entry_for_id(entry_id)?.clone();
 980        let abs_path = self.abs_path.clone();
 981        let fs = self.fs.clone();
 982
 983        let delete = cx.background().spawn(async move {
 984            let mut abs_path = fs.canonicalize(&abs_path).await?;
 985            if entry.path.file_name().is_some() {
 986                abs_path = abs_path.join(&entry.path);
 987            }
 988            if entry.is_file() {
 989                fs.remove_file(&abs_path, Default::default()).await?;
 990            } else {
 991                fs.remove_dir(
 992                    &abs_path,
 993                    RemoveOptions {
 994                        recursive: true,
 995                        ignore_if_not_exists: false,
 996                    },
 997                )
 998                .await?;
 999            }
1000            anyhow::Ok(abs_path)
1001        });
1002
1003        Some(cx.spawn(|this, mut cx| async move {
1004            let abs_path = delete.await?;
1005            let (tx, mut rx) = barrier::channel();
1006            this.update(&mut cx, |this, _| {
1007                this.as_local_mut()
1008                    .unwrap()
1009                    .path_changes_tx
1010                    .try_send((vec![abs_path], tx))
1011            })?;
1012            rx.recv().await;
1013            Ok(())
1014        }))
1015    }
1016
1017    pub fn rename_entry(
1018        &self,
1019        entry_id: ProjectEntryId,
1020        new_path: impl Into<Arc<Path>>,
1021        cx: &mut ModelContext<Worktree>,
1022    ) -> Option<Task<Result<Entry>>> {
1023        let old_path = self.entry_for_id(entry_id)?.path.clone();
1024        let new_path = new_path.into();
1025        let abs_old_path = self.absolutize(&old_path);
1026        let abs_new_path = self.absolutize(&new_path);
1027        let fs = self.fs.clone();
1028        let rename = cx.background().spawn(async move {
1029            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1030                .await
1031        });
1032
1033        Some(cx.spawn(|this, mut cx| async move {
1034            rename.await?;
1035            this.update(&mut cx, |this, cx| {
1036                this.as_local_mut()
1037                    .unwrap()
1038                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1039            })
1040            .await
1041        }))
1042    }
1043
1044    pub fn copy_entry(
1045        &self,
1046        entry_id: ProjectEntryId,
1047        new_path: impl Into<Arc<Path>>,
1048        cx: &mut ModelContext<Worktree>,
1049    ) -> Option<Task<Result<Entry>>> {
1050        let old_path = self.entry_for_id(entry_id)?.path.clone();
1051        let new_path = new_path.into();
1052        let abs_old_path = self.absolutize(&old_path);
1053        let abs_new_path = self.absolutize(&new_path);
1054        let fs = self.fs.clone();
1055        let copy = cx.background().spawn(async move {
1056            copy_recursive(
1057                fs.as_ref(),
1058                &abs_old_path,
1059                &abs_new_path,
1060                Default::default(),
1061            )
1062            .await
1063        });
1064
1065        Some(cx.spawn(|this, mut cx| async move {
1066            copy.await?;
1067            this.update(&mut cx, |this, cx| {
1068                this.as_local_mut()
1069                    .unwrap()
1070                    .refresh_entry(new_path.clone(), None, cx)
1071            })
1072            .await
1073        }))
1074    }
1075
    /// Asks the background scanner to rescan `path` (and `old_path`, when the
    /// entry was just renamed from there), then returns the up-to-date entry
    /// for `path` once the rescan completes.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path without a file name is the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Waiting on the barrier observes that the scanner has finished
            // processing these paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            // The worktree may have been dropped while we were waiting.
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1113
    /// Starts (or resumes) sharing this worktree's state with remote
    /// collaborators under `project_id`.
    ///
    /// On the first call this sends every existing diagnostic summary, then
    /// spawns a background task that streams snapshot updates to the server
    /// as the worktree changes. Subsequent calls just signal the existing
    /// task to resume sending updates. The returned task resolves once the
    /// first full snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the streaming
            // task to resume sending updates.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send all known diagnostic summaries up front.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Diff against an empty snapshot initially, so the first
                    // update contains every entry.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            // On failure, wait until we're asked to resume
                            // (e.g. after reconnecting) and retry the chunk.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // The first successfully-sent snapshot resolves the
                        // task returned from `share`.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1200
    /// Stops sharing this worktree, dropping the update-streaming task.
    pub fn unshare(&mut self) {
        self.share.take();
    }
1204
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1208}
1209
impl RemoteWorktree {
    /// Returns a clone of the current snapshot of this remote worktree.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected: stops accepting remote updates
    /// and drops all pending snapshot subscriptions (which fails their
    /// waiters, since the senders are dropped).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the buffer, then records the resulting version,
    /// fingerprint, and mtime on the local buffer replica.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Enqueues a worktree update received from the host. Updates are
    /// silently dropped after `disconnected_from_host` has cleared the
    /// channel.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot at least as new as `scan_id` has been applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once a snapshot with at least
    /// `scan_id` has been observed, or fails if the worktree disconnects
    /// first.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` below return an error.
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan id.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary received from the host for `path`,
    /// removing the per-server record (and the path's map entry) when the
    /// summary is empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts `entry` once the snapshot for `scan_id` has been observed,
    /// keeping the foreground snapshot in sync with the background one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes the entry with `id` once the snapshot for `scan_id` has been
    /// observed, keeping the foreground snapshot in sync with the background
    /// one.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1351
1352impl Snapshot {
1353    pub fn id(&self) -> WorktreeId {
1354        self.id
1355    }
1356
1357    pub fn abs_path(&self) -> &Arc<Path> {
1358        &self.abs_path
1359    }
1360
1361    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1362        self.entries_by_id.get(&entry_id, &()).is_some()
1363    }
1364
1365    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1366        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1367        let old_entry = self.entries_by_id.insert_or_replace(
1368            PathEntry {
1369                id: entry.id,
1370                path: entry.path.clone(),
1371                is_ignored: entry.is_ignored,
1372                scan_id: 0,
1373            },
1374            &(),
1375        );
1376        if let Some(old_entry) = old_entry {
1377            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1378        }
1379        self.entries_by_path.insert_or_replace(entry.clone(), &());
1380        Ok(entry)
1381    }
1382
1383    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1384        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1385        self.entries_by_path = {
1386            let mut cursor = self.entries_by_path.cursor();
1387            let mut new_entries_by_path =
1388                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1389            while let Some(entry) = cursor.item() {
1390                if entry.path.starts_with(&removed_entry.path) {
1391                    self.entries_by_id.remove(&entry.id, &());
1392                    cursor.next(&());
1393                } else {
1394                    break;
1395                }
1396            }
1397            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1398            new_entries_by_path
1399        };
1400
1401        Some(removed_entry.path)
1402    }
1403
1404    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1405        let mut entries_by_path_edits = Vec::new();
1406        let mut entries_by_id_edits = Vec::new();
1407        for entry_id in update.removed_entries {
1408            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1409                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1410                entries_by_id_edits.push(Edit::Remove(entry.id));
1411            }
1412        }
1413
1414        for entry in update.updated_entries {
1415            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1416            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1417                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1418            }
1419            entries_by_id_edits.push(Edit::Insert(PathEntry {
1420                id: entry.id,
1421                path: entry.path.clone(),
1422                is_ignored: entry.is_ignored,
1423                scan_id: 0,
1424            }));
1425            entries_by_path_edits.push(Edit::Insert(entry));
1426        }
1427
1428        self.entries_by_path.edit(entries_by_path_edits, &());
1429        self.entries_by_id.edit(entries_by_id_edits, &());
1430
1431        update.removed_repositories.sort_unstable();
1432        self.repository_entries.retain(|_, entry| {
1433            if let Ok(_) = update
1434                .removed_repositories
1435                .binary_search(&entry.work_directory.to_proto())
1436            {
1437                false
1438            } else {
1439                true
1440            }
1441        });
1442
1443        for repository in update.updated_repositories {
1444            let repository = RepositoryEntry {
1445                work_directory: ProjectEntryId::from_proto(repository.work_directory_id).into(),
1446                branch: repository.branch.map(Into::into),
1447                // TODO: status
1448                worktree_statuses: Default::default(),
1449            };
1450            if let Some(entry) = self.entry_for_id(repository.work_directory_id()) {
1451                self.repository_entries
1452                    .insert(RepositoryWorkDirectory(entry.path.clone()), repository)
1453            } else {
1454                log::error!("no work directory entry for repository {:?}", repository)
1455            }
1456        }
1457
1458        self.scan_id = update.scan_id as usize;
1459        if update.is_last_update {
1460            self.completed_scan_id = update.scan_id as usize;
1461        }
1462
1463        Ok(())
1464    }
1465
1466    pub fn file_count(&self) -> usize {
1467        self.entries_by_path.summary().file_count
1468    }
1469
1470    pub fn visible_file_count(&self) -> usize {
1471        self.entries_by_path.summary().visible_file_count
1472    }
1473
1474    fn traverse_from_offset(
1475        &self,
1476        include_dirs: bool,
1477        include_ignored: bool,
1478        start_offset: usize,
1479    ) -> Traversal {
1480        let mut cursor = self.entries_by_path.cursor();
1481        cursor.seek(
1482            &TraversalTarget::Count {
1483                count: start_offset,
1484                include_dirs,
1485                include_ignored,
1486            },
1487            Bias::Right,
1488            &(),
1489        );
1490        Traversal {
1491            cursor,
1492            include_dirs,
1493            include_ignored,
1494        }
1495    }
1496
1497    fn traverse_from_path(
1498        &self,
1499        include_dirs: bool,
1500        include_ignored: bool,
1501        path: &Path,
1502    ) -> Traversal {
1503        let mut cursor = self.entries_by_path.cursor();
1504        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1505        Traversal {
1506            cursor,
1507            include_dirs,
1508            include_ignored,
1509        }
1510    }
1511
1512    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1513        self.traverse_from_offset(false, include_ignored, start)
1514    }
1515
1516    pub fn entries(&self, include_ignored: bool) -> Traversal {
1517        self.traverse_from_offset(true, include_ignored, 0)
1518    }
1519
1520    pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1521        self.repository_entries.values()
1522    }
1523
1524    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1525        let empty_path = Path::new("");
1526        self.entries_by_path
1527            .cursor::<()>()
1528            .filter(move |entry| entry.path.as_ref() != empty_path)
1529            .map(|entry| &entry.path)
1530    }
1531
1532    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1533        let mut cursor = self.entries_by_path.cursor();
1534        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1535        let traversal = Traversal {
1536            cursor,
1537            include_dirs: true,
1538            include_ignored: true,
1539        };
1540        ChildEntriesIter {
1541            traversal,
1542            parent_path,
1543        }
1544    }
1545
1546    pub fn root_entry(&self) -> Option<&Entry> {
1547        self.entry_for_path("")
1548    }
1549
1550    pub fn root_name(&self) -> &str {
1551        &self.root_name
1552    }
1553
1554    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1555        self.repository_entries
1556            .get(&RepositoryWorkDirectory(Path::new("").into()))
1557            .map(|entry| entry.to_owned())
1558    }
1559
1560    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1561        self.repository_entries.values()
1562    }
1563
1564    pub fn scan_id(&self) -> usize {
1565        self.scan_id
1566    }
1567
1568    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1569        let path = path.as_ref();
1570        self.traverse_from_path(true, true, path)
1571            .entry()
1572            .and_then(|entry| {
1573                if entry.path.as_ref() == path {
1574                    Some(entry)
1575                } else {
1576                    None
1577                }
1578            })
1579    }
1580
1581    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1582        let entry = self.entries_by_id.get(&id, &())?;
1583        self.entry_for_path(&entry.path)
1584    }
1585
1586    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1587        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1588    }
1589}
1590
1591impl LocalSnapshot {
    /// Returns the local git repository state tracked for `repo`'s work
    /// directory, if any.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }
1595
1596    pub(crate) fn repo_for_metadata(
1597        &self,
1598        path: &Path,
1599    ) -> Option<(ProjectEntryId, Arc<Mutex<dyn GitRepository>>)> {
1600        let (entry_id, local_repo) = self
1601            .git_repositories
1602            .iter()
1603            .find(|(_, repo)| repo.in_dot_git(path))?;
1604        Some((*entry_id, local_repo.repo_ptr.to_owned()))
1605    }
1606
1607    #[cfg(test)]
1608    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1609        let root_name = self.root_name.clone();
1610        proto::UpdateWorktree {
1611            project_id,
1612            worktree_id: self.id().to_proto(),
1613            abs_path: self.abs_path().to_string_lossy().into(),
1614            root_name,
1615            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1616            removed_entries: Default::default(),
1617            scan_id: self.scan_id as u64,
1618            is_last_update: true,
1619            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1620            removed_repositories: Default::default(),
1621        }
1622    }
1623
1624    pub(crate) fn build_update(
1625        &self,
1626        other: &Self,
1627        project_id: u64,
1628        worktree_id: u64,
1629        include_ignored: bool,
1630    ) -> proto::UpdateWorktree {
1631        let mut updated_entries = Vec::new();
1632        let mut removed_entries = Vec::new();
1633        let mut self_entries = self
1634            .entries_by_id
1635            .cursor::<()>()
1636            .filter(|e| include_ignored || !e.is_ignored)
1637            .peekable();
1638        let mut other_entries = other
1639            .entries_by_id
1640            .cursor::<()>()
1641            .filter(|e| include_ignored || !e.is_ignored)
1642            .peekable();
1643        loop {
1644            match (self_entries.peek(), other_entries.peek()) {
1645                (Some(self_entry), Some(other_entry)) => {
1646                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1647                        Ordering::Less => {
1648                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1649                            updated_entries.push(entry);
1650                            self_entries.next();
1651                        }
1652                        Ordering::Equal => {
1653                            if self_entry.scan_id != other_entry.scan_id {
1654                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1655                                updated_entries.push(entry);
1656                            }
1657
1658                            self_entries.next();
1659                            other_entries.next();
1660                        }
1661                        Ordering::Greater => {
1662                            removed_entries.push(other_entry.id.to_proto());
1663                            other_entries.next();
1664                        }
1665                    }
1666                }
1667                (Some(self_entry), None) => {
1668                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1669                    updated_entries.push(entry);
1670                    self_entries.next();
1671                }
1672                (None, Some(other_entry)) => {
1673                    removed_entries.push(other_entry.id.to_proto());
1674                    other_entries.next();
1675                }
1676                (None, None) => break,
1677            }
1678        }
1679
1680        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1681        let mut removed_repositories = Vec::new();
1682        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1683        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1684        loop {
1685            match (self_repos.peek(), other_repos.peek()) {
1686                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1687                    match Ord::cmp(self_work_dir, other_work_dir) {
1688                        Ordering::Less => {
1689                            updated_repositories.push((*self_repo).into());
1690                            self_repos.next();
1691                        }
1692                        Ordering::Equal => {
1693                            if self_repo != other_repo {
1694                                updated_repositories.push((*self_repo).into());
1695                            }
1696
1697                            self_repos.next();
1698                            other_repos.next();
1699                        }
1700                        Ordering::Greater => {
1701                            removed_repositories.push(other_repo.work_directory.to_proto());
1702                            other_repos.next();
1703                        }
1704                    }
1705                }
1706                (Some((_, self_repo)), None) => {
1707                    updated_repositories.push((*self_repo).into());
1708                    self_repos.next();
1709                }
1710                (None, Some((_, other_repo))) => {
1711                    removed_repositories.push(other_repo.work_directory.to_proto());
1712                    other_repos.next();
1713                }
1714                (None, None) => break,
1715            }
1716        }
1717
1718        proto::UpdateWorktree {
1719            project_id,
1720            worktree_id,
1721            abs_path: self.abs_path().to_string_lossy().into(),
1722            root_name: self.root_name().to_string(),
1723            updated_entries,
1724            removed_entries,
1725            scan_id: self.scan_id as u64,
1726            is_last_update: self.completed_scan_id == self.scan_id,
1727            updated_repositories,
1728            removed_repositories,
1729        }
1730    }
1731
    /// Inserts (or replaces) a single entry in the snapshot, returning the
    /// entry with its final id assigned. `.gitignore` files are parsed as
    /// they are inserted so their rules are cached for this scan.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // NOTE: blocks the current thread while the ignore file loads.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    // Cache the matcher keyed by the directory containing it.
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    // A broken ignore file shouldn't abort the scan.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // If a directory is re-inserted as pending, preserve the kind that
        // was already recorded at this path.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If the path was previously occupied by a different entry, drop
        // that entry's id-keyed record as well.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
1781
    /// Fills in the children of the directory at `parent_path`, marking the
    /// parent as fully scanned. Also records the directory's gitignore (if
    /// any) and, for `.git` directories, registers the enclosing repository.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        // Transition the parent from pending to fully scanned; bail out if
        // it is no longer recorded as a directory at all.
        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            // Cache this directory's gitignore, tagged with the current scan.
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            self.build_repo(parent_path, fs);
        }

        // Batch the edits for both trees and apply them once at the end.
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }
1837
1838    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1839        let abs_path = self.abs_path.join(&parent_path);
1840        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1841
1842        // Guard against repositories inside the repository metadata
1843        if work_dir
1844            .components()
1845            .find(|component| component.as_os_str() == *DOT_GIT)
1846            .is_some()
1847        {
1848            return None;
1849        };
1850
1851        let work_dir_id = self
1852            .entry_for_path(work_dir.clone())
1853            .map(|entry| entry.id)?;
1854
1855        if self.git_repositories.get(&work_dir_id).is_none() {
1856            let repo = fs.open_repo(abs_path.as_path())?;
1857            let work_directory = RepositoryWorkDirectory(work_dir.clone());
1858            let scan_id = self.scan_id;
1859
1860            let repo_lock = repo.lock();
1861
1862            self.repository_entries.insert(
1863                work_directory,
1864                RepositoryEntry {
1865                    work_directory: work_dir_id.into(),
1866                    branch: repo_lock.branch_name().map(Into::into),
1867                    worktree_statuses: repo_lock.worktree_statuses().unwrap_or_default(),
1868                },
1869            );
1870            drop(repo_lock);
1871
1872            self.git_repositories.insert(
1873                work_dir_id,
1874                LocalRepositoryEntry {
1875                    scan_id,
1876                    full_scan_id: scan_id,
1877                    repo_ptr: repo,
1878                    git_dir_path: parent_path.clone(),
1879                },
1880            )
1881        }
1882
1883        Some(())
1884    }
1885    fn reuse_entry_id(&mut self, entry: &mut Entry) {
1886        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1887            entry.id = removed_entry_id;
1888        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1889            entry.id = existing_entry.id;
1890        }
1891    }
1892
    /// Removes the entry at `path` and everything beneath it from both entry
    /// trees, remembering the removed ids (keyed by inode) so they can be
    /// reused by `reuse_entry_id` if the same files reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before `path`], [the span
            // starting at `path` up to its successor], and [after]; keep the
            // first and last pieces, the middle is what gets removed.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Track the highest removed id per inode so a reappearing file
            // can get its old id back.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates the cached matcher for its
        // parent directory; bump its scan id so it gets refreshed.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        }
    }
1925
1926    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1927        let mut inodes = TreeSet::default();
1928        for ancestor in path.ancestors().skip(1) {
1929            if let Some(entry) = self.entry_for_path(ancestor) {
1930                inodes.insert(entry.inode);
1931            }
1932        }
1933        inodes
1934    }
1935
1936    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1937        let mut new_ignores = Vec::new();
1938        for ancestor in abs_path.ancestors().skip(1) {
1939            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1940                new_ignores.push((ancestor, Some(ignore.clone())));
1941            } else {
1942                new_ignores.push((ancestor, None));
1943            }
1944        }
1945
1946        let mut ignore_stack = IgnoreStack::none();
1947        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1948            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1949                ignore_stack = IgnoreStack::all();
1950                break;
1951            } else if let Some(ignore) = ignore {
1952                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1953            }
1954        }
1955
1956        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1957            ignore_stack = IgnoreStack::all();
1958        }
1959
1960        ignore_stack
1961    }
1962}
1963
1964async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1965    let contents = fs.load(abs_path).await?;
1966    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1967    let mut builder = GitignoreBuilder::new(parent);
1968    for line in contents.lines() {
1969        builder.add_line(Some(abs_path.into()), line)?;
1970    }
1971    Ok(builder.build()?)
1972}
1973
1974impl WorktreeId {
1975    pub fn from_usize(handle_id: usize) -> Self {
1976        Self(handle_id)
1977    }
1978
1979    pub(crate) fn from_proto(id: u64) -> Self {
1980        Self(id as usize)
1981    }
1982
1983    pub fn to_proto(&self) -> u64 {
1984        self.0 as u64
1985    }
1986
1987    pub fn to_usize(&self) -> usize {
1988        self.0
1989    }
1990}
1991
1992impl fmt::Display for WorktreeId {
1993    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1994        self.0.fmt(f)
1995    }
1996}
1997
1998impl Deref for Worktree {
1999    type Target = Snapshot;
2000
2001    fn deref(&self) -> &Self::Target {
2002        match self {
2003            Worktree::Local(worktree) => &worktree.snapshot,
2004            Worktree::Remote(worktree) => &worktree.snapshot,
2005        }
2006    }
2007}
2008
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    // Expose the local snapshot's API directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2016
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // Expose the snapshot's API directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2024
2025impl fmt::Debug for LocalWorktree {
2026    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2027        self.snapshot.fmt(f)
2028    }
2029}
2030
2031impl fmt::Debug for Snapshot {
2032    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2033        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2034        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2035
2036        impl<'a> fmt::Debug for EntriesByPath<'a> {
2037            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2038                f.debug_map()
2039                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2040                    .finish()
2041            }
2042        }
2043
2044        impl<'a> fmt::Debug for EntriesById<'a> {
2045            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2046                f.debug_list().entries(self.0.iter()).finish()
2047            }
2048        }
2049
2050        f.debug_struct("Snapshot")
2051            .field("id", &self.id)
2052            .field("root_name", &self.root_name)
2053            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2054            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2055            .finish()
2056    }
2057}
2058
/// A handle to a file within a worktree.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// The file's last-known modification time.
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the containing worktree is local (as opposed to remote).
    pub(crate) is_local: bool,
    /// Whether the file has been deleted.
    pub(crate) is_deleted: bool,
}
2068
2069impl language::File for File {
2070    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2071        if self.is_local {
2072            Some(self)
2073        } else {
2074            None
2075        }
2076    }
2077
2078    fn mtime(&self) -> SystemTime {
2079        self.mtime
2080    }
2081
2082    fn path(&self) -> &Arc<Path> {
2083        &self.path
2084    }
2085
2086    fn full_path(&self, cx: &AppContext) -> PathBuf {
2087        let mut full_path = PathBuf::new();
2088        let worktree = self.worktree.read(cx);
2089
2090        if worktree.is_visible() {
2091            full_path.push(worktree.root_name());
2092        } else {
2093            let path = worktree.abs_path();
2094
2095            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2096                full_path.push("~");
2097                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2098            } else {
2099                full_path.push(path)
2100            }
2101        }
2102
2103        if self.path.components().next().is_some() {
2104            full_path.push(&self.path);
2105        }
2106
2107        full_path
2108    }
2109
2110    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2111    /// of its worktree, then this method will return the name of the worktree itself.
2112    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2113        self.path
2114            .file_name()
2115            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2116    }
2117
2118    fn is_deleted(&self) -> bool {
2119        self.is_deleted
2120    }
2121
2122    fn as_any(&self) -> &dyn Any {
2123        self
2124    }
2125
2126    fn to_proto(&self) -> rpc::proto::File {
2127        rpc::proto::File {
2128            worktree_id: self.worktree.id() as u64,
2129            entry_id: self.entry_id.to_proto(),
2130            path: self.path.to_string_lossy().into(),
2131            mtime: Some(self.mtime.into()),
2132            is_deleted: self.is_deleted,
2133        }
2134    }
2135}
2136
2137impl language::LocalFile for File {
2138    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2139        self.worktree
2140            .read(cx)
2141            .as_local()
2142            .unwrap()
2143            .abs_path
2144            .join(&self.path)
2145    }
2146
2147    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2148        let worktree = self.worktree.read(cx).as_local().unwrap();
2149        let abs_path = worktree.absolutize(&self.path);
2150        let fs = worktree.fs.clone();
2151        cx.background()
2152            .spawn(async move { fs.load(&abs_path).await })
2153    }
2154
2155    fn buffer_reloaded(
2156        &self,
2157        buffer_id: u64,
2158        version: &clock::Global,
2159        fingerprint: RopeFingerprint,
2160        line_ending: LineEnding,
2161        mtime: SystemTime,
2162        cx: &mut AppContext,
2163    ) {
2164        let worktree = self.worktree.read(cx).as_local().unwrap();
2165        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2166            worktree
2167                .client
2168                .send(proto::BufferReloaded {
2169                    project_id,
2170                    buffer_id,
2171                    version: serialize_version(version),
2172                    mtime: Some(mtime.into()),
2173                    fingerprint: serialize_fingerprint(fingerprint),
2174                    line_ending: serialize_line_ending(line_ending) as i32,
2175                })
2176                .log_err();
2177        }
2178    }
2179}
2180
2181impl File {
2182    pub fn from_proto(
2183        proto: rpc::proto::File,
2184        worktree: ModelHandle<Worktree>,
2185        cx: &AppContext,
2186    ) -> Result<Self> {
2187        let worktree_id = worktree
2188            .read(cx)
2189            .as_remote()
2190            .ok_or_else(|| anyhow!("not remote"))?
2191            .id();
2192
2193        if worktree_id.to_proto() != proto.worktree_id {
2194            return Err(anyhow!("worktree id does not match file"));
2195        }
2196
2197        Ok(Self {
2198            worktree,
2199            path: Path::new(&proto.path).into(),
2200            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2201            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2202            is_local: false,
2203            is_deleted: proto.is_deleted,
2204        })
2205    }
2206
2207    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2208        file.and_then(|f| f.as_any().downcast_ref())
2209    }
2210
2211    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2212        self.worktree.read(cx).id()
2213    }
2214
2215    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2216        if self.is_deleted {
2217            None
2218        } else {
2219            Some(self.entry_id)
2220        }
2221    }
2222}
2223
/// A single file-system entry (file or directory) within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    /// Whether this is a file, a scanned directory, or a pending directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is matched by a gitignore rule.
    pub is_ignored: bool,
}
2234
/// The kind of a worktree [`Entry`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose contents have not been
    /// scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy matching its path.
    File(CharBag),
}
2241
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// A new entry was added.
    Added,
    /// An existing entry was removed.
    Removed,
    /// An existing entry was modified.
    Updated,
    /// An entry was either added or updated, when it is unknown which of the
    /// two occurred.
    AddedOrUpdated,
}
2249
2250impl Entry {
2251    fn new(
2252        path: Arc<Path>,
2253        metadata: &fs::Metadata,
2254        next_entry_id: &AtomicUsize,
2255        root_char_bag: CharBag,
2256    ) -> Self {
2257        Self {
2258            id: ProjectEntryId::new(next_entry_id),
2259            kind: if metadata.is_dir {
2260                EntryKind::PendingDir
2261            } else {
2262                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2263            },
2264            path,
2265            inode: metadata.inode,
2266            mtime: metadata.mtime,
2267            is_symlink: metadata.is_symlink,
2268            is_ignored: false,
2269        }
2270    }
2271
2272    pub fn is_dir(&self) -> bool {
2273        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2274    }
2275
2276    pub fn is_file(&self) -> bool {
2277        matches!(self.kind, EntryKind::File(_))
2278    }
2279}
2280
2281impl sum_tree::Item for Entry {
2282    type Summary = EntrySummary;
2283
2284    fn summary(&self) -> Self::Summary {
2285        let visible_count = if self.is_ignored { 0 } else { 1 };
2286        let file_count;
2287        let visible_file_count;
2288        if self.is_file() {
2289            file_count = 1;
2290            visible_file_count = visible_count;
2291        } else {
2292            file_count = 0;
2293            visible_file_count = 0;
2294        }
2295
2296        EntrySummary {
2297            max_path: self.path.clone(),
2298            count: 1,
2299            visible_count,
2300            file_count,
2301            visible_file_count,
2302        }
2303    }
2304}
2305
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed (and ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2313
/// Aggregated statistics for a subtree of [`Entry`] items in a [`SumTree`].
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (rightmost) path in the subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of entries that are not gitignored.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of file entries that are not gitignored.
    visible_file_count: usize,
}
2322
2323impl Default for EntrySummary {
2324    fn default() -> Self {
2325        Self {
2326            max_path: Arc::from(Path::new("")),
2327            count: 0,
2328            visible_count: 0,
2329            file_count: 0,
2330            visible_file_count: 0,
2331        }
2332    }
2333}
2334
2335impl sum_tree::Summary for EntrySummary {
2336    type Context = ();
2337
2338    fn add_summary(&mut self, rhs: &Self, _: &()) {
2339        self.max_path = rhs.max_path.clone();
2340        self.count += rhs.count;
2341        self.visible_count += rhs.visible_count;
2342        self.file_count += rhs.file_count;
2343        self.visible_file_count += rhs.visible_file_count;
2344    }
2345}
2346
/// An item in the id-ordered tree, used to look entries up by their
/// [`ProjectEntryId`].
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this record was last updated.
    scan_id: usize,
}
2354
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // Summarize a subtree by the greatest id it contains.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2362
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    // Path entries are keyed (and ordered) by their project entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2370
/// Summary for the id-ordered [`PathEntry`] tree: tracks the greatest id.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2375
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // `summary` covers items to the right, so its max id supersedes ours.
        self.max_id = summary.max_id;
    }
}
2383
// Allows seeking through the id-ordered tree by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2389
/// Newtype over a path, used as the ordering key for [`Entry`] trees.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2392
2393impl Default for PathKey {
2394    fn default() -> Self {
2395        Self(Path::new("").into())
2396    }
2397}
2398
// Allows seeking through the path-ordered tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2404
/// Scans a worktree's directory in the background, keeping `snapshot` up to
/// date in response to file-system events and explicit refresh requests.
struct BackgroundScanner {
    /// The snapshot being incrementally updated by the scanner.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to report scan progress and results to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests to rescan specific paths; completion is signaled through the
    /// paired barrier sender.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The previously-reported snapshot, plus the paths that have changed
    /// since it was reported.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2414
2415impl BackgroundScanner {
2416    fn new(
2417        snapshot: LocalSnapshot,
2418        fs: Arc<dyn Fs>,
2419        status_updates_tx: UnboundedSender<ScanState>,
2420        executor: Arc<executor::Background>,
2421        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2422    ) -> Self {
2423        Self {
2424            fs,
2425            status_updates_tx,
2426            executor,
2427            refresh_requests_rx,
2428            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2429            snapshot: Mutex::new(snapshot),
2430            finished_initial_scan: false,
2431        }
2432    }
2433
    /// Drives the scanner: performs the initial recursive scan, then loops
    /// processing file-system events and path refresh requests until the
    /// event stream and request channel close.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the root itself, mark the
            // root entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our sender lets `scan_dirs` terminate once all queued
        // jobs (including ones spawned by jobs) have completed.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Drain any further batches that are already ready.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Coalesce any event batches that are already ready into
                    // a single pass.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2525
    /// Rescans the given paths, then reports a status update, releasing
    /// `barrier` once it is sent. Returns the result of `send_status_update`;
    /// callers stop scanning when it returns false.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2530
    /// Handles a batch of changed paths from the file-system event stream:
    /// reloads the affected entries, scans any newly-discovered directories,
    /// refreshes ignore statuses, prunes stale git repository state, and
    /// finally sends a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Merge the changed paths into the pending list, keeping it sorted.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop repository state for work directories whose `.git` directory
        // no longer exists in the snapshot. (`mem::take` lets us borrow the
        // snapshot inside the retain closure.)
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Drop repository entries whose backing local repository was just
        // removed above.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2573
    /// Drains `scan_jobs_rx` on a pool of `num_cpus` worker tasks until the
    /// queue is exhausted.
    ///
    /// While scanning, each worker also:
    /// - services path-refresh requests from `refresh_requests_rx`, giving
    ///   them priority over the scan queue;
    /// - sends throttled progress updates (when `enable_progress_updates` is
    ///   set), using a shared atomic counter so that only one worker reports
    ///   per interval.
    ///
    /// Returns early if the status-update channel has been closed.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the status receiver has been dropped, nobody is listening for
        // scan results, so skip the scan entirely.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race for this interval.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already reported; catch up to the
                                            // shared counter so we can compete next interval.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Don't log an error for the worktree root itself.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2646
2647    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2648        let mut prev_state = self.prev_state.lock();
2649        let snapshot = self.snapshot.lock().clone();
2650        let mut old_snapshot = snapshot.snapshot.clone();
2651        mem::swap(&mut old_snapshot, &mut prev_state.0);
2652        let changed_paths = mem::take(&mut prev_state.1);
2653        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
2654        self.status_updates_tx
2655            .unbounded_send(ScanState::Updated {
2656                snapshot,
2657                changes,
2658                scanning,
2659                barrier,
2660            })
2661            .is_ok()
2662    }
2663
    /// Scans the single directory described by `job`, inserting the entries
    /// found there into the snapshot and enqueueing a child `ScanJob` for
    /// every subdirectory discovered.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory child; `None` marks subdirectories that are
        // deliberately not scanned (recursive symlinks).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // Entry disappeared between listing and stat'ing it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // tends to appear early in the listing, so there should rarely be many such
                // entries. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // `new_jobs` holds one slot per directory entry, in the
                    // same order, so the iterators stay in lock-step here.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue subdirectory jobs only after this directory's entries have
        // been inserted into the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2789
    /// Re-stats the given absolute paths and reconciles the snapshot with the
    /// result: existing entries are refreshed, vanished ones removed.
    ///
    /// When `scan_queue_tx` is provided the refresh is recursive: entries
    /// under the given paths are removed up front, and directories are
    /// re-queued for a full scan. Returns the worktree-relative paths that
    /// were processed, or `None` if the worktree root can't be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // After sorting, any path nested inside another given path directly
        // follows its ancestor, so `dedup_by` drops the nested ones.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive refresh completes synchronously within this call,
        // so the scan is still "complete" afterwards if it was before.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    self.reload_repo_for_path(&path, &mut snapshot);

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against recursive symlinks, as in `scan_dir`.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    // The path no longer exists; drop any git status stored for it.
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2881
    /// Removes the git statuses stored for `path` (and anything beneath it)
    /// from the repository entry containing it.
    ///
    /// Paths inside a `.git` directory are skipped. Returns `None` when the
    /// path has no containing repository, which is not an error.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            // Record that this repository was touched during the current scan.
            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            // TODO: Status Replace linear scan with smarter sum tree traversal
            snapshot
                .repository_entries
                .update(&work_dir, |entry| entry.worktree_statuses.retain(|stored_path, _| {
                    !stored_path.starts_with(&repo_path)
                }));
        }

        Some(())
    }
2909
    /// Updates git repository state in response to a change at `path`.
    ///
    /// If the path lies inside a `.git` directory, the repository's index is
    /// reloaded and its branch and full status set are refreshed. Otherwise
    /// only the status of the single changed file is recomputed. Returns
    /// `None` when there is nothing to update.
    fn reload_repo_for_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            // Something changed inside the `.git` directory itself: reload the
            // repository wholesale.
            let (entry_id, repo) = snapshot.repo_for_metadata(&path)?;

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.worktree_statuses().unwrap_or_default();

            // `full_scan_id` records that every file's status is now current.
            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.worktree_statuses = statuses;
            });
        } else {
            // A regular file changed: refresh just its status.
            let repo = snapshot.repo_for(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let status = {
                let local_repo = snapshot.get_local_repo(&repo)?;

                // Short circuit if we've already scanned everything
                if local_repo.full_scan_id == scan_id {
                    return None;
                }

                let git_ptr = local_repo.repo_ptr.lock();
                git_ptr.worktree_status(&repo_path)?
            };

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            snapshot
                .repository_entries
                .update(&work_dir, |entry| entry.worktree_statuses.insert(repo_path, status));
        }

        Some(())
    }
2968
    /// Recomputes `is_ignored` for entries affected by `.gitignore` files
    /// that changed during the current scan, and prunes ignore data for
    /// `.gitignore` files that no longer exist.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // An ignore file changed after the last completed scan, and
                // its directory still exists: its subtree must be re-checked.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // The `.gitignore` file itself was deleted.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove stale ignore data from both the local copy and the shared snapshot.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip directories nested inside the one just dequeued; the jobs
            // recurse, so the outermost directory covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Close the channel so workers terminate once the queue drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3046
    /// Recomputes `is_ignored` for the direct children of `job.abs_path`,
    /// writing any changes back to the shared snapshot and enqueueing a
    /// follow-up job for each child directory.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only entries whose ignored flag actually flipped need edits.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply all edits under a single lock acquisition.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3089
    /// Diffs two snapshots, limited to the subtrees rooted at `event_paths`,
    /// and classifies every differing entry as added, removed, or updated.
    ///
    /// Walks a cursor over each snapshot's path-ordered entries in lock-step;
    /// a path present only in the old snapshot is `Removed`, only in the new
    /// one is `Added`, and present in both with a different mtime is
    /// `Updated`.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past this event
                        // path's subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
3156
3157    async fn progress_timer(&self, running: bool) {
3158        if !running {
3159            return futures::future::pending().await;
3160        }
3161
3162        #[cfg(any(test, feature = "test-support"))]
3163        if self.fs.is_fake() {
3164            return self.executor.simulate_random_delay().await;
3165        }
3166
3167        smol::Timer::after(Duration::from_millis(100)).await;
3168    }
3169}
3170
3171fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3172    let mut result = root_char_bag;
3173    result.extend(
3174        path.to_string_lossy()
3175            .chars()
3176            .map(|c| c.to_ascii_lowercase()),
3177    );
3178    result
3179}
3180
/// A unit of work for the background scanner: one directory to scan.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules in effect for this directory.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for discovered subdirectories are pushed.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to break recursive-symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
3188
/// A unit of work for recomputing ignore statuses: one directory whose
/// children must be re-checked against the current gitignore rules.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory to process.
    abs_path: Arc<Path>,
    /// Gitignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which follow-up jobs for child directories are pushed.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3194
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until the worktree has observed and processed all pending
    /// file-system events. See the implementation for details.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3202
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to notice it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be observed, so
            // the event stream is known to be fully drained.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3243
/// Running totals accumulated while seeking through the entry sum-tree;
/// used as the cursor dimension for [`Traversal`].
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest entry path seen so far.
    max_path: &'a Path,
    // Total entries seen (files and directories, including ignored).
    count: usize,
    // Entries seen that are not ignored.
    visible_count: usize,
    // Files seen (including ignored ones).
    file_count: usize,
    // Files seen that are not ignored.
    visible_file_count: usize,
}
3252
3253impl<'a> TraversalProgress<'a> {
3254    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3255        match (include_ignored, include_dirs) {
3256            (true, true) => self.count,
3257            (true, false) => self.file_count,
3258            (false, true) => self.visible_count,
3259            (false, false) => self.visible_file_count,
3260        }
3261    }
3262}
3263
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds an entry-subtree summary into the running totals as the cursor
    // advances.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3273
3274impl<'a> Default for TraversalProgress<'a> {
3275    fn default() -> Self {
3276        Self {
3277            max_path: Path::new(""),
3278            count: 0,
3279            visible_count: 0,
3280            file_count: 0,
3281            visible_file_count: 0,
3282        }
3283    }
3284}
3285
/// A filtered, ordered walk over a snapshot's entries, backed by a sum-tree
/// cursor. The two flags control whether directories and ignored entries
/// are yielded.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3291
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until the traversal's offset reaches `offset`.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past all descendants of the current entry, stopping at the next
    /// entry outside its subtree that matches the traversal's filters.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // The successor may be filtered out; if so, loop and skip its
            // subtree as well.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of matching entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3337
3338impl<'a> Iterator for Traversal<'a> {
3339    type Item = &'a Entry;
3340
3341    fn next(&mut self) -> Option<Self::Item> {
3342        if let Some(item) = self.entry() {
3343            self.advance();
3344            Some(item)
3345        } else {
3346            None
3347        }
3348    }
3349}
3350
/// A position to seek to within a worktree's entry tree.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // The entry with exactly this path.
    Path(&'a Path),
    // The first entry whose path is not a descendant of this path.
    PathSuccessor(&'a Path),
    // The position after `count` entries, counted under the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3361
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Orders this target against the cursor's accumulated progress so the
    /// sum-tree seek knows whether it has gone far enough.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Plain path seeks compare lexicographically against the
            // maximum path accumulated so far.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Report Equal at the first position whose max path has
                // left `path`'s subtree; Greater keeps the seek moving.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Count seeks compare the requested count against the filtered
            // count accumulated so far.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3384
/// Iterates over the direct children of a single directory entry,
/// skipping each child's own descendants.
struct ChildEntriesIter<'a> {
    // The directory whose children are being enumerated.
    parent_path: &'a Path,
    // Underlying traversal, positioned within the parent's subtree.
    traversal: Traversal<'a>,
}
3389
3390impl<'a> Iterator for ChildEntriesIter<'a> {
3391    type Item = &'a Entry;
3392
3393    fn next(&mut self) -> Option<Self::Item> {
3394        if let Some(item) = self.traversal.entry() {
3395            if item.path.starts_with(&self.parent_path) {
3396                self.traversal.advance_to_sibling();
3397                return Some(item);
3398            }
3399        }
3400        None
3401    }
3402}
3403
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a worktree entry for transmission to remote peers.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths cross the wire as (lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3417
3418impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3419    type Error = anyhow::Error;
3420
3421    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3422        if let Some(mtime) = entry.mtime {
3423            let kind = if entry.is_dir {
3424                EntryKind::Dir
3425            } else {
3426                let mut char_bag = *root_char_bag;
3427                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3428                EntryKind::File(char_bag)
3429            };
3430            let path: Arc<Path> = PathBuf::from(entry.path).into();
3431            Ok(Entry {
3432                id: ProjectEntryId::from_proto(entry.id),
3433                kind,
3434                path,
3435                inode: entry.inode,
3436                mtime: mtime.into(),
3437                is_symlink: entry.is_symlink,
3438                is_ignored: entry.is_ignored,
3439            })
3440        } else {
3441            Err(anyhow!(
3442                "missing mtime in remote worktree entry {:?}",
3443                entry.path
3444            ))
3445        }
3446    }
3447}
3448
3449#[cfg(test)]
3450mod tests {
3451    use super::*;
3452    use fs::{FakeFs, RealFs};
3453    use gpui::{executor::Deterministic, TestAppContext};
3454    use pretty_assertions::assert_eq;
3455    use rand::prelude::*;
3456    use serde_json::json;
3457    use std::{env, fmt::Write};
3458    use util::{http::FakeHttpClient, test::temp_tree};
3459
    // Verifies that `entries(false)` hides gitignored entries ("a/b")
    // while `entries(true)` includes them.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
               ".gitignore": "a/b\n",
               "a": {
                   "b": "",
                   "c": "",
               }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Ignored entries excluded: "a/b" is absent.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // Ignored entries included: "a/b" appears.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3517
    // Each "lib" symlink points back up at an ancestor directory, forming a
    // cycle. The scanner must record the symlinks as plain entries without
    // following them (which would recurse forever), and must still observe
    // later renames of those links.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // The symlinks appear as entries, but their targets are not
            // expanded into the tree.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Rename one symlink and confirm the snapshot reflects it once
        // pending fs events have been processed.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3597
    // Uses the real filesystem. A `.gitignore` in the worktree's *parent*
    // ignores "ancestor-ignored-file1/2", and the tree's own `.gitignore`
    // ignores "ignored-dir". Both must be honored during the initial scan
    // and during rescans triggered by new files.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Initial scan: ignore state from both gitignore files is applied.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create one new file in each category; rescanned entries must pick
        // up the same ignore state as their existing siblings.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The `.git` directory itself is always treated as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3676
    // With nested repositories (dir1/.git and dir1/deps/dep1/.git),
    // `repo_for` must map each file to its nearest enclosing repository,
    // report repository events when `.git` contents change, and drop the
    // repository when `.git` is removed.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // c.txt sits outside every repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            // Files under dir1 (but not under the nested repo) map to dir1.
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // Files under the nested repository map to the nested repo,
            // not the outer one.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        // Collect UpdatedGitRepositories events for the assertions below.
        let repo_update_events = Arc::new(Mutex::new(vec![]));
        tree.update(cx, |_, cx| {
            let repo_update_events = repo_update_events.clone();
            cx.subscribe(&tree, move |_, _, event, _| {
                if let Event::UpdatedGitRepositories(update) = event {
                    repo_update_events.lock().push(update.clone());
                }
            })
            .detach();
        });

        // Touching a file inside .git must emit an update for that repo.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        assert_eq!(
            repo_update_events.lock()[0]
                .keys()
                .cloned()
                .collect::<Vec<Arc<Path>>>(),
            vec![Path::new("dir1").into()]
        );

        // Deleting .git removes the repository mapping entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3767
3768    #[gpui::test]
3769    async fn test_git_status(cx: &mut TestAppContext) {
3770        #[track_caller]
3771        fn git_init(path: &Path) -> git2::Repository {
3772            git2::Repository::init(path).expect("Failed to initialize git repository")
3773        }
3774
3775        #[track_caller]
3776        fn git_add(path: &Path, repo: &git2::Repository) {
3777            let mut index = repo.index().expect("Failed to get index");
3778            index.add_path(path).expect("Failed to add a.txt");
3779            index.write().expect("Failed to write index");
3780        }
3781
3782        #[track_caller]
3783        fn git_remove_index(path: &Path, repo: &git2::Repository) {
3784            let mut index = repo.index().expect("Failed to get index");
3785            index.remove_path(path).expect("Failed to add a.txt");
3786            index.write().expect("Failed to write index");
3787        }
3788
3789        #[track_caller]
3790        fn git_commit(msg: &'static str, repo: &git2::Repository) {
3791            let signature = repo.signature().unwrap();
3792            let oid = repo.index().unwrap().write_tree().unwrap();
3793            let tree = repo.find_tree(oid).unwrap();
3794            if let Some(head) = repo.head().ok() {
3795                let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
3796
3797                let parent_commit = parent_obj.as_commit().unwrap();
3798
3799                repo.commit(
3800                    Some("HEAD"),
3801                    &signature,
3802                    &signature,
3803                    msg,
3804                    &tree,
3805                    &[parent_commit],
3806                )
3807                .expect("Failed to commit with parent");
3808            } else {
3809                repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
3810                    .expect("Failed to commit");
3811            }
3812        }
3813
3814        #[track_caller]
3815        fn git_stash(repo: &mut git2::Repository) {
3816            let signature = repo.signature().unwrap();
3817            repo.stash_save(&signature, "N/A", None)
3818                .expect("Failed to stash");
3819        }
3820
3821        #[track_caller]
3822        fn git_reset(offset: usize, repo: &git2::Repository) {
3823            let head = repo.head().expect("Couldn't get repo head");
3824            let object = head.peel(git2::ObjectType::Commit).unwrap();
3825            let commit = object.as_commit().unwrap();
3826            let new_head = commit
3827                .parents()
3828                .inspect(|parnet| {
3829                    parnet.message();
3830                })
3831                .skip(offset)
3832                .next()
3833                .expect("Not enough history");
3834            repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
3835                .expect("Could not reset");
3836        }
3837
3838        #[allow(dead_code)]
3839        #[track_caller]
3840        fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
3841            repo.statuses(None)
3842                .unwrap()
3843                .iter()
3844                .map(|status| (status.path().unwrap().to_string(), status.status()))
3845                .collect()
3846        }
3847
3848        let root = temp_tree(json!({
3849            "project": {
3850                "a.txt": "a",
3851                "b.txt": "bb",
3852                "c": {
3853                    "d": {
3854                        "e.txt": "eee"
3855                    }
3856                }
3857            },
3858
3859        }));
3860
3861        let http_client = FakeHttpClient::with_404_response();
3862        let client = cx.read(|cx| Client::new(http_client, cx));
3863        let tree = Worktree::local(
3864            client,
3865            root.path(),
3866            true,
3867            Arc::new(RealFs),
3868            Default::default(),
3869            &mut cx.to_async(),
3870        )
3871        .await
3872        .unwrap();
3873
3874        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3875            .await;
3876
3877        const A_TXT: &'static str = "a.txt";
3878        const B_TXT: &'static str = "b.txt";
3879        const E_TXT: &'static str = "c/d/e.txt";
3880
3881        let work_dir = root.path().join("project");
3882
3883        let mut repo = git_init(work_dir.as_path());
3884        git_add(Path::new(A_TXT), &repo);
3885        git_add(Path::new(E_TXT), &repo);
3886        git_commit("Initial commit", &repo);
3887
3888        std::fs::write(work_dir.join(A_TXT), "aa").unwrap();
3889
3890        tree.flush_fs_events(cx).await;
3891
3892        // Check that the right git state is observed on startup
3893        tree.read_with(cx, |tree, _cx| {
3894            let snapshot = tree.snapshot();
3895            assert_eq!(snapshot.repository_entries.iter().count(), 1);
3896            let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
3897            assert_eq!(dir.0.as_ref(), Path::new("project"));
3898
3899            assert_eq!(repo.worktree_statuses.iter().count(), 2);
3900            assert_eq!(
3901                repo.worktree_statuses.get(&Path::new(A_TXT).into()),
3902                Some(&GitFileStatus::Modified)
3903            );
3904            assert_eq!(
3905                repo.worktree_statuses.get(&Path::new(B_TXT).into()),
3906                Some(&GitFileStatus::Added)
3907            );
3908        });
3909
3910        git_add(Path::new(A_TXT), &repo);
3911        git_add(Path::new(B_TXT), &repo);
3912        git_commit("Committing modified and added", &repo);
3913        tree.flush_fs_events(cx).await;
3914
3915        // Check that repo only changes are tracked
3916        tree.read_with(cx, |tree, _cx| {
3917            let snapshot = tree.snapshot();
3918            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
3919
3920            assert_eq!(repo.worktree_statuses.iter().count(), 0);
3921            assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
3922            assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
3923        });
3924
3925        git_reset(0, &repo);
3926        git_remove_index(Path::new(B_TXT), &repo);
3927        git_stash(&mut repo);
3928        std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
3929        tree.flush_fs_events(cx).await;
3930
3931        // Check that more complex repo changes are tracked
3932        tree.read_with(cx, |tree, _cx| {
3933            let snapshot = tree.snapshot();
3934            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
3935
3936            assert_eq!(repo.worktree_statuses.iter().count(), 2);
3937            assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
3938            assert_eq!(
3939                repo.worktree_statuses.get(&Path::new(B_TXT).into()),
3940                Some(&GitFileStatus::Added)
3941            );
3942            assert_eq!(
3943                repo.worktree_statuses.get(&Path::new(E_TXT).into()),
3944                Some(&GitFileStatus::Modified)
3945            );
3946        });
3947
3948        std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
3949        std::fs::remove_dir_all(work_dir.join("c")).unwrap();
3950
3951        tree.flush_fs_events(cx).await;
3952
3953        // Check that non-repo behavior is tracked
3954        tree.read_with(cx, |tree, _cx| {
3955            let snapshot = tree.snapshot();
3956            let (_, repo) = snapshot.repository_entries.iter().next().unwrap();
3957
3958            assert_eq!(repo.worktree_statuses.iter().count(), 0);
3959            assert_eq!(repo.worktree_statuses.get(&Path::new(A_TXT).into()), None);
3960            assert_eq!(repo.worktree_statuses.get(&Path::new(B_TXT).into()), None);
3961            assert_eq!(repo.worktree_statuses.get(&Path::new(E_TXT).into()), None);
3962        });
3963    }
3964
    // Writing files through the worktree must create entries with the
    // correct ignore state, for both tracked and gitignored directories.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file into a tracked directory and one into a
        // gitignored directory.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        // Both files get entries; only the one in ignored-dir is ignored.
        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4018
    // Creating a directory while the initial scan is still running must
    // produce a valid entry, and the resulting snapshot must round-trip
    // through build_update/apply_remote_update without divergence.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken before the scan has completed.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create "a/e" (including the intermediate "a") mid-scan.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the delta between the two snapshots to the older one
        // must reproduce the newer one exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4067
    // Randomized test: mutate the worktree while its initial scan runs,
    // repeatedly checking snapshot invariants and that remote-update deltas
    // reproduce the authoritative snapshot. OPERATIONS / INITIAL_ENTRIES
    // env vars tune the workload.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Build a random initial tree in the fake filesystem.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot to apply remote updates onto.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // Mutate through the worktree API while the scan is ongoing;
            // failures are tolerated (e.g. racing with the scanner).
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify the update protocol mid-scan.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Final consistency check once the scan has completed.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4144
4145    #[gpui::test(iterations = 100)]
4146    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
4147        let operations = env::var("OPERATIONS")
4148            .map(|o| o.parse().unwrap())
4149            .unwrap_or(40);
4150        let initial_entries = env::var("INITIAL_ENTRIES")
4151            .map(|o| o.parse().unwrap())
4152            .unwrap_or(20);
4153
4154        let root_dir = Path::new("/test");
4155        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
4156        fs.as_fake().insert_tree(root_dir, json!({})).await;
4157        for _ in 0..initial_entries {
4158            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4159        }
4160        log::info!("generated initial tree");
4161
4162        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
4163        let worktree = Worktree::local(
4164            client.clone(),
4165            root_dir,
4166            true,
4167            fs.clone(),
4168            Default::default(),
4169            &mut cx.to_async(),
4170        )
4171        .await
4172        .unwrap();
4173
4174        worktree
4175            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4176            .await;
4177
4178        // After the initial scan is complete, the `UpdatedEntries` event can
4179        // be used to follow along with all changes to the worktree's snapshot.
4180        worktree.update(cx, |tree, cx| {
4181            let mut paths = tree
4182                .as_local()
4183                .unwrap()
4184                .paths()
4185                .cloned()
4186                .collect::<Vec<_>>();
4187
4188            cx.subscribe(&worktree, move |tree, _, event, _| {
4189                if let Event::UpdatedEntries(changes) = event {
4190                    for (path, change_type) in changes.iter() {
4191                        let path = path.clone();
4192                        let ix = match paths.binary_search(&path) {
4193                            Ok(ix) | Err(ix) => ix,
4194                        };
4195                        match change_type {
4196                            PathChange::Added => {
4197                                assert_ne!(paths.get(ix), Some(&path));
4198                                paths.insert(ix, path);
4199                            }
4200                            PathChange::Removed => {
4201                                assert_eq!(paths.get(ix), Some(&path));
4202                                paths.remove(ix);
4203                            }
4204                            PathChange::Updated => {
4205                                assert_eq!(paths.get(ix), Some(&path));
4206                            }
4207                            PathChange::AddedOrUpdated => {
4208                                if paths[ix] != path {
4209                                    paths.insert(ix, path);
4210                                }
4211                            }
4212                        }
4213                    }
4214                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
4215                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
4216                }
4217            })
4218            .detach();
4219        });
4220
4221        let mut snapshots = Vec::new();
4222        let mut mutations_len = operations;
4223        while mutations_len > 1 {
4224            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4225            let buffered_event_count = fs.as_fake().buffered_event_count().await;
4226            if buffered_event_count > 0 && rng.gen_bool(0.3) {
4227                let len = rng.gen_range(0..=buffered_event_count);
4228                log::info!("flushing {} events", len);
4229                fs.as_fake().flush_events(len).await;
4230            } else {
4231                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4232                mutations_len -= 1;
4233            }
4234
4235            cx.foreground().run_until_parked();
4236            if rng.gen_bool(0.2) {
4237                log::info!("storing snapshot {}", snapshots.len());
4238                let snapshot =
4239                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4240                snapshots.push(snapshot);
4241            }
4242        }
4243
4244        log::info!("quiescing");
4245        fs.as_fake().flush_events(usize::MAX).await;
4246        cx.foreground().run_until_parked();
4247        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4248        snapshot.check_invariants();
4249
4250        {
4251            let new_worktree = Worktree::local(
4252                client.clone(),
4253                root_dir,
4254                true,
4255                fs.clone(),
4256                Default::default(),
4257                &mut cx.to_async(),
4258            )
4259            .await
4260            .unwrap();
4261            new_worktree
4262                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4263                .await;
4264            let new_snapshot =
4265                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4266            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4267        }
4268
4269        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4270            let include_ignored = rng.gen::<bool>();
4271            if !include_ignored {
4272                let mut entries_by_path_edits = Vec::new();
4273                let mut entries_by_id_edits = Vec::new();
4274                for entry in prev_snapshot
4275                    .entries_by_id
4276                    .cursor::<()>()
4277                    .filter(|e| e.is_ignored)
4278                {
4279                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4280                    entries_by_id_edits.push(Edit::Remove(entry.id));
4281                }
4282
4283                prev_snapshot
4284                    .entries_by_path
4285                    .edit(entries_by_path_edits, &());
4286                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4287            }
4288
4289            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4290            prev_snapshot.apply_remote_update(update.clone()).unwrap();
4291            assert_eq!(
4292                prev_snapshot.to_vec(include_ignored),
4293                snapshot.to_vec(include_ignored),
4294                "wrong update for snapshot {i}. update: {:?}",
4295                update
4296            );
4297        }
4298    }
4299
    /// Performs one random mutation on `worktree` through its public entry
    /// API (as opposed to mutating the underlying fake filesystem directly),
    /// returning a task that resolves once the mutation has been applied.
    ///
    /// All randomness is drawn from `rng`, so a fixed seed reproduces the
    /// same mutation sequence.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick a random non-ignored entry to mutate.
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3: delete the entry. The guard prevents deleting the root.
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3: rename the entry (again, never the root).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                // Destination directory: another random entry if it is a
                // directory, otherwise that entry's parent.
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Don't move an entry into its own subtree; fall back to a
                // fresh name at the worktree root instead.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remaining ~1/3 — and any roll where the guards above rejected
            // the root entry: create a child under a directory, or overwrite
            // a file with empty contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4359
    /// Performs one random mutation directly on the fake filesystem beneath
    /// `root_path`: creating a file or directory, writing a `.gitignore`, or
    /// renaming/deleting an existing path.
    ///
    /// `insertion_probability` biases the choice toward creation; when the
    /// tree is effectively empty (no files and only the root directory),
    /// creation is forced so there is always something to mutate later.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition every existing path under the root into files and dirs.
        // `root_path` itself always ends up in `dirs`.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Creation branch: forced when only the root directory exists,
        // otherwise taken with probability `insertion_probability`.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Gitignore branch (5%): write a `.gitignore` into a random
            // directory, ignoring a random subset of the paths beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `starts_with` also matches `ignore_dir_path` itself, so
            // `subdirs` always has at least one element — the `0..len` range
            // below cannot be empty.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // NOTE(review): exclusive range here (`0..`) versus the inclusive
            // one used for files above — presumably so at least one subdir
            // (possibly the ignore dir itself) stays unselected; confirm intent.
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One ignore pattern per line, relative to the ignore file's dir.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Rename-or-delete branch: pick a random existing file or
            // directory, excluding the root (which is `dirs[0]`).
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Destination parent must not lie inside the moved path.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally (30%) replace the destination directory
                // wholesale rather than moving into it under a fresh name.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4518
4519    fn gen_name(rng: &mut impl Rng) -> String {
4520        (0..6)
4521            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4522            .map(char::from)
4523            .collect()
4524    }
4525
4526    impl LocalSnapshot {
4527        fn check_invariants(&self) {
4528            assert_eq!(
4529                self.entries_by_path
4530                    .cursor::<()>()
4531                    .map(|e| (&e.path, e.id))
4532                    .collect::<Vec<_>>(),
4533                self.entries_by_id
4534                    .cursor::<()>()
4535                    .map(|e| (&e.path, e.id))
4536                    .collect::<collections::BTreeSet<_>>()
4537                    .into_iter()
4538                    .collect::<Vec<_>>(),
4539                "entries_by_path and entries_by_id are inconsistent"
4540            );
4541
4542            let mut files = self.files(true, 0);
4543            let mut visible_files = self.files(false, 0);
4544            for entry in self.entries_by_path.cursor::<()>() {
4545                if entry.is_file() {
4546                    assert_eq!(files.next().unwrap().inode, entry.inode);
4547                    if !entry.is_ignored {
4548                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4549                    }
4550                }
4551            }
4552
4553            assert!(files.next().is_none());
4554            assert!(visible_files.next().is_none());
4555
4556            let mut bfs_paths = Vec::new();
4557            let mut stack = vec![Path::new("")];
4558            while let Some(path) = stack.pop() {
4559                bfs_paths.push(path);
4560                let ix = stack.len();
4561                for child_entry in self.child_entries(path) {
4562                    stack.insert(ix, &child_entry.path);
4563                }
4564            }
4565
4566            let dfs_paths_via_iter = self
4567                .entries_by_path
4568                .cursor::<()>()
4569                .map(|e| e.path.as_ref())
4570                .collect::<Vec<_>>();
4571            assert_eq!(bfs_paths, dfs_paths_via_iter);
4572
4573            let dfs_paths_via_traversal = self
4574                .entries(true)
4575                .map(|e| e.path.as_ref())
4576                .collect::<Vec<_>>();
4577            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4578
4579            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4580                let ignore_parent_path =
4581                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4582                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4583                assert!(self
4584                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4585                    .is_some());
4586            }
4587        }
4588
4589        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4590            let mut paths = Vec::new();
4591            for entry in self.entries_by_path.cursor::<()>() {
4592                if include_ignored || !entry.is_ignored {
4593                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4594                }
4595            }
4596            paths.sort_by(|a, b| a.0.cmp(b.0));
4597            paths
4598        }
4599    }
4600}