worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitRepository, GitStatus, RepoPath},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TryFutureExt};
  59
/// Identifies a worktree within a project. For local worktrees this is
/// derived from the gpui model id (see `WorktreeId::from_usize` usage in
/// `Worktree::local`); for remote worktrees it comes from the wire metadata.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
  62
/// A worktree is either backed by the local filesystem (`Local`) or mirrors
/// a collaborator's worktree received over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
  67
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Asks the background scanner to rescan specific paths; the paired
    /// `barrier::Sender` is dropped once the resulting scan state has been
    /// applied (see the `ScanState::Updated` handling in `Worktree::local`),
    /// which lets the requester await completion.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Watch channel holding `true` while a scan is in progress; consumed by
    /// `scan_complete`.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    /// Keeps the background scanner alive for the worktree's lifetime.
    _background_scanner_task: Task<()>,
    /// `Some` only while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    /// Full diagnostics keyed by worktree-relative path; each value is kept
    /// sorted by language server id so `update_diagnostics` can
    /// binary-search it.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Per-path, per-server summaries kept in sync with `diagnostics`.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
  86
/// A worktree mirroring a remote collaborator's worktree; entries arrive as
/// `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    /// Foreground copy of the snapshot, refreshed from `background_snapshot`
    /// after each applied update (see `Worktree::remote`).
    snapshot: Snapshot,
    /// Snapshot that incoming updates are applied to on a background thread.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Queue of incoming worktree updates.
    /// NOTE(review): presumably set to `None` when updates stop (e.g. on
    /// disconnect) — confirm against the rest of the file.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters to notify once a given scan id has been observed; resolved in
    /// order in the update loop spawned by `Worktree::remote`.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
  99
/// An immutable view of a worktree's contents at a point in time: its
/// entries (indexed both by path and by id) and its git repositories.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root.
    abs_path: Arc<Path>,
    /// File name of the root, used for display and fuzzy matching.
    root_name: String,
    /// Lower-cased character bag of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
 122
/// A git repository as recorded in a snapshot: its working directory entry,
/// the checked-out branch (if known), and per-file statuses.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,
    /// Statuses keyed by repository-relative path. Not included in the
    /// proto conversion below.
    pub(crate) statuses: TreeMap<RepoPath, GitStatus>,
}
 129
 130impl RepositoryEntry {
 131    pub fn branch(&self) -> Option<Arc<str>> {
 132        self.branch.clone()
 133    }
 134
 135    pub fn work_directory_id(&self) -> ProjectEntryId {
 136        *self.work_directory
 137    }
 138
 139    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 140        snapshot
 141            .entry_for_id(self.work_directory_id())
 142            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 143    }
 144
 145    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
 146        self.work_directory.contains(snapshot, path)
 147    }
 148}
 149
 150impl From<&RepositoryEntry> for proto::RepositoryEntry {
 151    fn from(value: &RepositoryEntry) -> Self {
 152        proto::RepositoryEntry {
 153            work_directory_id: value.work_directory.to_proto(),
 154            branch: value.branch.as_ref().map(|str| str.to_string()),
 155        }
 156    }
 157}
 158
/// This path corresponds to the 'content path' (the folder that contains the .git).
/// The wrapped path is relative to the worktree root (it is taken from an
/// `Entry::path` — see `RepositoryEntry::work_directory`).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
 162
 163impl Default for RepositoryWorkDirectory {
 164    fn default() -> Self {
 165        RepositoryWorkDirectory(Arc::from(Path::new("")))
 166    }
 167}
 168
 169impl AsRef<Path> for RepositoryWorkDirectory {
 170    fn as_ref(&self) -> &Path {
 171        self.0.as_ref()
 172    }
 173}
 174
/// Newtype over the `ProjectEntryId` of a repository's working-directory
/// entry (the folder containing the `.git`).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
 177
 178impl WorkDirectoryEntry {
 179    // Note that these paths should be relative to the worktree root.
 180    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
 181        snapshot
 182            .entry_for_id(self.0)
 183            .map(|entry| path.starts_with(&entry.path))
 184            .unwrap_or(false)
 185    }
 186
 187    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 188        worktree.entry_for_id(self.0).and_then(|entry| {
 189            path.strip_prefix(&entry.path)
 190                .ok()
 191                .map(move |path| path.into())
 192        })
 193    }
 194}
 195
 196impl Deref for WorkDirectoryEntry {
 197    type Target = ProjectEntryId;
 198
 199    fn deref(&self) -> &Self::Target {
 200        &self.0
 201    }
 202}
 203
 204impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
 205    fn from(value: ProjectEntryId) -> Self {
 206        WorkDirectoryEntry(value)
 207    }
 208}
 209
/// A `Snapshot` extended with local-only state: gitignore files, git
/// repositories, and bookkeeping used by the background scanner.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    /// Parsed `.gitignore` files keyed by the absolute path of the directory
    /// containing them. NOTE(review): the `usize` is presumably the scan id
    /// at which the file was last seen — confirm against the scanner code.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Shared counter used to allocate ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
 220
/// Local-only state for one git repository: scan bookkeeping plus a handle
/// to the underlying repository implementation.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Scan id at which this repository's state last changed; compared in
    /// `LocalWorktree::changed_repos` to detect updates.
    pub(crate) scan_id: usize,
    /// NOTE(review): presumably the scan id of the last *full* scan of this
    /// repository — confirm against the scanner code.
    pub(crate) full_scan_id: usize,
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
 230
 231impl LocalRepositoryEntry {
 232    // Note that this path should be relative to the worktree root.
 233    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 234        path.starts_with(self.git_dir_path.as_ref())
 235    }
 236}
 237
 238impl Deref for LocalSnapshot {
 239    type Target = Snapshot;
 240
 241    fn deref(&self) -> &Self::Target {
 242        &self.snapshot
 243    }
 244}
 245
 246impl DerefMut for LocalSnapshot {
 247    fn deref_mut(&mut self) -> &mut Self::Target {
 248        &mut self.snapshot
 249    }
 250}
 251
/// Messages sent from the background scanner to the foreground worktree
/// (consumed in the loop spawned by `Worktree::local`).
enum ScanState {
    /// A scan has begun; flips `is_scanning` to `true`.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// The affected paths and the kind of change for each.
        changes: HashMap<Arc<Path>, PathChange>,
        /// Dropped after the update is applied, waking any caller that
        /// requested the scan via `path_changes_tx`.
        barrier: Option<barrier::Sender>,
        /// Whether scanning is still in progress; drives `is_scanning`.
        scanning: bool,
    },
}
 261
/// State that exists only while a local worktree is shared with remote
/// collaborators.
struct ShareState {
    project_id: u64,
    /// Receives each new local snapshot (see `set_snapshot`) so it can be
    /// replicated to guests.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    resume_updates: watch::Sender<()>,
    /// Keeps the snapshot-replication task alive while shared.
    _maintain_remote_snapshot: Task<Option<()>>,
}
 268
/// Events emitted by a `Worktree` model to its observers.
pub enum Event {
    /// Entries changed, keyed by worktree-relative path.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repositories changed, keyed by work-directory path.
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
 273
/// Makes `Worktree` usable as a gpui model, emitting `Event`s to observers.
impl Entity for Worktree {
    type Event = Event;
}
 277
impl Worktree {
    /// Creates a worktree backed by the local filesystem, rooted at `path`.
    ///
    /// Fails if the root path cannot be stat'ed. Otherwise builds an initial
    /// snapshot containing at most the root entry, then spawns two tasks:
    /// a foreground task that applies `ScanState` messages from the scanner
    /// to this model, and a background task running `BackgroundScanner`
    /// over filesystem events for the root path.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // The first scan (id 1) has started but not completed.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // `metadata` is `None` when the root path does not exist; the
            // snapshot then starts out with no entries at all.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this
            // model. The loop exits when either the scanner's channel closes
            // or the model is dropped (weak handle fails to upgrade).
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes any caller that
                                // requested this scan via `path_changes_tx`.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    // Watch the root with 100ms debounce, then run the
                    // scanner until the worktree is dropped.
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a worktree that mirrors a collaborator's worktree in the
    /// project identified by `project_remote_id`. The snapshot starts empty;
    /// entries arrive as `proto::UpdateWorktree` messages pushed into
    /// `updates_tx`, applied on a background thread, and then copied into
    /// the model on the foreground.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates to the shared background snapshot off the
            // main thread, signalling `snapshot_updated_tx` after each one
            // (even on failure, so waiters are not stranded).
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // After each applied update, copy the background snapshot into
            // the model and resolve queued snapshot subscriptions whose scan
            // id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// The local variant, or `None` if this worktree is remote.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// The remote variant, or `None` if this worktree is local.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable version of [`Worktree::as_local`].
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable version of [`Worktree::as_remote`].
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree is backed by the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree mirrors a remote collaborator's worktree.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// An owned copy of the current (plain) snapshot.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The latest scan id whose scan, and all preceding scans, completed
    /// (see `Snapshot::completed_scan_id`).
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// The `visible` flag this worktree was created with (local) or received
    /// in its metadata (remote).
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id of this worktree's owner; local worktrees are always
    /// replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened into
    /// `(path, language server id, summary)` tuples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root (on the owner's machine, for
    /// remote worktrees).
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
 572
 573impl LocalWorktree {
 574    pub fn contains_abs_path(&self, path: &Path) -> bool {
 575        path.starts_with(&self.abs_path)
 576    }
 577
 578    fn absolutize(&self, path: &Path) -> PathBuf {
 579        if path.file_name().is_some() {
 580            self.abs_path.join(path)
 581        } else {
 582            self.abs_path.to_path_buf()
 583        }
 584    }
 585
    /// Loads the file at `path` into a new `Buffer` model with remote id
    /// `id`, including its git diff base when the file belongs to a
    /// repository (see `load`).
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread. Replica id 0: the
            // local worktree's owner is always replica 0 (see `replica_id`).
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                // Compute the initial git diff now that the diff base is set.
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
 608
 609    pub fn diagnostics_for_path(
 610        &self,
 611        path: &Path,
 612    ) -> Vec<(
 613        LanguageServerId,
 614        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 615    )> {
 616        self.diagnostics.get(path).cloned().unwrap_or_default()
 617    }
 618
    /// Replaces the diagnostics reported by `server_id` for `worktree_path`,
    /// keeping `diagnostic_summaries` in sync and — when the worktree is
    /// shared — pushing the new summary to collaborators.
    ///
    /// Returns `Ok(true)` when the change is observable (the old or the new
    /// summary is non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        // Remove the old summary up front; it is re-inserted below only when
        // the new diagnostics are non-empty.
        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: remove its entry, and
            // drop the whole per-path record once it becomes empty.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path list is kept sorted by server id so it can be
            // binary-searched; insert or replace at the right position.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            // Only notify collaborators while the worktree is shared;
            // send failures are logged, not propagated.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
 678
 679    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
 680        let updated_repos =
 681            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
 682        self.snapshot = new_snapshot;
 683
 684        if let Some(share) = self.share.as_mut() {
 685            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
 686        }
 687
 688        if !updated_repos.is_empty() {
 689            cx.emit(Event::UpdatedGitRepositories(updated_repos));
 690        }
 691    }
 692
 693    fn changed_repos(
 694        &self,
 695        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 696        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 697    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
 698        let mut diff = HashMap::default();
 699        let mut old_repos = old_repos.iter().peekable();
 700        let mut new_repos = new_repos.iter().peekable();
 701        loop {
 702            match (old_repos.peek(), new_repos.peek()) {
 703                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
 704                    match Ord::cmp(old_entry_id, new_entry_id) {
 705                        Ordering::Less => {
 706                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
 707                                diff.insert(entry.path.clone(), (*old_repo).clone());
 708                            }
 709                            old_repos.next();
 710                        }
 711                        Ordering::Equal => {
 712                            if old_repo.scan_id != new_repo.scan_id {
 713                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
 714                                    diff.insert(entry.path.clone(), (*new_repo).clone());
 715                                }
 716                            }
 717
 718                            old_repos.next();
 719                            new_repos.next();
 720                        }
 721                        Ordering::Greater => {
 722                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
 723                                diff.insert(entry.path.clone(), (*new_repo).clone());
 724                            }
 725                            new_repos.next();
 726                        }
 727                    }
 728                }
 729                (Some((old_entry_id, old_repo)), None) => {
 730                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
 731                        diff.insert(entry.path.clone(), (*old_repo).clone());
 732                    }
 733                    old_repos.next();
 734                }
 735                (None, Some((new_entry_id, new_repo))) => {
 736                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
 737                        diff.insert(entry.path.clone(), (*new_repo).clone());
 738                    }
 739                    new_repos.next();
 740                }
 741                (None, None) => break,
 742            }
 743        }
 744        diff
 745    }
 746
 747    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 748        let mut is_scanning_rx = self.is_scanning.1.clone();
 749        async move {
 750            let mut is_scanning = is_scanning_rx.borrow().clone();
 751            while is_scanning {
 752                if let Some(value) = is_scanning_rx.recv().await {
 753                    is_scanning = value;
 754                } else {
 755                    break;
 756                }
 757            }
 758        }
 759    }
 760
 761    pub fn snapshot(&self) -> LocalSnapshot {
 762        self.snapshot.clone()
 763    }
 764
 765    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 766        proto::WorktreeMetadata {
 767            id: self.id().to_proto(),
 768            root_name: self.root_name().to_string(),
 769            visible: self.visible,
 770            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 771        }
 772    }
 773
    /// Reads the file at the worktree-relative `path`, returning its `File`
    /// metadata, its contents, and — when the path is inside a git
    /// repository — the index's copy of the file to diff against.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // If the path belongs to a repository, start loading the index text
        // in the background while the working copy is read below.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
 828
 829    pub fn save_buffer(
 830        &self,
 831        buffer_handle: ModelHandle<Buffer>,
 832        path: Arc<Path>,
 833        has_changed_file: bool,
 834        cx: &mut ModelContext<Worktree>,
 835    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
 836        let handle = cx.handle();
 837        let buffer = buffer_handle.read(cx);
 838
 839        let rpc = self.client.clone();
 840        let buffer_id = buffer.remote_id();
 841        let project_id = self.share.as_ref().map(|share| share.project_id);
 842
 843        let text = buffer.as_rope().clone();
 844        let fingerprint = text.fingerprint();
 845        let version = buffer.version();
 846        let save = self.write_file(path, text, buffer.line_ending(), cx);
 847
 848        cx.as_mut().spawn(|mut cx| async move {
 849            let entry = save.await?;
 850
 851            if has_changed_file {
 852                let new_file = Arc::new(File {
 853                    entry_id: entry.id,
 854                    worktree: handle,
 855                    path: entry.path,
 856                    mtime: entry.mtime,
 857                    is_local: true,
 858                    is_deleted: false,
 859                });
 860
 861                if let Some(project_id) = project_id {
 862                    rpc.send(proto::UpdateBufferFile {
 863                        project_id,
 864                        buffer_id,
 865                        file: Some(new_file.to_proto()),
 866                    })
 867                    .log_err();
 868                }
 869
 870                buffer_handle.update(&mut cx, |buffer, cx| {
 871                    if has_changed_file {
 872                        buffer.file_updated(new_file, cx).detach();
 873                    }
 874                });
 875            }
 876
 877            if let Some(project_id) = project_id {
 878                rpc.send(proto::BufferSaved {
 879                    project_id,
 880                    buffer_id,
 881                    version: serialize_version(&version),
 882                    mtime: Some(entry.mtime.into()),
 883                    fingerprint: serialize_fingerprint(fingerprint),
 884                })?;
 885            }
 886
 887            buffer_handle.update(&mut cx, |buffer, cx| {
 888                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
 889            });
 890
 891            Ok((version, fingerprint, entry.mtime))
 892        })
 893    }
 894
 895    pub fn create_entry(
 896        &self,
 897        path: impl Into<Arc<Path>>,
 898        is_dir: bool,
 899        cx: &mut ModelContext<Worktree>,
 900    ) -> Task<Result<Entry>> {
 901        let path = path.into();
 902        let abs_path = self.absolutize(&path);
 903        let fs = self.fs.clone();
 904        let write = cx.background().spawn(async move {
 905            if is_dir {
 906                fs.create_dir(&abs_path).await
 907            } else {
 908                fs.save(&abs_path, &Default::default(), Default::default())
 909                    .await
 910            }
 911        });
 912
 913        cx.spawn(|this, mut cx| async move {
 914            write.await?;
 915            this.update(&mut cx, |this, cx| {
 916                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
 917            })
 918            .await
 919        })
 920    }
 921
 922    pub fn write_file(
 923        &self,
 924        path: impl Into<Arc<Path>>,
 925        text: Rope,
 926        line_ending: LineEnding,
 927        cx: &mut ModelContext<Worktree>,
 928    ) -> Task<Result<Entry>> {
 929        let path = path.into();
 930        let abs_path = self.absolutize(&path);
 931        let fs = self.fs.clone();
 932        let write = cx
 933            .background()
 934            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
 935
 936        cx.spawn(|this, mut cx| async move {
 937            write.await?;
 938            this.update(&mut cx, |this, cx| {
 939                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
 940            })
 941            .await
 942        })
 943    }
 944
 945    pub fn delete_entry(
 946        &self,
 947        entry_id: ProjectEntryId,
 948        cx: &mut ModelContext<Worktree>,
 949    ) -> Option<Task<Result<()>>> {
 950        let entry = self.entry_for_id(entry_id)?.clone();
 951        let abs_path = self.abs_path.clone();
 952        let fs = self.fs.clone();
 953
 954        let delete = cx.background().spawn(async move {
 955            let mut abs_path = fs.canonicalize(&abs_path).await?;
 956            if entry.path.file_name().is_some() {
 957                abs_path = abs_path.join(&entry.path);
 958            }
 959            if entry.is_file() {
 960                fs.remove_file(&abs_path, Default::default()).await?;
 961            } else {
 962                fs.remove_dir(
 963                    &abs_path,
 964                    RemoveOptions {
 965                        recursive: true,
 966                        ignore_if_not_exists: false,
 967                    },
 968                )
 969                .await?;
 970            }
 971            anyhow::Ok(abs_path)
 972        });
 973
 974        Some(cx.spawn(|this, mut cx| async move {
 975            let abs_path = delete.await?;
 976            let (tx, mut rx) = barrier::channel();
 977            this.update(&mut cx, |this, _| {
 978                this.as_local_mut()
 979                    .unwrap()
 980                    .path_changes_tx
 981                    .try_send((vec![abs_path], tx))
 982            })?;
 983            rx.recv().await;
 984            Ok(())
 985        }))
 986    }
 987
 988    pub fn rename_entry(
 989        &self,
 990        entry_id: ProjectEntryId,
 991        new_path: impl Into<Arc<Path>>,
 992        cx: &mut ModelContext<Worktree>,
 993    ) -> Option<Task<Result<Entry>>> {
 994        let old_path = self.entry_for_id(entry_id)?.path.clone();
 995        let new_path = new_path.into();
 996        let abs_old_path = self.absolutize(&old_path);
 997        let abs_new_path = self.absolutize(&new_path);
 998        let fs = self.fs.clone();
 999        let rename = cx.background().spawn(async move {
1000            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1001                .await
1002        });
1003
1004        Some(cx.spawn(|this, mut cx| async move {
1005            rename.await?;
1006            this.update(&mut cx, |this, cx| {
1007                this.as_local_mut()
1008                    .unwrap()
1009                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1010            })
1011            .await
1012        }))
1013    }
1014
1015    pub fn copy_entry(
1016        &self,
1017        entry_id: ProjectEntryId,
1018        new_path: impl Into<Arc<Path>>,
1019        cx: &mut ModelContext<Worktree>,
1020    ) -> Option<Task<Result<Entry>>> {
1021        let old_path = self.entry_for_id(entry_id)?.path.clone();
1022        let new_path = new_path.into();
1023        let abs_old_path = self.absolutize(&old_path);
1024        let abs_new_path = self.absolutize(&new_path);
1025        let fs = self.fs.clone();
1026        let copy = cx.background().spawn(async move {
1027            copy_recursive(
1028                fs.as_ref(),
1029                &abs_old_path,
1030                &abs_new_path,
1031                Default::default(),
1032            )
1033            .await
1034        });
1035
1036        Some(cx.spawn(|this, mut cx| async move {
1037            copy.await?;
1038            this.update(&mut cx, |this, cx| {
1039                this.as_local_mut()
1040                    .unwrap()
1041                    .refresh_entry(new_path.clone(), None, cx)
1042            })
1043            .await
1044        }))
1045    }
1046
    /// Asks the background scanner to rescan `path` (and `old_path`, when the
    /// entry was just renamed from there), waits until the scan has been
    /// processed, and returns the refreshed entry from the updated snapshot.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        // `spawn_weak`: the task doesn't keep the worktree alive; it bails if
        // the worktree is dropped before the scan completes.
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name is the worktree root itself; scan the
            // canonicalized root rather than joining an empty component.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has handled these paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1084
    /// Starts (or resumes) sharing this worktree with remote collaborators
    /// under `project_id`.
    ///
    /// The returned task resolves once the first full snapshot has been sent
    /// to the server. If the worktree is already shared, updates are resumed
    /// and the task resolves immediately.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: signal completion now and nudge the background
            // task to resume sending updates.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Replay all known diagnostic summaries to the server before any
            // snapshot updates; abort sharing on the first send failure.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            // Background task: diff each new snapshot against the previous one
            // and stream the updates to the server in chunks.
            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends the
                    // entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests to exercise the splitting path.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            // On failure, pause until `share` is called again
                            // (which signals `resume_updates`); give up if the
                            // resume channel is closed.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Resolve the caller's task after the first snapshot
                        // has been fully transmitted.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1171
1172    pub fn unshare(&mut self) {
1173        self.share.take();
1174    }
1175
1176    pub fn is_shared(&self) -> bool {
1177        self.share.is_some()
1178    }
1179}
1180
impl RemoteWorktree {
    /// Returns a clone of this worktree's current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected from the host.
    ///
    /// Dropping `updates_tx` stops accepting further remote updates, and
    /// clearing `snapshot_subscriptions` drops their senders so that pending
    /// `wait_for_snapshot` futures resolve with an error.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer via a `SaveBuffer` request, then
    /// applies the resulting version/fingerprint/mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // The host reports the authoritative post-save state; prefer it
            // over the version captured before the request.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Queues an `UpdateWorktree` message from the host for application.
    /// Silently dropped when already disconnected (`updates_tx` is `None`).
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot covering `scan_id` has already been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future resolving once a snapshot covering `scan_id` has been
    /// observed. Errors if the worktree disconnects before that happens.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already caught up; resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` return an error below.
            drop(tx);
        } else {
            // Insert the subscription keeping the list sorted by scan id.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Records a diagnostic summary for `path` received from the host,
    /// removing the entry entirely when the summary becomes empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry into the worktree once the snapshot for `scan_id` has
    /// been observed, keeping the foreground snapshot in sync with the
    /// background one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes an entry from the worktree once the snapshot for `scan_id` has
    /// been observed, keeping the foreground snapshot in sync with the
    /// background one.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1322
1323impl Snapshot {
1324    pub fn id(&self) -> WorktreeId {
1325        self.id
1326    }
1327
1328    pub fn abs_path(&self) -> &Arc<Path> {
1329        &self.abs_path
1330    }
1331
1332    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1333        self.entries_by_id.get(&entry_id, &()).is_some()
1334    }
1335
1336    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1337        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1338        let old_entry = self.entries_by_id.insert_or_replace(
1339            PathEntry {
1340                id: entry.id,
1341                path: entry.path.clone(),
1342                is_ignored: entry.is_ignored,
1343                scan_id: 0,
1344            },
1345            &(),
1346        );
1347        if let Some(old_entry) = old_entry {
1348            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1349        }
1350        self.entries_by_path.insert_or_replace(entry.clone(), &());
1351        Ok(entry)
1352    }
1353
1354    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1355        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1356        self.entries_by_path = {
1357            let mut cursor = self.entries_by_path.cursor();
1358            let mut new_entries_by_path =
1359                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1360            while let Some(entry) = cursor.item() {
1361                if entry.path.starts_with(&removed_entry.path) {
1362                    self.entries_by_id.remove(&entry.id, &());
1363                    cursor.next(&());
1364                } else {
1365                    break;
1366                }
1367            }
1368            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1369            new_entries_by_path
1370        };
1371
1372        Some(removed_entry.path)
1373    }
1374
1375    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1376        let mut entries_by_path_edits = Vec::new();
1377        let mut entries_by_id_edits = Vec::new();
1378        for entry_id in update.removed_entries {
1379            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1380                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1381                entries_by_id_edits.push(Edit::Remove(entry.id));
1382            }
1383        }
1384
1385        for entry in update.updated_entries {
1386            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1387            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1388                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1389            }
1390            entries_by_id_edits.push(Edit::Insert(PathEntry {
1391                id: entry.id,
1392                path: entry.path.clone(),
1393                is_ignored: entry.is_ignored,
1394                scan_id: 0,
1395            }));
1396            entries_by_path_edits.push(Edit::Insert(entry));
1397        }
1398
1399        self.entries_by_path.edit(entries_by_path_edits, &());
1400        self.entries_by_id.edit(entries_by_id_edits, &());
1401
1402        update.removed_repositories.sort_unstable();
1403        self.repository_entries.retain(|_, entry| {
1404            if let Ok(_) = update
1405                .removed_repositories
1406                .binary_search(&entry.work_directory.to_proto())
1407            {
1408                false
1409            } else {
1410                true
1411            }
1412        });
1413
1414        for repository in update.updated_repositories {
1415            let repository = RepositoryEntry {
1416                work_directory: ProjectEntryId::from_proto(repository.work_directory_id).into(),
1417                branch: repository.branch.map(Into::into),
1418                // TODO: status
1419                statuses: Default::default(),
1420            };
1421            if let Some(entry) = self.entry_for_id(repository.work_directory_id()) {
1422                self.repository_entries
1423                    .insert(RepositoryWorkDirectory(entry.path.clone()), repository)
1424            } else {
1425                log::error!("no work directory entry for repository {:?}", repository)
1426            }
1427        }
1428
1429        self.scan_id = update.scan_id as usize;
1430        if update.is_last_update {
1431            self.completed_scan_id = update.scan_id as usize;
1432        }
1433
1434        Ok(())
1435    }
1436
1437    pub fn file_count(&self) -> usize {
1438        self.entries_by_path.summary().file_count
1439    }
1440
1441    pub fn visible_file_count(&self) -> usize {
1442        self.entries_by_path.summary().visible_file_count
1443    }
1444
1445    fn traverse_from_offset(
1446        &self,
1447        include_dirs: bool,
1448        include_ignored: bool,
1449        start_offset: usize,
1450    ) -> Traversal {
1451        let mut cursor = self.entries_by_path.cursor();
1452        cursor.seek(
1453            &TraversalTarget::Count {
1454                count: start_offset,
1455                include_dirs,
1456                include_ignored,
1457            },
1458            Bias::Right,
1459            &(),
1460        );
1461        Traversal {
1462            cursor,
1463            include_dirs,
1464            include_ignored,
1465        }
1466    }
1467
1468    fn traverse_from_path(
1469        &self,
1470        include_dirs: bool,
1471        include_ignored: bool,
1472        path: &Path,
1473    ) -> Traversal {
1474        let mut cursor = self.entries_by_path.cursor();
1475        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1476        Traversal {
1477            cursor,
1478            include_dirs,
1479            include_ignored,
1480        }
1481    }
1482
1483    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1484        self.traverse_from_offset(false, include_ignored, start)
1485    }
1486
1487    pub fn entries(&self, include_ignored: bool) -> Traversal {
1488        self.traverse_from_offset(true, include_ignored, 0)
1489    }
1490
1491    pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1492        self.repository_entries.values()
1493    }
1494
1495    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1496        let empty_path = Path::new("");
1497        self.entries_by_path
1498            .cursor::<()>()
1499            .filter(move |entry| entry.path.as_ref() != empty_path)
1500            .map(|entry| &entry.path)
1501    }
1502
1503    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1504        let mut cursor = self.entries_by_path.cursor();
1505        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1506        let traversal = Traversal {
1507            cursor,
1508            include_dirs: true,
1509            include_ignored: true,
1510        };
1511        ChildEntriesIter {
1512            traversal,
1513            parent_path,
1514        }
1515    }
1516
1517    pub fn root_entry(&self) -> Option<&Entry> {
1518        self.entry_for_path("")
1519    }
1520
1521    pub fn root_name(&self) -> &str {
1522        &self.root_name
1523    }
1524
1525    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1526        self.repository_entries
1527            .get(&RepositoryWorkDirectory(Path::new("").into()))
1528            .map(|entry| entry.to_owned())
1529    }
1530
1531    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1532        self.repository_entries.values()
1533    }
1534
1535    pub fn scan_id(&self) -> usize {
1536        self.scan_id
1537    }
1538
1539    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1540        let path = path.as_ref();
1541        self.traverse_from_path(true, true, path)
1542            .entry()
1543            .and_then(|entry| {
1544                if entry.path.as_ref() == path {
1545                    Some(entry)
1546                } else {
1547                    None
1548                }
1549            })
1550    }
1551
1552    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1553        let entry = self.entries_by_id.get(&id, &())?;
1554        self.entry_for_path(&entry.path)
1555    }
1556
1557    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1558        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1559    }
1560}
1561
1562impl LocalSnapshot {
    /// Finds the repository entry whose work directory contains `path`,
    /// preferring the one with the longest work-directory path (i.e. the
    /// innermost containing repository).
    ///
    /// NOTE(review): the `else break` assumes that once a containing
    /// repository with a *shorter* work-directory path follows a longer one,
    /// no deeper match can appear later — this relies on the iteration order
    /// of `repository_entries`; confirm that invariant holds.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if repo.contains(self, path) {
                if work_directory.0.as_os_str().len() >= max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    break;
                }
            }
        }

        current_candidate.map(|entry| entry.to_owned())
    }
1579
    /// Looks up the local on-disk repository state for the given repository
    /// entry, keyed by its work directory.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }
1583
1584    pub(crate) fn repo_for_metadata(
1585        &self,
1586        path: &Path,
1587    ) -> Option<(ProjectEntryId, Arc<Mutex<dyn GitRepository>>)> {
1588        let (entry_id, local_repo) = self
1589            .git_repositories
1590            .iter()
1591            .find(|(_, repo)| repo.in_dot_git(path))?;
1592        Some((*entry_id, local_repo.repo_ptr.to_owned()))
1593    }
1594
1595    #[cfg(test)]
1596    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1597        let root_name = self.root_name.clone();
1598        proto::UpdateWorktree {
1599            project_id,
1600            worktree_id: self.id().to_proto(),
1601            abs_path: self.abs_path().to_string_lossy().into(),
1602            root_name,
1603            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1604            removed_entries: Default::default(),
1605            scan_id: self.scan_id as u64,
1606            is_last_update: true,
1607            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1608            removed_repositories: Default::default(),
1609        }
1610    }
1611
    /// Builds an `UpdateWorktree` message describing the difference between
    /// this snapshot and `other` (the previously transmitted snapshot).
    ///
    /// Entries and repositories are diffed with a sorted-merge walk over the
    /// two snapshots: items only in `self` are reported as updated, items only
    /// in `other` as removed, and items in both as updated only if they
    /// changed (different scan id for entries, inequality for repositories).
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        // Both cursors yield entries ordered by id, which makes the merge walk
        // below possible.
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in the new snapshot: newly added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: updated only if rescanned since
                            // the previous transmission.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in the old snapshot: removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        // Same merge-walk strategy for repositories, keyed by work directory.
        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
        let mut removed_repositories = Vec::new();
        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
        loop {
            match (self_repos.peek(), other_repos.peek()) {
                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
                    match Ord::cmp(self_work_dir, other_work_dir) {
                        Ordering::Less => {
                            updated_repositories.push((*self_repo).into());
                            self_repos.next();
                        }
                        Ordering::Equal => {
                            if self_repo != other_repo {
                                updated_repositories.push((*self_repo).into());
                            }

                            self_repos.next();
                            other_repos.next();
                        }
                        Ordering::Greater => {
                            removed_repositories.push(other_repo.work_directory.to_proto());
                            other_repos.next();
                        }
                    }
                }
                (Some((_, self_repo)), None) => {
                    updated_repositories.push((*self_repo).into());
                    self_repos.next();
                }
                (None, Some((_, other_repo))) => {
                    removed_repositories.push(other_repo.work_directory.to_proto());
                    other_repos.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }
1719
    /// Inserts `entry` into both the path-ordered and id-ordered indices,
    /// reusing a previously removed entry's id when possible. If the entry is
    /// a `.gitignore` file, its rules are (re)loaded into the ignore map.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // NOTE(review): blocks the current thread while loading/parsing
            // the ignore file — presumably called from background scan
            // threads; confirm before calling from a UI thread.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Preserve the resolved kind of a directory that already exists in the
        // index, so re-inserting it as `PendingDir` doesn't regress its state.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If the path was previously occupied by a *different* entry, drop
        // that entry's record from the id index to keep the two in sync.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }
1769
    /// Marks `parent_path`'s entry as a fully scanned directory and inserts
    /// its children into both indices. Also records the directory's gitignore
    /// (if provided) and, for `.git` directories, initializes repository state.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The parent may have been removed between the scan being
            // scheduled and executed; nothing to populate in that case.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // A non-directory entry cannot receive children.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        // Populating a `.git` directory means we just discovered a repository.
        if parent_path.file_name() == Some(&DOT_GIT) {
            self.build_repo(parent_path, fs);
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        // Apply both edit batches atomically per tree.
        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }
1825
1826    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1827        let abs_path = self.abs_path.join(&parent_path);
1828        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1829
1830        // Guard against repositories inside the repository metadata
1831        if work_dir
1832            .components()
1833            .find(|component| component.as_os_str() == *DOT_GIT)
1834            .is_some()
1835        {
1836            return None;
1837        };
1838
1839        let work_dir_id = self
1840            .entry_for_path(work_dir.clone())
1841            .map(|entry| entry.id)?;
1842
1843        if self.git_repositories.get(&work_dir_id).is_none() {
1844            let repo = fs.open_repo(abs_path.as_path())?;
1845            let work_directory = RepositoryWorkDirectory(work_dir.clone());
1846            let scan_id = self.scan_id;
1847
1848            let repo_lock = repo.lock();
1849            self.repository_entries.insert(
1850                work_directory,
1851                RepositoryEntry {
1852                    work_directory: work_dir_id.into(),
1853                    branch: repo_lock.branch_name().map(Into::into),
1854                    statuses: repo_lock.statuses().unwrap_or_default(),
1855                },
1856            );
1857            drop(repo_lock);
1858
1859            self.git_repositories.insert(
1860                work_dir_id,
1861                LocalRepositoryEntry {
1862                    scan_id,
1863                    full_scan_id: scan_id,
1864                    repo_ptr: repo,
1865                    git_dir_path: parent_path.clone(),
1866                },
1867            )
1868        }
1869
1870        Some(())
1871    }
    /// Assigns `entry` a stable id: prefer the id of an entry with the same
    /// inode that was removed during this scan (likely a rename/move),
    /// otherwise the id of any existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }
1879
    /// Removes the subtree rooted at `path` from both indices, remembering
    /// the removed ids by inode so they can be reused if the files reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before `path`], [`path` and
            // its descendants], and [after], keeping only the outer parts.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest removed id per inode for potential reuse
            // (see `reuse_entry_id`).
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates its parent's cached ignore state.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        }
    }
1912
1913    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1914        let mut inodes = TreeSet::default();
1915        for ancestor in path.ancestors().skip(1) {
1916            if let Some(entry) = self.entry_for_path(ancestor) {
1917                inodes.insert(entry.inode);
1918            }
1919        }
1920        inodes
1921    }
1922
1923    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1924        let mut new_ignores = Vec::new();
1925        for ancestor in abs_path.ancestors().skip(1) {
1926            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1927                new_ignores.push((ancestor, Some(ignore.clone())));
1928            } else {
1929                new_ignores.push((ancestor, None));
1930            }
1931        }
1932
1933        let mut ignore_stack = IgnoreStack::none();
1934        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1935            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1936                ignore_stack = IgnoreStack::all();
1937                break;
1938            } else if let Some(ignore) = ignore {
1939                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1940            }
1941        }
1942
1943        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1944            ignore_stack = IgnoreStack::all();
1945        }
1946
1947        ignore_stack
1948    }
1949}
1950
1951async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1952    let contents = fs.load(abs_path).await?;
1953    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1954    let mut builder = GitignoreBuilder::new(parent);
1955    for line in contents.lines() {
1956        builder.add_line(Some(abs_path.into()), line)?;
1957    }
1958    Ok(builder.build()?)
1959}
1960
1961impl WorktreeId {
1962    pub fn from_usize(handle_id: usize) -> Self {
1963        Self(handle_id)
1964    }
1965
1966    pub(crate) fn from_proto(id: u64) -> Self {
1967        Self(id as usize)
1968    }
1969
1970    pub fn to_proto(&self) -> u64 {
1971        self.0 as u64
1972    }
1973
1974    pub fn to_usize(&self) -> usize {
1975        self.0
1976    }
1977}
1978
1979impl fmt::Display for WorktreeId {
1980    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1981        self.0.fmt(f)
1982    }
1983}
1984
1985impl Deref for Worktree {
1986    type Target = Snapshot;
1987
1988    fn deref(&self) -> &Self::Target {
1989        match self {
1990            Worktree::Local(worktree) => &worktree.snapshot,
1991            Worktree::Remote(worktree) => &worktree.snapshot,
1992        }
1993    }
1994}
1995
1996impl Deref for LocalWorktree {
1997    type Target = LocalSnapshot;
1998
1999    fn deref(&self) -> &Self::Target {
2000        &self.snapshot
2001    }
2002}
2003
2004impl Deref for RemoteWorktree {
2005    type Target = Snapshot;
2006
2007    fn deref(&self) -> &Self::Target {
2008        &self.snapshot
2009    }
2010}
2011
2012impl fmt::Debug for LocalWorktree {
2013    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2014        self.snapshot.fmt(f)
2015    }
2016}
2017
2018impl fmt::Debug for Snapshot {
2019    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2020        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2021        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2022
2023        impl<'a> fmt::Debug for EntriesByPath<'a> {
2024            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2025                f.debug_map()
2026                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2027                    .finish()
2028            }
2029        }
2030
2031        impl<'a> fmt::Debug for EntriesById<'a> {
2032            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2033                f.debug_list().entries(self.0.iter()).finish()
2034            }
2035        }
2036
2037        f.debug_struct("Snapshot")
2038            .field("id", &self.id)
2039            .field("root_name", &self.root_name)
2040            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2041            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2042            .finish()
2043    }
2044}
2045
/// A handle to a file within a worktree, backing the `language::File` and
/// `language::LocalFile` implementations below.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    // True when the file belongs to a local (not remote) worktree.
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}
2055
impl language::File for File {
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a path suitable for display: the worktree's root name (or, for
    /// invisible worktrees, its possibly-abbreviated absolute path) followed
    /// by this file's worktree-relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate paths under the user's home directory with "~".
            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle into its wire representation.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2123
impl language::LocalFile for File {
    /// Absolute path of the file: the local worktree's root joined with the
    /// worktree-relative path. Panics if the worktree is not local.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that a buffer was reloaded from disk, but only
    /// when the worktree is currently shared (has a project id).
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                // Best-effort: a failed send is logged, not propagated.
                .log_err();
        }
    }
}
2167
2168impl File {
2169    pub fn from_proto(
2170        proto: rpc::proto::File,
2171        worktree: ModelHandle<Worktree>,
2172        cx: &AppContext,
2173    ) -> Result<Self> {
2174        let worktree_id = worktree
2175            .read(cx)
2176            .as_remote()
2177            .ok_or_else(|| anyhow!("not remote"))?
2178            .id();
2179
2180        if worktree_id.to_proto() != proto.worktree_id {
2181            return Err(anyhow!("worktree id does not match file"));
2182        }
2183
2184        Ok(Self {
2185            worktree,
2186            path: Path::new(&proto.path).into(),
2187            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2188            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2189            is_local: false,
2190            is_deleted: proto.is_deleted,
2191        })
2192    }
2193
2194    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2195        file.and_then(|f| f.as_any().downcast_ref())
2196    }
2197
2198    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2199        self.worktree.read(cx).id()
2200    }
2201
2202    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2203        if self.is_deleted {
2204            None
2205        } else {
2206            Some(self.entry_id)
2207        }
2208    }
2209}
2210
/// Metadata for a single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True when the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2221
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been scanned (see `populate_dir`).
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying a precomputed character bag for fuzzy matching.
    File(CharBag),
}
2228
/// Kind of change observed for a path between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Either added or updated — used when the precise prior state was not
    /// known (e.g. for events received before the initial scan finished).
    AddedOrUpdated,
}
2236
2237impl Entry {
2238    fn new(
2239        path: Arc<Path>,
2240        metadata: &fs::Metadata,
2241        next_entry_id: &AtomicUsize,
2242        root_char_bag: CharBag,
2243    ) -> Self {
2244        Self {
2245            id: ProjectEntryId::new(next_entry_id),
2246            kind: if metadata.is_dir {
2247                EntryKind::PendingDir
2248            } else {
2249                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2250            },
2251            path,
2252            inode: metadata.inode,
2253            mtime: metadata.mtime,
2254            is_symlink: metadata.is_symlink,
2255            is_ignored: false,
2256        }
2257    }
2258
2259    pub fn is_dir(&self) -> bool {
2260        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2261    }
2262
2263    pub fn is_file(&self) -> bool {
2264        matches!(self.kind, EntryKind::File(_))
2265    }
2266}
2267
2268impl sum_tree::Item for Entry {
2269    type Summary = EntrySummary;
2270
2271    fn summary(&self) -> Self::Summary {
2272        let visible_count = if self.is_ignored { 0 } else { 1 };
2273        let file_count;
2274        let visible_file_count;
2275        if self.is_file() {
2276            file_count = 1;
2277            visible_file_count = visible_count;
2278        } else {
2279            file_count = 0;
2280            visible_file_count = 0;
2281        }
2282
2283        EntrySummary {
2284            max_path: self.path.clone(),
2285            count: 1,
2286            visible_count,
2287            file_count,
2288            visible_file_count,
2289        }
2290    }
2291}
2292
2293impl sum_tree::KeyedItem for Entry {
2294    type Key = PathKey;
2295
2296    fn key(&self) -> Self::Key {
2297        PathKey(self.path.clone())
2298    }
2299}
2300
/// Aggregated statistics over a range of `Entry` items in a sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest path in the range (paths are the tree's sort key).
    max_path: Arc<Path>,
    // Total number of entries in the range.
    count: usize,
    // Entries not excluded by gitignore.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Files not excluded by gitignore.
    visible_file_count: usize,
}
2309
2310impl Default for EntrySummary {
2311    fn default() -> Self {
2312        Self {
2313            max_path: Arc::from(Path::new("")),
2314            count: 0,
2315            visible_count: 0,
2316            file_count: 0,
2317            visible_file_count: 0,
2318        }
2319    }
2320}
2321
2322impl sum_tree::Summary for EntrySummary {
2323    type Context = ();
2324
2325    fn add_summary(&mut self, rhs: &Self, _: &()) {
2326        self.max_path = rhs.max_path.clone();
2327        self.count += rhs.count;
2328        self.visible_count += rhs.visible_count;
2329        self.file_count += rhs.file_count;
2330        self.visible_file_count += rhs.visible_file_count;
2331    }
2332}
2333
/// A record in the id-ordered index, mapping an entry id back to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // Scan in which this record was last touched.
    scan_id: usize,
}
2341
2342impl sum_tree::Item for PathEntry {
2343    type Summary = PathEntrySummary;
2344
2345    fn summary(&self) -> Self::Summary {
2346        PathEntrySummary { max_id: self.id }
2347    }
2348}
2349
2350impl sum_tree::KeyedItem for PathEntry {
2351    type Key = ProjectEntryId;
2352
2353    fn key(&self) -> Self::Key {
2354        self.id
2355    }
2356}
2357
/// Summary for the id-ordered index: tracks the maximum entry id in a range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2362
2363impl sum_tree::Summary for PathEntrySummary {
2364    type Context = ();
2365
2366    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2367        self.max_id = summary.max_id;
2368    }
2369}
2370
// Allows seeking the id-ordered tree directly by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2376
/// Ordering key for path-sorted trees: wraps a worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2379
2380impl Default for PathKey {
2381    fn default() -> Self {
2382        Self(Path::new("").into())
2383    }
2384}
2385
// Allows seeking the path-ordered tree directly by `PathKey`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2391
/// Scans a worktree's directory on background threads, keeping `snapshot`
/// up to date in response to filesystem events and refresh requests.
struct BackgroundScanner {
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    // Reports scan progress/state back to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Requests to rescan specific paths, acknowledged via the barrier sender.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // Snapshot as of the last reported update, plus the sorted set of paths
    // changed since then (see `process_events`).
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2401
2402impl BackgroundScanner {
2403    fn new(
2404        snapshot: LocalSnapshot,
2405        fs: Arc<dyn Fs>,
2406        status_updates_tx: UnboundedSender<ScanState>,
2407        executor: Arc<executor::Background>,
2408        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2409    ) -> Self {
2410        Self {
2411            fs,
2412            status_updates_tx,
2413            executor,
2414            refresh_requests_rx,
2415            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2416            snapshot: Mutex::new(snapshot),
2417            finished_initial_scan: false,
2418        }
2419    }
2420
    /// Drives the scanner: performs the initial recursive scan of the root,
    /// then loops forever handling refresh requests and filesystem events.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If the root itself is ignored, mark its entry accordingly.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our copy lets `scan_dirs` terminate once workers finish.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                // Batch all immediately available FS events before processing.
                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2512
    /// Reloads the given paths and reports completion through `barrier`.
    /// NOTE(review): the returned bool comes from `send_status_update`;
    /// callers (see `run`) treat `false` as a signal to stop scanning.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2517
    /// Handles a batch of filesystem event paths: rescans the affected
    /// entries, refreshes ignore statuses, prunes stale git repository state,
    /// and broadcasts a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record the changed paths, merged in sorted order with those
            // already accumulated since the last update (`util::extend_sorted`).
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repositories whose work directory entry or `.git` folder
        // no longer exists in the snapshot.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Keep repository entries only while their backing local repository
        // is still tracked above.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2560
    /// Drains the scan-job queue using one worker per CPU, with each worker
    /// interleaving three duties: user-initiated path refresh requests
    /// (highest priority), periodic progress notifications, and recursive
    /// directory scanning.
    ///
    /// Sends `ScanState::Started` first and bails out immediately if the
    /// status receiver has been dropped. Workers exit when either channel
    /// closes, or return early if a refresh request fails to complete.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiver is gone there is nobody to report to, so scanning
        // would be pointless — stop before doing any work.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race for this interval; report.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already reported; adopt its count so
                                            // we contend again on the next interval.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    // Re-arm the timer for the next interval.
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors on the worktree root itself are deliberately
                                        // not logged here.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2633
2634    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2635        let mut prev_state = self.prev_state.lock();
2636        let snapshot = self.snapshot.lock().clone();
2637        let mut old_snapshot = snapshot.snapshot.clone();
2638        mem::swap(&mut old_snapshot, &mut prev_state.0);
2639        let changed_paths = mem::take(&mut prev_state.1);
2640        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
2641        self.status_updates_tx
2642            .unbounded_send(ScanState::Updated {
2643                snapshot,
2644                changes,
2645                scanning,
2646                barrier,
2647            })
2648            .is_ok()
2649    }
2650
    /// Reads the directory described by `job`, building entries for each of
    /// its children, loading any `.gitignore` found there, and enqueueing
    /// scan jobs for subdirectories (skipping recursive symlink cycles).
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // Parallels the directory children in `new_entries`: one element per
        // directory child, `None` when a child is skipped due to a symlink cycle.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed
                // to reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it is usually encountered early in the listing, so
                // there should rarely be many entries to revisit. Update the ignore stack
                // associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // Each directory entry has a corresponding slot in `new_jobs`,
                    // visited in the same order.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        // Everything under an ignored directory is ignored wholesale.
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        // Insert all of this directory's children into the snapshot in one batch.
        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue subdirectory scans only after the snapshot has been updated.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2776
    /// Re-reads metadata for the given absolute paths and updates the snapshot
    /// accordingly: vanished paths are removed from the tree, existing paths
    /// are re-inserted, and (when `scan_queue_tx` is provided) directories are
    /// enqueued for a recursive rescan.
    ///
    /// Returns the affected worktree-relative paths, or `None` if the worktree
    /// root cannot be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        // The presence of a scan queue means affected directories will be
        // rescanned recursively.
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort so children immediately follow their parents, then drop paths
        // contained within an earlier path (`dedup_by` removes `a` when the
        // closure returns true, keeping `b`).
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Fetch all metadata concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive refresh completes within this same scan id.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        // Re-insert entries for paths that still exist on disk.
        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // Refresh git repository state if this path touches one.
                    self.reload_repo_for_path(&path, &mut snapshot);

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against recursive symlinks before enqueueing a
                        // recursive scan of this directory.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                // Path no longer exists; it was already removed above.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2866
    /// Updates git repository state in response to a change at `path`.
    ///
    /// If the path lies inside a `.git` directory, the whole repository's
    /// index, branch name, and statuses are reloaded. Otherwise, if the path
    /// belongs to a known repository work directory, only that single file's
    /// status is refreshed.
    ///
    /// Returns `None` to short-circuit when no repository applies or nothing
    /// needed updating; callers do not use the value beyond that.
    fn reload_repo_for_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            // The change is inside a `.git` directory: reload the whole repo.
            let (entry_id, repo) = snapshot.repo_for_metadata(&path)?;

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.statuses().unwrap_or_default();

            // Record that this repository is fully up to date at this scan id.
            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.full_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.statuses = statuses;
            });
        } else if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let status = {
                let local_repo = snapshot.get_local_repo(&repo)?;
                // Short circuit if we've already scanned everything
                if local_repo.full_scan_id == scan_id {
                    return None;
                }

                let git_ptr = local_repo.repo_ptr.lock();
                git_ptr.file_status(&repo_path)?
            };

            if status != GitStatus::Untracked {
                let work_dir = repo.work_directory(snapshot)?;
                let work_dir_id = repo.work_directory;

                snapshot
                    .git_repositories
                    .update(&work_dir_id, |entry| entry.scan_id = scan_id);

                snapshot
                    .repository_entries
                    .update(&work_dir, |entry| entry.statuses.insert(repo_path, status));
            }
        }

        Some(())
    }
2924
    /// Rebuilds ignore state after `.gitignore` files change.
    ///
    /// Determines which gitignore files were modified or deleted since the
    /// last completed scan, then re-evaluates ignore statuses beneath the
    /// affected directories on a pool of workers, while still servicing path
    /// refresh requests with higher priority.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A gitignore changed after the last completed scan and its
                // directory still exists: recompute statuses beneath it.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // The gitignore file itself is gone: forget its rules.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip paths nested beneath this one: the recursive job for the
            // ancestor will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Drop our sender so workers terminate once the queue drains; jobs
        // carry their own clones for enqueueing child directories.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3002
    /// Recomputes the ignore status of `job.abs_path`'s direct children
    /// against the given ignore stack, batching edits for entries whose
    /// status changed and enqueueing follow-up jobs for subdirectories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own .gitignore, if one is known.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // Everything under an ignored directory is ignored wholesale.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record an edit when the status actually flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply all accumulated edits to the shared snapshot at once.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3045
    /// Computes which paths changed between two snapshots, restricted to the
    /// given event paths and their descendants.
    ///
    /// Both snapshots' entry trees are walked in lockstep from each event
    /// path, classifying divergences as `Added`, `Removed`, or `Updated`.
    /// Events received before the initial scan finished are reported as
    /// `AddedOrUpdated`, since the two cases can't be told apart.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past this event
                        // path's entire subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
3112
3113    async fn progress_timer(&self, running: bool) {
3114        if !running {
3115            return futures::future::pending().await;
3116        }
3117
3118        #[cfg(any(test, feature = "test-support"))]
3119        if self.fs.is_fake() {
3120            return self.executor.simulate_random_delay().await;
3121        }
3122
3123        smol::Timer::after(Duration::from_millis(100)).await;
3124    }
3125}
3126
3127fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3128    let mut result = root_char_bag;
3129    result.extend(
3130        path.to_string_lossy()
3131            .chars()
3132            .map(|c| c.to_ascii_lowercase()),
3133    );
3134    result
3135}
3136
/// A unit of work for the directory scanner: one directory to read, plus the
/// context needed to scan it and enqueue jobs for its subdirectories.
struct ScanJob {
    // Absolute path of the directory on disk.
    abs_path: Arc<Path>,
    // Path of the directory relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
3144
/// A unit of work for recomputing ignore statuses: one directory whose
/// effective gitignore rules may have changed.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory to re-evaluate.
    abs_path: Arc<Path>,
    // Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue for enqueueing follow-up jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3150
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until file-system events that predate the call have been fully
    /// processed by the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3158
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // Remove it again and wait for that to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-progress scan to finish.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3199
/// Dimension accumulated while traversing `entries_by_path`, tracking the
/// rightmost path seen so far along with running entry counts.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Largest path covered so far.
    max_path: &'a Path,
    // Total number of entries.
    count: usize,
    // Entries that are not ignored (see `count()` for the mapping).
    visible_count: usize,
    // File entries, including ignored ones.
    file_count: usize,
    // File entries that are not ignored.
    visible_file_count: usize,
}
3208
3209impl<'a> TraversalProgress<'a> {
3210    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3211        match (include_ignored, include_dirs) {
3212            (true, true) => self.count,
3213            (true, false) => self.file_count,
3214            (false, true) => self.visible_count,
3215            (false, false) => self.visible_file_count,
3216        }
3217    }
3218}
3219
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Folds a subtree summary into the running progress: the max path is
    /// replaced while the counts accumulate.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3229
3230impl<'a> Default for TraversalProgress<'a> {
3231    fn default() -> Self {
3232        Self {
3233            max_path: Path::new(""),
3234            count: 0,
3235            visible_count: 0,
3236            file_count: 0,
3237            visible_file_count: 0,
3238        }
3239    }
3240}
3241
/// Cursor-based iterator over a snapshot's entries, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
3247
impl<'a> Traversal<'a> {
    /// Advances to the next entry that matches the traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the entry at `offset`, where offsets count only
    /// entries matching the traversal's dir/ignored filters.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips over the current entry's descendants, advancing until an entry
    /// outside its subtree matches the traversal's filters.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek past everything under the current entry's path.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The cursor's current position, counted under the traversal's filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3293
3294impl<'a> Iterator for Traversal<'a> {
3295    type Item = &'a Entry;
3296
3297    fn next(&mut self) -> Option<Self::Item> {
3298        if let Some(item) = self.entry() {
3299            self.advance();
3300            Some(item)
3301        } else {
3302            None
3303        }
3304    }
3305}
3306
/// Seek targets used to position a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first location outside the subtree rooted at the given path.
    PathSuccessor(&'a Path),
    // Seek to the entry at the given offset, counted under the same
    // dir/ignored filters as the traversal.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3317
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Compares this target against the cursor's accumulated progress,
    /// driving the sum-tree seek.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Keep seeking while still inside the subtree rooted at
                // `path`; stop at the first location beyond it.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3340
/// Iterator over the immediate children of a directory entry, built on a
/// `Traversal` that skips each child's own descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3345
3346impl<'a> Iterator for ChildEntriesIter<'a> {
3347    type Item = &'a Entry;
3348
3349    fn next(&mut self) -> Option<Self::Item> {
3350        if let Some(item) = self.traversal.entry() {
3351            if item.path.starts_with(&self.parent_path) {
3352                self.traversal.advance_to_sibling();
3353                return Some(item);
3354            }
3355        }
3356        None
3357    }
3358}
3359
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a local worktree entry into its wire representation.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Lossy conversion: non-UTF-8 path components are replaced with
            // the Unicode replacement character.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3373
3374impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3375    type Error = anyhow::Error;
3376
3377    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3378        if let Some(mtime) = entry.mtime {
3379            let kind = if entry.is_dir {
3380                EntryKind::Dir
3381            } else {
3382                let mut char_bag = *root_char_bag;
3383                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3384                EntryKind::File(char_bag)
3385            };
3386            let path: Arc<Path> = PathBuf::from(entry.path).into();
3387            Ok(Entry {
3388                id: ProjectEntryId::from_proto(entry.id),
3389                kind,
3390                path,
3391                inode: entry.inode,
3392                mtime: mtime.into(),
3393                is_symlink: entry.is_symlink,
3394                is_ignored: entry.is_ignored,
3395            })
3396        } else {
3397            Err(anyhow!(
3398                "missing mtime in remote worktree entry {:?}",
3399                entry.path
3400            ))
3401        }
3402    }
3403}
3404
3405#[cfg(test)]
3406mod tests {
3407    use super::*;
3408    use fs::{FakeFs, RealFs};
3409    use gpui::{executor::Deterministic, TestAppContext};
3410    use pretty_assertions::assert_eq;
3411    use rand::prelude::*;
3412    use serde_json::json;
3413    use std::{env, fmt::Write};
3414    use util::{http::FakeHttpClient, test::temp_tree};
3415
3416    #[gpui::test]
3417    async fn test_traversal(cx: &mut TestAppContext) {
3418        let fs = FakeFs::new(cx.background());
3419        fs.insert_tree(
3420            "/root",
3421            json!({
3422               ".gitignore": "a/b\n",
3423               "a": {
3424                   "b": "",
3425                   "c": "",
3426               }
3427            }),
3428        )
3429        .await;
3430
3431        let http_client = FakeHttpClient::with_404_response();
3432        let client = cx.read(|cx| Client::new(http_client, cx));
3433
3434        let tree = Worktree::local(
3435            client,
3436            Path::new("/root"),
3437            true,
3438            fs,
3439            Default::default(),
3440            &mut cx.to_async(),
3441        )
3442        .await
3443        .unwrap();
3444        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3445            .await;
3446
3447        tree.read_with(cx, |tree, _| {
3448            assert_eq!(
3449                tree.entries(false)
3450                    .map(|entry| entry.path.as_ref())
3451                    .collect::<Vec<_>>(),
3452                vec![
3453                    Path::new(""),
3454                    Path::new(".gitignore"),
3455                    Path::new("a"),
3456                    Path::new("a/c"),
3457                ]
3458            );
3459            assert_eq!(
3460                tree.entries(true)
3461                    .map(|entry| entry.path.as_ref())
3462                    .collect::<Vec<_>>(),
3463                vec![
3464                    Path::new(""),
3465                    Path::new(".gitignore"),
3466                    Path::new("a"),
3467                    Path::new("a/b"),
3468                    Path::new("a/c"),
3469                ]
3470            );
3471        })
3472    }
3473
    /// A worktree containing circular symlinks (each subdirectory linking
    /// back to its parent) must still scan to completion: the scanner records
    /// the symlink entries themselves rather than following them into a cycle.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Create the cycle: both subdirectories link back to their parent.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinks are listed as plain entries; their targets are not
        // expanded.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be reflected by the rescan
        // without disturbing the rest of the tree.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3553
    /// Files created after the initial scan must receive the correct ignore
    /// status from `.gitignore` files both inside the worktree and in
    /// ancestor directories outside of it.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // The worktree root is `tree`; its parent directory carries a
        // `.gitignore` whose rules also apply to files inside the worktree.
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Statuses assigned by the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files written after the scan must be classified the same way once
        // the resulting fs events have been processed.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The `.git` directory itself is always treated as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3632
    /// Paths must resolve to the nearest enclosing git repository (nested
    /// repos shadow outer ones), repository-internal changes must emit
    /// `UpdatedGitRepositories` events, and deleting a `.git` directory must
    /// unregister the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        // Two repositories: `dir1` and a nested one at `dir1/deps/dep1`.
        // `c.txt` lies outside of both.
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // Outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            // Resolves to the outer repository.
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // Resolves to the nested repository, not the outer one.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        // Record every repository-update event the worktree emits.
        let repo_update_events = Arc::new(Mutex::new(vec![]));
        tree.update(cx, |_, cx| {
            let repo_update_events = repo_update_events.clone();
            cx.subscribe(&tree, move |_, _, event, _| {
                if let Event::UpdatedGitRepositories(update) = event {
                    repo_update_events.lock().push(update.clone());
                }
            })
            .detach();
        });

        // A change inside `.git` should produce an update for that repo's
        // work directory.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        assert_eq!(
            repo_update_events.lock()[0]
                .keys()
                .cloned()
                .collect::<Vec<Arc<Path>>>(),
            vec![Path::new("dir1").into()]
        );

        // Removing `.git` unregisters the repository.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3723
3724    #[gpui::test]
3725    async fn test_write_file(cx: &mut TestAppContext) {
3726        let dir = temp_tree(json!({
3727            ".git": {},
3728            ".gitignore": "ignored-dir\n",
3729            "tracked-dir": {},
3730            "ignored-dir": {}
3731        }));
3732
3733        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3734
3735        let tree = Worktree::local(
3736            client,
3737            dir.path(),
3738            true,
3739            Arc::new(RealFs),
3740            Default::default(),
3741            &mut cx.to_async(),
3742        )
3743        .await
3744        .unwrap();
3745        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3746            .await;
3747        tree.flush_fs_events(cx).await;
3748
3749        tree.update(cx, |tree, cx| {
3750            tree.as_local().unwrap().write_file(
3751                Path::new("tracked-dir/file.txt"),
3752                "hello".into(),
3753                Default::default(),
3754                cx,
3755            )
3756        })
3757        .await
3758        .unwrap();
3759        tree.update(cx, |tree, cx| {
3760            tree.as_local().unwrap().write_file(
3761                Path::new("ignored-dir/file.txt"),
3762                "world".into(),
3763                Default::default(),
3764                cx,
3765            )
3766        })
3767        .await
3768        .unwrap();
3769
3770        tree.read_with(cx, |tree, _| {
3771            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3772            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3773            assert!(!tracked.is_ignored);
3774            assert!(ignored.is_ignored);
3775        });
3776    }
3777
    /// Creating a directory while the initial scan is still running must
    /// produce a consistent snapshot, and the resulting update must replay
    /// cleanly onto a snapshot taken before the creation.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken before the scan completes and before `a/e` exists.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create `a/e` (including the intermediate `a`) mid-scan.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Replaying the diff between the two snapshots must bring the old
        // snapshot exactly up to date.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
3826
    /// Randomized test: performs worktree mutations while the initial scan is
    /// still in progress, checking snapshot invariants and that incremental
    /// updates replay correctly onto an older snapshot.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable via env vars for longer local runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot, taken before the scan has finished.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // Mutations may legitimately fail mid-scan (e.g. racing with the
            // scanner), so errors are only logged.
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify that an incremental update brings the
            // baseline snapshot up to date.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the scan finish, then re-check invariants and a final replay.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3903
    /// Randomized test of worktree change tracking after the initial scan:
    /// verifies that `UpdatedEntries` events describe every change exactly,
    /// that a fresh scan agrees with the incrementally-maintained snapshot,
    /// and that updates replay correctly onto arbitrary older snapshots.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Both knobs are overridable via env vars for longer local runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // Mirror of the worktree's sorted path list, maintained purely
            // from the change events and compared against the real list
            // after every event.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Position the path occupies (or would occupy) in the
                        // sorted mirror.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Mutate the fs while randomly interleaving partial event flushes, so
        // the worktree observes changes in arbitrarily-sized batches.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            // Occasionally save a snapshot to later test update replay
            // against it.
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Drain all remaining events and let the worktree settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A brand-new worktree scanning the same fs must agree with the
        // incrementally-maintained snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored snapshot must replay to the final state, with ignored
        // entries randomly included or stripped out first.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Remove ignored entries from the old snapshot, since the
                // update won't mention them either.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4058
    /// Applies one random mutation to the worktree: delete an entry, rename
    /// an entry, or create/overwrite a file or directory.
    ///
    /// NOTE(review): the order of `rng` draws is load-bearing — seeded test
    /// runs reproduce the same operation sequence — so statements here must
    /// not be reordered.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3: delete, but never the worktree root itself.
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3: rename into a random directory (or a file's parent dir).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into its own subtree; fall back to a
                // fresh top-level name.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder: create a child under a directory, or overwrite a file.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4118
4119    async fn randomly_mutate_fs(
4120        fs: &Arc<dyn Fs>,
4121        root_path: &Path,
4122        insertion_probability: f64,
4123        rng: &mut impl Rng,
4124    ) {
4125        let mut files = Vec::new();
4126        let mut dirs = Vec::new();
4127        for path in fs.as_fake().paths() {
4128            if path.starts_with(root_path) {
4129                if fs.is_file(&path).await {
4130                    files.push(path);
4131                } else {
4132                    dirs.push(path);
4133                }
4134            }
4135        }
4136
4137        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4138            let path = dirs.choose(rng).unwrap();
4139            let new_path = path.join(gen_name(rng));
4140
4141            if rng.gen() {
4142                log::info!(
4143                    "creating dir {:?}",
4144                    new_path.strip_prefix(root_path).unwrap()
4145                );
4146                fs.create_dir(&new_path).await.unwrap();
4147            } else {
4148                log::info!(
4149                    "creating file {:?}",
4150                    new_path.strip_prefix(root_path).unwrap()
4151                );
4152                fs.create_file(&new_path, Default::default()).await.unwrap();
4153            }
4154        } else if rng.gen_bool(0.05) {
4155            let ignore_dir_path = dirs.choose(rng).unwrap();
4156            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4157
4158            let subdirs = dirs
4159                .iter()
4160                .filter(|d| d.starts_with(&ignore_dir_path))
4161                .cloned()
4162                .collect::<Vec<_>>();
4163            let subfiles = files
4164                .iter()
4165                .filter(|d| d.starts_with(&ignore_dir_path))
4166                .cloned()
4167                .collect::<Vec<_>>();
4168            let files_to_ignore = {
4169                let len = rng.gen_range(0..=subfiles.len());
4170                subfiles.choose_multiple(rng, len)
4171            };
4172            let dirs_to_ignore = {
4173                let len = rng.gen_range(0..subdirs.len());
4174                subdirs.choose_multiple(rng, len)
4175            };
4176
4177            let mut ignore_contents = String::new();
4178            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4179                writeln!(
4180                    ignore_contents,
4181                    "{}",
4182                    path_to_ignore
4183                        .strip_prefix(&ignore_dir_path)
4184                        .unwrap()
4185                        .to_str()
4186                        .unwrap()
4187                )
4188                .unwrap();
4189            }
4190            log::info!(
4191                "creating gitignore {:?} with contents:\n{}",
4192                ignore_path.strip_prefix(&root_path).unwrap(),
4193                ignore_contents
4194            );
4195            fs.save(
4196                &ignore_path,
4197                &ignore_contents.as_str().into(),
4198                Default::default(),
4199            )
4200            .await
4201            .unwrap();
4202        } else {
4203            let old_path = {
4204                let file_path = files.choose(rng);
4205                let dir_path = dirs[1..].choose(rng);
4206                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4207            };
4208
4209            let is_rename = rng.gen();
4210            if is_rename {
4211                let new_path_parent = dirs
4212                    .iter()
4213                    .filter(|d| !d.starts_with(old_path))
4214                    .choose(rng)
4215                    .unwrap();
4216
4217                let overwrite_existing_dir =
4218                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4219                let new_path = if overwrite_existing_dir {
4220                    fs.remove_dir(
4221                        &new_path_parent,
4222                        RemoveOptions {
4223                            recursive: true,
4224                            ignore_if_not_exists: true,
4225                        },
4226                    )
4227                    .await
4228                    .unwrap();
4229                    new_path_parent.to_path_buf()
4230                } else {
4231                    new_path_parent.join(gen_name(rng))
4232                };
4233
4234                log::info!(
4235                    "renaming {:?} to {}{:?}",
4236                    old_path.strip_prefix(&root_path).unwrap(),
4237                    if overwrite_existing_dir {
4238                        "overwrite "
4239                    } else {
4240                        ""
4241                    },
4242                    new_path.strip_prefix(&root_path).unwrap()
4243                );
4244                fs.rename(
4245                    &old_path,
4246                    &new_path,
4247                    fs::RenameOptions {
4248                        overwrite: true,
4249                        ignore_if_exists: true,
4250                    },
4251                )
4252                .await
4253                .unwrap();
4254            } else if fs.is_file(&old_path).await {
4255                log::info!(
4256                    "deleting file {:?}",
4257                    old_path.strip_prefix(&root_path).unwrap()
4258                );
4259                fs.remove_file(old_path, Default::default()).await.unwrap();
4260            } else {
4261                log::info!(
4262                    "deleting dir {:?}",
4263                    old_path.strip_prefix(&root_path).unwrap()
4264                );
4265                fs.remove_dir(
4266                    &old_path,
4267                    RemoveOptions {
4268                        recursive: true,
4269                        ignore_if_not_exists: true,
4270                    },
4271                )
4272                .await
4273                .unwrap();
4274            }
4275        }
4276    }
4277
4278    fn gen_name(rng: &mut impl Rng) -> String {
4279        (0..6)
4280            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4281            .map(char::from)
4282            .collect()
4283    }
4284
    impl LocalSnapshot {
        /// Test-only consistency check, run by the randomized tests after
        /// each mutation. Panics (via `assert!`) on the first violated
        /// invariant of the snapshot's internal indexes.
        fn check_invariants(&self) {
            // Invariant 1: `entries_by_path` and `entries_by_id` describe the
            // same set of (path, id) pairs. The by-id side is funneled through
            // a BTreeSet so both sides compare in the same (path) order.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // Invariant 2: the `files` traversals yield exactly the file
            // entries of `entries_by_path`, in the same order. The boolean
            // argument is presumably `include_ignored` (definition not in
            // view): `files(true, 0)` should see every file, `files(false, 0)`
            // only non-ignored ones — consistent with the `!entry.is_ignored`
            // guard below.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Both iterators must be fully consumed — no file entry may exist
            // in the traversals that the cursor scan did not produce.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Invariant 3: walking the tree via `child_entries` visits the
            // same paths, in the same order, as a flat scan of
            // `entries_by_path`. Despite the `bfs_paths` name, inserting each
            // directory's children at the pre-insertion stack length (`ix`)
            // and popping from the end yields a preorder depth-first walk,
            // which is why it is compared against the DFS cursor order below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // Invariant 4: the public `entries(true)` traversal agrees with
            // the raw cursor scan over `entries_by_path`.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Invariant 5: every directory tracked in
            // `ignores_by_parent_abs_path` still exists as an entry, and its
            // `.gitignore` file is present under it — i.e. no stale ignore
            // state survives a deletion or rename.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples
        /// sorted by path, optionally filtering out ignored entries. Used by
        /// tests to compare two snapshots for equality.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4359}