worktree.rs

   1use crate::{
   2    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
   3};
   4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   5use anyhow::{anyhow, Context, Result};
   6use client::{proto, Client};
   7use clock::ReplicaId;
   8use collections::{HashMap, VecDeque};
   9use fs::{
  10    repository::{GitRepository, GitStatus, RepoPath},
  11    Fs, LineEnding,
  12};
  13use futures::{
  14    channel::{
  15        mpsc::{self, UnboundedSender},
  16        oneshot,
  17    },
  18    select_biased,
  19    task::Poll,
  20    Stream, StreamExt,
  21};
  22use fuzzy::CharBag;
  23use git::{DOT_GIT, GITIGNORE};
  24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
  25use language::{
  26    proto::{
  27        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
  28        serialize_version,
  29    },
  30    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
  31};
  32use lsp::LanguageServerId;
  33use parking_lot::Mutex;
  34use postage::{
  35    barrier,
  36    prelude::{Sink as _, Stream as _},
  37    watch,
  38};
  39use smol::channel::{self, Sender};
  40use std::{
  41    any::Any,
  42    cmp::{self, Ordering},
  43    convert::TryFrom,
  44    ffi::OsStr,
  45    fmt,
  46    future::Future,
  47    mem,
  48    ops::{Deref, DerefMut},
  49    path::{Path, PathBuf},
  50    pin::Pin,
  51    sync::{
  52        atomic::{AtomicUsize, Ordering::SeqCst},
  53        Arc,
  54    },
  55    time::{Duration, SystemTime},
  56};
  57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
  58use util::{paths::HOME, ResultExt, TryFutureExt};
  59
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
  61pub struct WorktreeId(usize);
  62
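/// A directory tree belonging to a project, either rooted on the local
/// filesystem (`Local`) or mirrored from a collaborator over RPC (`Remote`).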
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
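/// A worktree backed by the local filesystem. It owns the background scanner
/// task that keeps its snapshot in sync with the filesystem, and it tracks
/// diagnostics and sharing state for collaboration.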
  68pub struct LocalWorktree {
  69    snapshot: LocalSnapshot,
  70    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
  71    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
  72    _background_scanner_task: Task<()>,
  73    share: Option<ShareState>,
  74    diagnostics: HashMap<
  75        Arc<Path>,
  76        Vec<(
  77            LanguageServerId,
  78            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
  79        )>,
  80    >,
  81    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  82    client: Arc<Client>,
  83    fs: Arc<dyn Fs>,
  84    visible: bool,
  85}
  86
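/// A replica of a worktree hosted on another collaborator's machine. Its
/// snapshot is kept up to date by applying `proto::UpdateWorktree` messages
/// received from the host.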
  87pub struct RemoteWorktree {
  88    snapshot: Snapshot,
  89    background_snapshot: Arc<Mutex<Snapshot>>,
  90    project_id: u64,
  91    client: Arc<Client>,
  92    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
  93    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
  94    replica_id: ReplicaId,
  95    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
  96    visible: bool,
  97    disconnected: bool,
  98}
  99
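/// A point-in-time view of a worktree's entries and git repositories, shared
/// by both local and remote worktrees.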
 100#[derive(Clone)]
 101pub struct Snapshot {
 102    id: WorktreeId,
 103    abs_path: Arc<Path>,
 104    root_name: String,
 105    root_char_bag: CharBag,
 106    entries_by_path: SumTree<Entry>,
 107    entries_by_id: SumTree<PathEntry>,
 108    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
 109
 110    /// A number that increases every time the worktree begins scanning
 111    /// a set of paths from the filesystem. This scanning could be caused
 112    /// by some operation performed on the worktree, such as reading or
 113    /// writing a file, or by an event reported by the filesystem.
 114    scan_id: usize,
 115
 116    /// The latest scan id that has completed, and whose preceding scans
 117    /// have all completed. The current `scan_id` could be more than one
 118    /// greater than the `completed_scan_id` if operations are performed
 119    /// on the worktree while it is processing a file-system event.
 120    completed_scan_id: usize,
 121}
 122
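/// A git repository discovered within the worktree, identified by the project
/// entry of its work directory.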
 123#[derive(Clone, Debug, PartialEq, Eq)]
 124pub struct RepositoryEntry {
 125    pub(crate) work_directory: WorkDirectoryEntry,
 126    pub(crate) branch: Option<Arc<str>>,
 127    pub(crate) statuses: TreeMap<RepoPath, GitStatus>,
 128}
 129
 130impl RepositoryEntry {
 131    pub fn branch(&self) -> Option<Arc<str>> {
 132        self.branch.clone()
 133    }
 134
 135    pub fn work_directory_id(&self) -> ProjectEntryId {
 136        *self.work_directory
 137    }
 138
 139    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
 140        snapshot
 141            .entry_for_id(self.work_directory_id())
 142            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
 143    }
 144
 145    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
 146        self.work_directory.contains(snapshot, path)
 147    }
 148}
 149
 150impl From<&RepositoryEntry> for proto::RepositoryEntry {
 151    fn from(value: &RepositoryEntry) -> Self {
 152        proto::RepositoryEntry {
 153            work_directory_id: value.work_directory.to_proto(),
            branch: value.branch.as_ref().map(|branch| branch.to_string()),
 155        }
 156    }
 157}
 158
/// This path corresponds to the repository's work directory (the folder that contains the `.git` directory or file)
 160#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 161pub struct RepositoryWorkDirectory(Arc<Path>);
 162
 163impl Default for RepositoryWorkDirectory {
 164    fn default() -> Self {
 165        RepositoryWorkDirectory(Arc::from(Path::new("")))
 166    }
 167}
 168
 169impl AsRef<Path> for RepositoryWorkDirectory {
 170    fn as_ref(&self) -> &Path {
 171        self.0.as_ref()
 172    }
 173}
 174
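/// The `ProjectEntryId` of a repository's work directory (the folder
/// containing the `.git` directory or file).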
 175#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
 176pub struct WorkDirectoryEntry(ProjectEntryId);
 177
 178impl WorkDirectoryEntry {
 179    // Note that these paths should be relative to the worktree root.
 180    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
 181        snapshot
 182            .entry_for_id(self.0)
 183            .map(|entry| path.starts_with(&entry.path))
 184            .unwrap_or(false)
 185    }
 186
 187    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
 188        worktree.entry_for_id(self.0).and_then(|entry| {
 189            path.strip_prefix(&entry.path)
 190                .ok()
 191                .map(move |path| path.into())
 192        })
 193    }
 194}
 195
 196impl Deref for WorkDirectoryEntry {
 197    type Target = ProjectEntryId;
 198
 199    fn deref(&self) -> &Self::Target {
 200        &self.0
 201    }
 202}
 203
impl From<ProjectEntryId> for WorkDirectoryEntry {
 205    fn from(value: ProjectEntryId) -> Self {
 206        WorkDirectoryEntry(value)
 207    }
 208}
 209
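/// A `Snapshot` plus the local-only state needed to maintain it: parsed
/// gitignore files, discovered git repositories, and bookkeeping used while
/// processing filesystem events.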
 210#[derive(Debug, Clone)]
 211pub struct LocalSnapshot {
 212    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Keyed by the ProjectEntryId of each repository's work directory entry
    // (the same id returned by `RepositoryEntry::work_directory_id`).
 215    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 216    removed_entry_ids: HashMap<u64, ProjectEntryId>,
 217    next_entry_id: Arc<AtomicUsize>,
 218    snapshot: Snapshot,
 219}
 220
 221#[derive(Debug, Clone)]
 222pub struct LocalRepositoryEntry {
 223    pub(crate) scan_id: usize,
 224    pub(crate) full_scan_id: usize,
 225    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
 226    /// Path to the actual .git folder.
 227    /// Note: if .git is a file, this points to the folder indicated by the .git file
 228    pub(crate) git_dir_path: Arc<Path>,
 229}
 230
 231impl LocalRepositoryEntry {
 232    // Note that this path should be relative to the worktree root.
 233    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
 234        path.starts_with(self.git_dir_path.as_ref())
 235    }
 236}
 237
 238impl Deref for LocalSnapshot {
 239    type Target = Snapshot;
 240
 241    fn deref(&self) -> &Self::Target {
 242        &self.snapshot
 243    }
 244}
 245
 246impl DerefMut for LocalSnapshot {
 247    fn deref_mut(&mut self) -> &mut Self::Target {
 248        &mut self.snapshot
 249    }
 250}
 251
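// Messages sent from the background scanner to the `LocalWorktree`, either
// marking the start of a scan or delivering an updated snapshot along with
// the set of changed paths.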
 252enum ScanState {
 253    Started,
 254    Updated {
 255        snapshot: LocalSnapshot,
 256        changes: HashMap<Arc<Path>, PathChange>,
 257        barrier: Option<barrier::Sender>,
 258        scanning: bool,
 259    },
 260}
 261
 262struct ShareState {
 263    project_id: u64,
 264    snapshots_tx: watch::Sender<LocalSnapshot>,
 265    resume_updates: watch::Sender<()>,
 266    _maintain_remote_snapshot: Task<Option<()>>,
 267}
 268
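/// Events emitted by a worktree when its entries or git repositories change.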
 269pub enum Event {
 270    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
 271    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
 272}
 273
 274impl Entity for Worktree {
 275    type Event = Event;
 276}
 277
 278impl Worktree {
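    /// Create a worktree backed by the local filesystem rooted at `path`.
    ///
    /// A minimal usage sketch; the `client`, `fs`, and `cx` values are assumed
    /// to come from the surrounding application, and all names here are
    /// illustrative rather than prescriptive:
    ///
    /// ```ignore
    /// let worktree = Worktree::local(
    ///     client.clone(),
    ///     Path::new("/path/to/project"),
    ///     true, // visible
    ///     fs.clone(),
    ///     Arc::new(AtomicUsize::new(0)), // next_entry_id
    ///     &mut cx,
    /// )
    /// .await?;
    /// ```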
 279    pub async fn local(
 280        client: Arc<Client>,
 281        path: impl Into<Arc<Path>>,
 282        visible: bool,
 283        fs: Arc<dyn Fs>,
 284        next_entry_id: Arc<AtomicUsize>,
 285        cx: &mut AsyncAppContext,
 286    ) -> Result<ModelHandle<Self>> {
 287        // After determining whether the root entry is a file or a directory, populate the
 288        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
 289        let abs_path = path.into();
 290        let metadata = fs
 291            .metadata(&abs_path)
 292            .await
 293            .context("failed to stat worktree path")?;
 294
 295        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
 296            let root_name = abs_path
 297                .file_name()
 298                .map_or(String::new(), |f| f.to_string_lossy().to_string());
 299
 300            let mut snapshot = LocalSnapshot {
 301                ignores_by_parent_abs_path: Default::default(),
 302                removed_entry_ids: Default::default(),
 303                git_repositories: Default::default(),
 304                next_entry_id,
 305                snapshot: Snapshot {
 306                    id: WorktreeId::from_usize(cx.model_id()),
 307                    abs_path: abs_path.clone(),
 308                    root_name: root_name.clone(),
 309                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
 310                    entries_by_path: Default::default(),
 311                    entries_by_id: Default::default(),
 312                    repository_entries: Default::default(),
 313                    scan_id: 1,
 314                    completed_scan_id: 0,
 315                },
 316            };
 317
 318            if let Some(metadata) = metadata {
 319                snapshot.insert_entry(
 320                    Entry::new(
 321                        Arc::from(Path::new("")),
 322                        &metadata,
 323                        &snapshot.next_entry_id,
 324                        snapshot.root_char_bag,
 325                    ),
 326                    fs.as_ref(),
 327                );
 328            }
 329
 330            let (path_changes_tx, path_changes_rx) = channel::unbounded();
 331            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
 332
 333            cx.spawn_weak(|this, mut cx| async move {
 334                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
 335                    this.update(&mut cx, |this, cx| {
 336                        let this = this.as_local_mut().unwrap();
 337                        match state {
 338                            ScanState::Started => {
 339                                *this.is_scanning.0.borrow_mut() = true;
 340                            }
 341                            ScanState::Updated {
 342                                snapshot,
 343                                changes,
 344                                barrier,
 345                                scanning,
 346                            } => {
 347                                *this.is_scanning.0.borrow_mut() = scanning;
 348                                this.set_snapshot(snapshot, cx);
 349                                cx.emit(Event::UpdatedEntries(changes));
 350                                drop(barrier);
 351                            }
 352                        }
 353                        cx.notify();
 354                    });
 355                }
 356            })
 357            .detach();
 358
 359            let background_scanner_task = cx.background().spawn({
 360                let fs = fs.clone();
 361                let snapshot = snapshot.clone();
 362                let background = cx.background().clone();
 363                async move {
 364                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 365                    BackgroundScanner::new(
 366                        snapshot,
 367                        fs,
 368                        scan_states_tx,
 369                        background,
 370                        path_changes_rx,
 371                    )
 372                    .run(events)
 373                    .await;
 374                }
 375            });
 376
 377            Worktree::Local(LocalWorktree {
 378                snapshot,
 379                is_scanning: watch::channel_with(true),
 380                share: None,
 381                path_changes_tx,
 382                _background_scanner_task: background_scanner_task,
 383                diagnostics: Default::default(),
 384                diagnostic_summaries: Default::default(),
 385                client,
 386                fs,
 387                visible,
 388            })
 389        }))
 390    }
 391
 392    pub fn remote(
 393        project_remote_id: u64,
 394        replica_id: ReplicaId,
 395        worktree: proto::WorktreeMetadata,
 396        client: Arc<Client>,
 397        cx: &mut AppContext,
 398    ) -> ModelHandle<Self> {
 399        cx.add_model(|cx: &mut ModelContext<Self>| {
 400            let snapshot = Snapshot {
 401                id: WorktreeId(worktree.id as usize),
 402                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
 403                root_name: worktree.root_name.clone(),
 404                root_char_bag: worktree
 405                    .root_name
 406                    .chars()
 407                    .map(|c| c.to_ascii_lowercase())
 408                    .collect(),
 409                entries_by_path: Default::default(),
 410                entries_by_id: Default::default(),
 411                repository_entries: Default::default(),
 412                scan_id: 1,
 413                completed_scan_id: 0,
 414            };
 415
 416            let (updates_tx, mut updates_rx) = mpsc::unbounded();
 417            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
 418            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
 419
 420            cx.background()
 421                .spawn({
 422                    let background_snapshot = background_snapshot.clone();
 423                    async move {
 424                        while let Some(update) = updates_rx.next().await {
 425                            if let Err(error) =
 426                                background_snapshot.lock().apply_remote_update(update)
 427                            {
 428                                log::error!("error applying worktree update: {}", error);
 429                            }
 430                            snapshot_updated_tx.send(()).await.ok();
 431                        }
 432                    }
 433                })
 434                .detach();
 435
 436            cx.spawn_weak(|this, mut cx| async move {
 437                while (snapshot_updated_rx.recv().await).is_some() {
 438                    if let Some(this) = this.upgrade(&cx) {
 439                        this.update(&mut cx, |this, cx| {
 440                            let this = this.as_remote_mut().unwrap();
 441                            this.snapshot = this.background_snapshot.lock().clone();
 442                            cx.emit(Event::UpdatedEntries(Default::default()));
 443                            cx.notify();
 444                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
 445                                if this.observed_snapshot(*scan_id) {
 446                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
 447                                    let _ = tx.send(());
 448                                } else {
 449                                    break;
 450                                }
 451                            }
 452                        });
 453                    } else {
 454                        break;
 455                    }
 456                }
 457            })
 458            .detach();
 459
 460            Worktree::Remote(RemoteWorktree {
 461                project_id: project_remote_id,
 462                replica_id,
 463                snapshot: snapshot.clone(),
 464                background_snapshot,
 465                updates_tx: Some(updates_tx),
 466                snapshot_subscriptions: Default::default(),
 467                client: client.clone(),
 468                diagnostic_summaries: Default::default(),
 469                visible: worktree.visible,
 470                disconnected: false,
 471            })
 472        })
 473    }
 474
 475    pub fn as_local(&self) -> Option<&LocalWorktree> {
 476        if let Worktree::Local(worktree) = self {
 477            Some(worktree)
 478        } else {
 479            None
 480        }
 481    }
 482
 483    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 484        if let Worktree::Remote(worktree) = self {
 485            Some(worktree)
 486        } else {
 487            None
 488        }
 489    }
 490
 491    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 492        if let Worktree::Local(worktree) = self {
 493            Some(worktree)
 494        } else {
 495            None
 496        }
 497    }
 498
 499    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 500        if let Worktree::Remote(worktree) = self {
 501            Some(worktree)
 502        } else {
 503            None
 504        }
 505    }
 506
 507    pub fn is_local(&self) -> bool {
 508        matches!(self, Worktree::Local(_))
 509    }
 510
 511    pub fn is_remote(&self) -> bool {
 512        !self.is_local()
 513    }
 514
 515    pub fn snapshot(&self) -> Snapshot {
 516        match self {
 517            Worktree::Local(worktree) => worktree.snapshot().snapshot,
 518            Worktree::Remote(worktree) => worktree.snapshot(),
 519        }
 520    }
 521
 522    pub fn scan_id(&self) -> usize {
 523        match self {
 524            Worktree::Local(worktree) => worktree.snapshot.scan_id,
 525            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
 526        }
 527    }
 528
 529    pub fn completed_scan_id(&self) -> usize {
 530        match self {
 531            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
 532            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
 533        }
 534    }
 535
 536    pub fn is_visible(&self) -> bool {
 537        match self {
 538            Worktree::Local(worktree) => worktree.visible,
 539            Worktree::Remote(worktree) => worktree.visible,
 540        }
 541    }
 542
 543    pub fn replica_id(&self) -> ReplicaId {
 544        match self {
 545            Worktree::Local(_) => 0,
 546            Worktree::Remote(worktree) => worktree.replica_id,
 547        }
 548    }
 549
 550    pub fn diagnostic_summaries(
 551        &self,
 552    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
 553        match self {
 554            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 555            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 556        }
 557        .iter()
 558        .flat_map(|(path, summaries)| {
 559            summaries
 560                .iter()
 561                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
 562        })
 563    }
 564
 565    pub fn abs_path(&self) -> Arc<Path> {
 566        match self {
 567            Worktree::Local(worktree) => worktree.abs_path.clone(),
 568            Worktree::Remote(worktree) => worktree.abs_path.clone(),
 569        }
 570    }
 571}
 572
 573impl LocalWorktree {
 574    pub fn contains_abs_path(&self, path: &Path) -> bool {
 575        path.starts_with(&self.abs_path)
 576    }
 577
 578    fn absolutize(&self, path: &Path) -> PathBuf {
 579        if path.file_name().is_some() {
 580            self.abs_path.join(path)
 581        } else {
 582            self.abs_path.to_path_buf()
 583        }
 584    }
 585
 586    pub(crate) fn load_buffer(
 587        &mut self,
 588        id: u64,
 589        path: &Path,
 590        cx: &mut ModelContext<Worktree>,
 591    ) -> Task<Result<ModelHandle<Buffer>>> {
 592        let path = Arc::from(path);
 593        cx.spawn(move |this, mut cx| async move {
 594            let (file, contents, diff_base) = this
 595                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
 596                .await?;
 597            let text_buffer = cx
 598                .background()
 599                .spawn(async move { text::Buffer::new(0, id, contents) })
 600                .await;
 601            Ok(cx.add_model(|cx| {
 602                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
 603                buffer.git_diff_recalc(cx);
 604                buffer
 605            }))
 606        })
 607    }
 608
 609    pub fn diagnostics_for_path(
 610        &self,
 611        path: &Path,
 612    ) -> Vec<(
 613        LanguageServerId,
 614        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 615    )> {
 616        self.diagnostics.get(path).cloned().unwrap_or_default()
 617    }
 618
 619    pub fn update_diagnostics(
 620        &mut self,
 621        server_id: LanguageServerId,
 622        worktree_path: Arc<Path>,
 623        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
 624        _: &mut ModelContext<Worktree>,
 625    ) -> Result<bool> {
 626        let summaries_by_server_id = self
 627            .diagnostic_summaries
 628            .entry(worktree_path.clone())
 629            .or_default();
 630
 631        let old_summary = summaries_by_server_id
 632            .remove(&server_id)
 633            .unwrap_or_default();
 634
 635        let new_summary = DiagnosticSummary::new(&diagnostics);
 636        if new_summary.is_empty() {
 637            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
 638                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 639                    diagnostics_by_server_id.remove(ix);
 640                }
 641                if diagnostics_by_server_id.is_empty() {
 642                    self.diagnostics.remove(&worktree_path);
 643                }
 644            }
 645        } else {
 646            summaries_by_server_id.insert(server_id, new_summary);
 647            let diagnostics_by_server_id =
 648                self.diagnostics.entry(worktree_path.clone()).or_default();
 649            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
 650                Ok(ix) => {
 651                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
 652                }
 653                Err(ix) => {
 654                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
 655                }
 656            }
 657        }
 658
 659        if !old_summary.is_empty() || !new_summary.is_empty() {
 660            if let Some(share) = self.share.as_ref() {
 661                self.client
 662                    .send(proto::UpdateDiagnosticSummary {
 663                        project_id: share.project_id,
 664                        worktree_id: self.id().to_proto(),
 665                        summary: Some(proto::DiagnosticSummary {
 666                            path: worktree_path.to_string_lossy().to_string(),
 667                            language_server_id: server_id.0 as u64,
 668                            error_count: new_summary.error_count as u32,
 669                            warning_count: new_summary.warning_count as u32,
 670                        }),
 671                    })
 672                    .log_err();
 673            }
 674        }
 675
 676        Ok(!old_summary.is_empty() || !new_summary.is_empty())
 677    }
 678
 679    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
 680        let updated_repos =
 681            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
 682        self.snapshot = new_snapshot;
 683
 684        if let Some(share) = self.share.as_mut() {
 685            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
 686        }
 687
 688        if !updated_repos.is_empty() {
 689            cx.emit(Event::UpdatedGitRepositories(updated_repos));
 690        }
 691    }
 692
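    // Compute the set of repositories that differ between two snapshots by
    // walking both entry-id-ordered maps in lockstep; a repository appears in
    // the result if it was added, removed, or rescanned (its `scan_id`
    // changed). The result is keyed by the repository's work directory path.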
 693    fn changed_repos(
 694        &self,
 695        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 696        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
 697    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
 698        let mut diff = HashMap::default();
 699        let mut old_repos = old_repos.iter().peekable();
 700        let mut new_repos = new_repos.iter().peekable();
 701        loop {
 702            match (old_repos.peek(), new_repos.peek()) {
 703                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
 704                    match Ord::cmp(old_entry_id, new_entry_id) {
 705                        Ordering::Less => {
 706                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
 707                                diff.insert(entry.path.clone(), (*old_repo).clone());
 708                            }
 709                            old_repos.next();
 710                        }
 711                        Ordering::Equal => {
 712                            if old_repo.scan_id != new_repo.scan_id {
 713                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
 714                                    diff.insert(entry.path.clone(), (*new_repo).clone());
 715                                }
 716                            }
 717
 718                            old_repos.next();
 719                            new_repos.next();
 720                        }
 721                        Ordering::Greater => {
 722                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
 723                                diff.insert(entry.path.clone(), (*new_repo).clone());
 724                            }
 725                            new_repos.next();
 726                        }
 727                    }
 728                }
 729                (Some((old_entry_id, old_repo)), None) => {
 730                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
 731                        diff.insert(entry.path.clone(), (*old_repo).clone());
 732                    }
 733                    old_repos.next();
 734                }
 735                (None, Some((new_entry_id, new_repo))) => {
 736                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
 737                        diff.insert(entry.path.clone(), (*new_repo).clone());
 738                    }
 739                    new_repos.next();
 740                }
 741                (None, None) => break,
 742            }
 743        }
 744        diff
 745    }
 746
 747    pub fn scan_complete(&self) -> impl Future<Output = ()> {
 748        let mut is_scanning_rx = self.is_scanning.1.clone();
 749        async move {
            let mut is_scanning = *is_scanning_rx.borrow();
 751            while is_scanning {
 752                if let Some(value) = is_scanning_rx.recv().await {
 753                    is_scanning = value;
 754                } else {
 755                    break;
 756                }
 757            }
 758        }
 759    }
 760
 761    pub fn snapshot(&self) -> LocalSnapshot {
 762        self.snapshot.clone()
 763    }
 764
 765    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
 766        proto::WorktreeMetadata {
 767            id: self.id().to_proto(),
 768            root_name: self.root_name().to_string(),
 769            visible: self.visible,
 770            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
 771        }
 772    }
 773
 774    fn load(
 775        &self,
 776        path: &Path,
 777        cx: &mut ModelContext<Worktree>,
 778    ) -> Task<Result<(File, String, Option<String>)>> {
 779        let handle = cx.handle();
 780        let path = Arc::from(path);
 781        let abs_path = self.absolutize(&path);
 782        let fs = self.fs.clone();
 783        let snapshot = self.snapshot();
 784
 785        let mut index_task = None;
 786
 787        if let Some(repo) = snapshot.repo_for(&path) {
 788            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
 789            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
 790                let repo = repo.repo_ptr.to_owned();
 791                index_task = Some(
 792                    cx.background()
 793                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
 794                );
 795            }
 796        }
 797
 798        cx.spawn(|this, mut cx| async move {
 799            let text = fs.load(&abs_path).await?;
 800
 801            let diff_base = if let Some(index_task) = index_task {
 802                index_task.await
 803            } else {
 804                None
 805            };
 806
 807            // Eagerly populate the snapshot with an updated entry for the loaded file
 808            let entry = this
 809                .update(&mut cx, |this, cx| {
 810                    this.as_local().unwrap().refresh_entry(path, None, cx)
 811                })
 812                .await?;
 813
 814            Ok((
 815                File {
 816                    entry_id: entry.id,
 817                    worktree: handle,
 818                    path: entry.path,
 819                    mtime: entry.mtime,
 820                    is_local: true,
 821                    is_deleted: false,
 822                },
 823                text,
 824                diff_base,
 825            ))
 826        })
 827    }
 828
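    // Write the buffer's contents to disk, then notify collaborators (when
    // the worktree is shared) and the buffer itself that the save completed.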
 829    pub fn save_buffer(
 830        &self,
 831        buffer_handle: ModelHandle<Buffer>,
 832        path: Arc<Path>,
 833        has_changed_file: bool,
 834        cx: &mut ModelContext<Worktree>,
 835    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
 836        let handle = cx.handle();
 837        let buffer = buffer_handle.read(cx);
 838
 839        let rpc = self.client.clone();
 840        let buffer_id = buffer.remote_id();
 841        let project_id = self.share.as_ref().map(|share| share.project_id);
 842
 843        let text = buffer.as_rope().clone();
 844        let fingerprint = text.fingerprint();
 845        let version = buffer.version();
 846        let save = self.write_file(path, text, buffer.line_ending(), cx);
 847
 848        cx.as_mut().spawn(|mut cx| async move {
 849            let entry = save.await?;
 850
 851            if has_changed_file {
 852                let new_file = Arc::new(File {
 853                    entry_id: entry.id,
 854                    worktree: handle,
 855                    path: entry.path,
 856                    mtime: entry.mtime,
 857                    is_local: true,
 858                    is_deleted: false,
 859                });
 860
 861                if let Some(project_id) = project_id {
 862                    rpc.send(proto::UpdateBufferFile {
 863                        project_id,
 864                        buffer_id,
 865                        file: Some(new_file.to_proto()),
 866                    })
 867                    .log_err();
 868                }
 869
                buffer_handle.update(&mut cx, |buffer, cx| {
                    buffer.file_updated(new_file, cx).detach();
                });
 875            }
 876
 877            if let Some(project_id) = project_id {
 878                rpc.send(proto::BufferSaved {
 879                    project_id,
 880                    buffer_id,
 881                    version: serialize_version(&version),
 882                    mtime: Some(entry.mtime.into()),
 883                    fingerprint: serialize_fingerprint(fingerprint),
 884                })?;
 885            }
 886
 887            buffer_handle.update(&mut cx, |buffer, cx| {
 888                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
 889            });
 890
 891            Ok((version, fingerprint, entry.mtime))
 892        })
 893    }
 894
 895    pub fn create_entry(
 896        &self,
 897        path: impl Into<Arc<Path>>,
 898        is_dir: bool,
 899        cx: &mut ModelContext<Worktree>,
 900    ) -> Task<Result<Entry>> {
 901        let path = path.into();
 902        let abs_path = self.absolutize(&path);
 903        let fs = self.fs.clone();
 904        let write = cx.background().spawn(async move {
 905            if is_dir {
 906                fs.create_dir(&abs_path).await
 907            } else {
 908                fs.save(&abs_path, &Default::default(), Default::default())
 909                    .await
 910            }
 911        });
 912
 913        cx.spawn(|this, mut cx| async move {
 914            write.await?;
 915            this.update(&mut cx, |this, cx| {
 916                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
 917            })
 918            .await
 919        })
 920    }
 921
 922    pub fn write_file(
 923        &self,
 924        path: impl Into<Arc<Path>>,
 925        text: Rope,
 926        line_ending: LineEnding,
 927        cx: &mut ModelContext<Worktree>,
 928    ) -> Task<Result<Entry>> {
 929        let path = path.into();
 930        let abs_path = self.absolutize(&path);
 931        let fs = self.fs.clone();
 932        let write = cx
 933            .background()
 934            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
 935
 936        cx.spawn(|this, mut cx| async move {
 937            write.await?;
 938            this.update(&mut cx, |this, cx| {
 939                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
 940            })
 941            .await
 942        })
 943    }
 944
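    // Remove the entry's file or directory from disk, then ask the background
    // scanner to rescan the deleted path and wait on the barrier until the
    // snapshot reflects the removal.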
 945    pub fn delete_entry(
 946        &self,
 947        entry_id: ProjectEntryId,
 948        cx: &mut ModelContext<Worktree>,
 949    ) -> Option<Task<Result<()>>> {
 950        let entry = self.entry_for_id(entry_id)?.clone();
 951        let abs_path = self.abs_path.clone();
 952        let fs = self.fs.clone();
 953
 954        let delete = cx.background().spawn(async move {
 955            let mut abs_path = fs.canonicalize(&abs_path).await?;
 956            if entry.path.file_name().is_some() {
 957                abs_path = abs_path.join(&entry.path);
 958            }
 959            if entry.is_file() {
 960                fs.remove_file(&abs_path, Default::default()).await?;
 961            } else {
 962                fs.remove_dir(
 963                    &abs_path,
 964                    RemoveOptions {
 965                        recursive: true,
 966                        ignore_if_not_exists: false,
 967                    },
 968                )
 969                .await?;
 970            }
 971            anyhow::Ok(abs_path)
 972        });
 973
 974        Some(cx.spawn(|this, mut cx| async move {
 975            let abs_path = delete.await?;
 976            let (tx, mut rx) = barrier::channel();
 977            this.update(&mut cx, |this, _| {
 978                this.as_local_mut()
 979                    .unwrap()
 980                    .path_changes_tx
 981                    .try_send((vec![abs_path], tx))
 982            })?;
 983            rx.recv().await;
 984            Ok(())
 985        }))
 986    }
 987
 988    pub fn rename_entry(
 989        &self,
 990        entry_id: ProjectEntryId,
 991        new_path: impl Into<Arc<Path>>,
 992        cx: &mut ModelContext<Worktree>,
 993    ) -> Option<Task<Result<Entry>>> {
 994        let old_path = self.entry_for_id(entry_id)?.path.clone();
 995        let new_path = new_path.into();
 996        let abs_old_path = self.absolutize(&old_path);
 997        let abs_new_path = self.absolutize(&new_path);
 998        let fs = self.fs.clone();
 999        let rename = cx.background().spawn(async move {
1000            fs.rename(&abs_old_path, &abs_new_path, Default::default())
1001                .await
1002        });
1003
1004        Some(cx.spawn(|this, mut cx| async move {
1005            rename.await?;
1006            this.update(&mut cx, |this, cx| {
1007                this.as_local_mut()
1008                    .unwrap()
1009                    .refresh_entry(new_path.clone(), Some(old_path), cx)
1010            })
1011            .await
1012        }))
1013    }
1014
1015    pub fn copy_entry(
1016        &self,
1017        entry_id: ProjectEntryId,
1018        new_path: impl Into<Arc<Path>>,
1019        cx: &mut ModelContext<Worktree>,
1020    ) -> Option<Task<Result<Entry>>> {
1021        let old_path = self.entry_for_id(entry_id)?.path.clone();
1022        let new_path = new_path.into();
1023        let abs_old_path = self.absolutize(&old_path);
1024        let abs_new_path = self.absolutize(&new_path);
1025        let fs = self.fs.clone();
1026        let copy = cx.background().spawn(async move {
1027            copy_recursive(
1028                fs.as_ref(),
1029                &abs_old_path,
1030                &abs_new_path,
1031                Default::default(),
1032            )
1033            .await
1034        });
1035
1036        Some(cx.spawn(|this, mut cx| async move {
1037            copy.await?;
1038            this.update(&mut cx, |this, cx| {
1039                this.as_local_mut()
1040                    .unwrap()
1041                    .refresh_entry(new_path.clone(), None, cx)
1042            })
1043            .await
1044        }))
1045    }
1046
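    // Request a targeted rescan of `path` (and `old_path`, when the refresh is
    // the result of a rename), then return the freshly scanned entry once the
    // background scanner has processed it.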
1047    fn refresh_entry(
1048        &self,
1049        path: Arc<Path>,
1050        old_path: Option<Arc<Path>>,
1051        cx: &mut ModelContext<Worktree>,
1052    ) -> Task<Result<Entry>> {
1053        let fs = self.fs.clone();
1054        let abs_root_path = self.abs_path.clone();
1055        let path_changes_tx = self.path_changes_tx.clone();
1056        cx.spawn_weak(move |this, mut cx| async move {
1057            let abs_path = fs.canonicalize(&abs_root_path).await?;
1058            let mut paths = Vec::with_capacity(2);
1059            paths.push(if path.file_name().is_some() {
1060                abs_path.join(&path)
1061            } else {
1062                abs_path.clone()
1063            });
1064            if let Some(old_path) = old_path {
1065                paths.push(if old_path.file_name().is_some() {
1066                    abs_path.join(&old_path)
1067                } else {
1068                    abs_path.clone()
1069                });
1070            }
1071
1072            let (tx, mut rx) = barrier::channel();
1073            path_changes_tx.try_send((paths, tx))?;
1074            rx.recv().await;
1075            this.upgrade(&cx)
1076                .ok_or_else(|| anyhow!("worktree was dropped"))?
1077                .update(&mut cx, |this, _| {
1078                    this.entry_for_path(path)
1079                        .cloned()
1080                        .ok_or_else(|| anyhow!("failed to read path after update"))
1081                })
1082        })
1083    }
1084
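    // Begin (or resume) streaming this worktree's snapshots to the remote
    // project identified by `project_id`. The returned task resolves once the
    // initial snapshot has been sent, or immediately if already shared.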
1085    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1086        let (share_tx, share_rx) = oneshot::channel();
1087
1088        if let Some(share) = self.share.as_mut() {
1089            let _ = share_tx.send(());
1090            *share.resume_updates.borrow_mut() = ();
1091        } else {
1092            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1093            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1094            let worktree_id = cx.model_id() as u64;
1095
1096            for (path, summaries) in &self.diagnostic_summaries {
1097                for (&server_id, summary) in summaries {
1098                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1099                        project_id,
1100                        worktree_id,
1101                        summary: Some(summary.to_proto(server_id, &path)),
1102                    }) {
1103                        return Task::ready(Err(e));
1104                    }
1105                }
1106            }
1107
1108            let _maintain_remote_snapshot = cx.background().spawn({
1109                let client = self.client.clone();
1110                async move {
1111                    let mut share_tx = Some(share_tx);
1112                    let mut prev_snapshot = LocalSnapshot {
1113                        ignores_by_parent_abs_path: Default::default(),
1114                        removed_entry_ids: Default::default(),
1115                        next_entry_id: Default::default(),
1116                        git_repositories: Default::default(),
1117                        snapshot: Snapshot {
1118                            id: WorktreeId(worktree_id as usize),
1119                            abs_path: Path::new("").into(),
1120                            root_name: Default::default(),
1121                            root_char_bag: Default::default(),
1122                            entries_by_path: Default::default(),
1123                            entries_by_id: Default::default(),
1124                            repository_entries: Default::default(),
1125                            scan_id: 0,
1126                            completed_scan_id: 0,
1127                        },
1128                    };
1129                    while let Some(snapshot) = snapshots_rx.recv().await {
1130                        #[cfg(any(test, feature = "test-support"))]
1131                        const MAX_CHUNK_SIZE: usize = 2;
1132                        #[cfg(not(any(test, feature = "test-support")))]
1133                        const MAX_CHUNK_SIZE: usize = 256;
1134
1135                        let update =
1136                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1137                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1138                            let _ = resume_updates_rx.try_recv();
1139                            while let Err(error) = client.request(update.clone()).await {
1140                                log::error!("failed to send worktree update: {}", error);
1141                                log::info!("waiting to resume updates");
1142                                if resume_updates_rx.next().await.is_none() {
1143                                    return Ok(());
1144                                }
1145                            }
1146                        }
1147
1148                        if let Some(share_tx) = share_tx.take() {
1149                            let _ = share_tx.send(());
1150                        }
1151
1152                        prev_snapshot = snapshot;
1153                    }
1154
1155                    Ok::<_, anyhow::Error>(())
1156                }
1157                .log_err()
1158            });
1159
1160            self.share = Some(ShareState {
1161                project_id,
1162                snapshots_tx,
1163                resume_updates: resume_updates_tx,
1164                _maintain_remote_snapshot,
1165            });
1166        }
1167
1168        cx.foreground()
1169            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1170    }
1171
1172    pub fn unshare(&mut self) {
1173        self.share.take();
1174    }
1175
1176    pub fn is_shared(&self) -> bool {
1177        self.share.is_some()
1178    }
1179}
1180
1181impl RemoteWorktree {
1182    fn snapshot(&self) -> Snapshot {
1183        self.snapshot.clone()
1184    }
1185
1186    pub fn disconnected_from_host(&mut self) {
1187        self.updates_tx.take();
1188        self.snapshot_subscriptions.clear();
1189        self.disconnected = true;
1190    }
1191
1192    pub fn save_buffer(
1193        &self,
1194        buffer_handle: ModelHandle<Buffer>,
1195        cx: &mut ModelContext<Worktree>,
1196    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1197        let buffer = buffer_handle.read(cx);
1198        let buffer_id = buffer.remote_id();
1199        let version = buffer.version();
1200        let rpc = self.client.clone();
1201        let project_id = self.project_id;
1202        cx.as_mut().spawn(|mut cx| async move {
1203            let response = rpc
1204                .request(proto::SaveBuffer {
1205                    project_id,
1206                    buffer_id,
1207                    version: serialize_version(&version),
1208                })
1209                .await?;
1210            let version = deserialize_version(&response.version);
1211            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1212            let mtime = response
1213                .mtime
1214                .ok_or_else(|| anyhow!("missing mtime"))?
1215                .into();
1216
1217            buffer_handle.update(&mut cx, |buffer, cx| {
1218                buffer.did_save(version.clone(), fingerprint, mtime, cx);
1219            });
1220
1221            Ok((version, fingerprint, mtime))
1222        })
1223    }
1224
1225    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1226        if let Some(updates_tx) = &self.updates_tx {
1227            updates_tx
1228                .unbounded_send(update)
1229                .expect("consumer runs to completion");
1230        }
1231    }
1232
1233    fn observed_snapshot(&self, scan_id: usize) -> bool {
1234        self.completed_scan_id >= scan_id
1235    }
1236
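    // Resolve once a snapshot with at least the given `scan_id` has been
    // observed, or fail if the worktree has disconnected from the host.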
1237    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1238        let (tx, rx) = oneshot::channel();
1239        if self.observed_snapshot(scan_id) {
1240            let _ = tx.send(());
1241        } else if self.disconnected {
1242            drop(tx);
1243        } else {
1244            match self
1245                .snapshot_subscriptions
1246                .binary_search_by_key(&scan_id, |probe| probe.0)
1247            {
1248                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1249            }
1250        }
1251
1252        async move {
1253            rx.await?;
1254            Ok(())
1255        }
1256    }
1257
1258    pub fn update_diagnostic_summary(
1259        &mut self,
1260        path: Arc<Path>,
1261        summary: &proto::DiagnosticSummary,
1262    ) {
1263        let server_id = LanguageServerId(summary.language_server_id as usize);
1264        let summary = DiagnosticSummary {
1265            error_count: summary.error_count as usize,
1266            warning_count: summary.warning_count as usize,
1267        };
1268
1269        if summary.is_empty() {
1270            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1271                summaries.remove(&server_id);
1272                if summaries.is_empty() {
1273                    self.diagnostic_summaries.remove(&path);
1274                }
1275            }
1276        } else {
1277            self.diagnostic_summaries
1278                .entry(path)
1279                .or_default()
1280                .insert(server_id, summary);
1281        }
1282    }
1283
1284    pub fn insert_entry(
1285        &mut self,
1286        entry: proto::Entry,
1287        scan_id: usize,
1288        cx: &mut ModelContext<Worktree>,
1289    ) -> Task<Result<Entry>> {
1290        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1291        cx.spawn(|this, mut cx| async move {
1292            wait_for_snapshot.await?;
1293            this.update(&mut cx, |worktree, _| {
1294                let worktree = worktree.as_remote_mut().unwrap();
1295                let mut snapshot = worktree.background_snapshot.lock();
1296                let entry = snapshot.insert_entry(entry);
1297                worktree.snapshot = snapshot.clone();
1298                entry
1299            })
1300        })
1301    }
1302
1303    pub(crate) fn delete_entry(
1304        &mut self,
1305        id: ProjectEntryId,
1306        scan_id: usize,
1307        cx: &mut ModelContext<Worktree>,
1308    ) -> Task<Result<()>> {
1309        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1310        cx.spawn(|this, mut cx| async move {
1311            wait_for_snapshot.await?;
1312            this.update(&mut cx, |worktree, _| {
1313                let worktree = worktree.as_remote_mut().unwrap();
1314                let mut snapshot = worktree.background_snapshot.lock();
1315                snapshot.delete_entry(id);
1316                worktree.snapshot = snapshot.clone();
1317            });
1318            Ok(())
1319        })
1320    }
1321}
1322
1323impl Snapshot {
1324    pub fn id(&self) -> WorktreeId {
1325        self.id
1326    }
1327
1328    pub fn abs_path(&self) -> &Arc<Path> {
1329        &self.abs_path
1330    }
1331
1332    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1333        self.entries_by_id.get(&entry_id, &()).is_some()
1334    }
1335
1336    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1337        let entry = Entry::try_from((&self.root_char_bag, entry))?;
1338        let old_entry = self.entries_by_id.insert_or_replace(
1339            PathEntry {
1340                id: entry.id,
1341                path: entry.path.clone(),
1342                is_ignored: entry.is_ignored,
1343                scan_id: 0,
1344            },
1345            &(),
1346        );
1347        if let Some(old_entry) = old_entry {
1348            self.entries_by_path.remove(&PathKey(old_entry.path), &());
1349        }
1350        self.entries_by_path.insert_or_replace(entry.clone(), &());
1351        Ok(entry)
1352    }
1353
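    // Remove the entry with the given id, along with all of its descendants,
    // from both the path-ordered and id-ordered trees.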
1354    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1355        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1356        self.entries_by_path = {
1357            let mut cursor = self.entries_by_path.cursor();
1358            let mut new_entries_by_path =
1359                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1360            while let Some(entry) = cursor.item() {
1361                if entry.path.starts_with(&removed_entry.path) {
1362                    self.entries_by_id.remove(&entry.id, &());
1363                    cursor.next(&());
1364                } else {
1365                    break;
1366                }
1367            }
1368            new_entries_by_path.push_tree(cursor.suffix(&()), &());
1369            new_entries_by_path
1370        };
1371
1372        Some(removed_entry.path)
1373    }
1374
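    // Apply an update received from the host: remove deleted entries and
    // repositories, insert or replace updated ones, and advance the scan ids.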
1375    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1376        let mut entries_by_path_edits = Vec::new();
1377        let mut entries_by_id_edits = Vec::new();
1378        for entry_id in update.removed_entries {
1379            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1380                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1381                entries_by_id_edits.push(Edit::Remove(entry.id));
1382            }
1383        }
1384
1385        for entry in update.updated_entries {
1386            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1387            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1388                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1389            }
1390            entries_by_id_edits.push(Edit::Insert(PathEntry {
1391                id: entry.id,
1392                path: entry.path.clone(),
1393                is_ignored: entry.is_ignored,
1394                scan_id: 0,
1395            }));
1396            entries_by_path_edits.push(Edit::Insert(entry));
1397        }
1398
1399        self.entries_by_path.edit(entries_by_path_edits, &());
1400        self.entries_by_id.edit(entries_by_id_edits, &());
1401
1402        update.removed_repositories.sort_unstable();
        self.repository_entries.retain(|_, entry| {
            update
                .removed_repositories
                .binary_search(&entry.work_directory.to_proto())
                .is_err()
        });
1413
1414        for repository in update.updated_repositories {
1415            let repository = RepositoryEntry {
1416                work_directory: ProjectEntryId::from_proto(repository.work_directory_id).into(),
1417                branch: repository.branch.map(Into::into),
1418                // TODO: status
1419                statuses: Default::default(),
1420            };
1421            if let Some(entry) = self.entry_for_id(repository.work_directory_id()) {
1422                self.repository_entries
1423                    .insert(RepositoryWorkDirectory(entry.path.clone()), repository)
1424            } else {
1425                log::error!("no work directory entry for repository {:?}", repository)
1426            }
1427        }
1428
1429        self.scan_id = update.scan_id as usize;
1430        if update.is_last_update {
1431            self.completed_scan_id = update.scan_id as usize;
1432        }
1433
1434        Ok(())
1435    }
1436
1437    pub fn file_count(&self) -> usize {
1438        self.entries_by_path.summary().file_count
1439    }
1440
1441    pub fn visible_file_count(&self) -> usize {
1442        self.entries_by_path.summary().visible_file_count
1443    }
1444
1445    fn traverse_from_offset(
1446        &self,
1447        include_dirs: bool,
1448        include_ignored: bool,
1449        start_offset: usize,
1450    ) -> Traversal {
1451        let mut cursor = self.entries_by_path.cursor();
1452        cursor.seek(
1453            &TraversalTarget::Count {
1454                count: start_offset,
1455                include_dirs,
1456                include_ignored,
1457            },
1458            Bias::Right,
1459            &(),
1460        );
1461        Traversal {
1462            cursor,
1463            include_dirs,
1464            include_ignored,
1465        }
1466    }
1467
1468    fn traverse_from_path(
1469        &self,
1470        include_dirs: bool,
1471        include_ignored: bool,
1472        path: &Path,
1473    ) -> Traversal {
1474        let mut cursor = self.entries_by_path.cursor();
1475        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1476        Traversal {
1477            cursor,
1478            include_dirs,
1479            include_ignored,
1480        }
1481    }
1482
1483    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1484        self.traverse_from_offset(false, include_ignored, start)
1485    }
1486
1487    pub fn entries(&self, include_ignored: bool) -> Traversal {
1488        self.traverse_from_offset(true, include_ignored, 0)
1489    }
1490
1491    pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1492        self.repository_entries.values()
1493    }
1494
1495    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1496        let empty_path = Path::new("");
1497        self.entries_by_path
1498            .cursor::<()>()
1499            .filter(move |entry| entry.path.as_ref() != empty_path)
1500            .map(|entry| &entry.path)
1501    }
1502
1503    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1504        let mut cursor = self.entries_by_path.cursor();
1505        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1506        let traversal = Traversal {
1507            cursor,
1508            include_dirs: true,
1509            include_ignored: true,
1510        };
1511        ChildEntriesIter {
1512            traversal,
1513            parent_path,
1514        }
1515    }
1516
1517    pub fn root_entry(&self) -> Option<&Entry> {
1518        self.entry_for_path("")
1519    }
1520
1521    pub fn root_name(&self) -> &str {
1522        &self.root_name
1523    }
1524
1525    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1526        self.repository_entries
1527            .get(&RepositoryWorkDirectory(Path::new("").into()))
1528            .map(|entry| entry.to_owned())
1529    }
1530
1531    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1532        self.repository_entries.values()
1533    }
1534
1535    pub fn scan_id(&self) -> usize {
1536        self.scan_id
1537    }
1538
1539    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1540        let path = path.as_ref();
1541        self.traverse_from_path(true, true, path)
1542            .entry()
1543            .and_then(|entry| {
1544                if entry.path.as_ref() == path {
1545                    Some(entry)
1546                } else {
1547                    None
1548                }
1549            })
1550    }
1551
1552    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1553        let entry = self.entries_by_id.get(&id, &())?;
1554        self.entry_for_path(&entry.path)
1555    }
1556
1557    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1558        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1559    }
1560}
1561
1562impl LocalSnapshot {
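        // Returns the repository entry whose work directory most deeply contains `path`, if any.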
1563    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
1564        let mut max_len = 0;
1565        let mut current_candidate = None;
1566        for (work_directory, repo) in self.repository_entries.iter() {
1567            if repo.contains(self, path) {
1568                if work_directory.0.as_os_str().len() >= max_len {
1569                    current_candidate = Some(repo);
1570                    max_len = work_directory.0.as_os_str().len();
1571                } else {
1572                    break;
1573                }
1574            }
1575        }
1576
1577        current_candidate.map(|entry| entry.to_owned())
1578    }
1579
1580    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
1581        self.git_repositories.get(&repo.work_directory.0)
1582    }
1583
1584    pub(crate) fn repo_for_metadata(
1585        &self,
1586        path: &Path,
1587    ) -> Option<(ProjectEntryId, Arc<Mutex<dyn GitRepository>>)> {
1588        let (entry_id, local_repo) = self
1589            .git_repositories
1590            .iter()
1591            .find(|(_, repo)| repo.in_dot_git(path))?;
1592        Some((*entry_id, local_repo.repo_ptr.to_owned()))
1593    }
1594
1595    #[cfg(test)]
1596    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1597        let root_name = self.root_name.clone();
1598        proto::UpdateWorktree {
1599            project_id,
1600            worktree_id: self.id().to_proto(),
1601            abs_path: self.abs_path().to_string_lossy().into(),
1602            root_name,
1603            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1604            removed_entries: Default::default(),
1605            scan_id: self.scan_id as u64,
1606            is_last_update: true,
1607            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1608            removed_repositories: Default::default(),
1609        }
1610    }
1611
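        // Diff this snapshot against `other` (the previously-synced snapshot) and build an
        // `UpdateWorktree` message containing the entries and repositories that were added,
        // updated, or removed.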
1612    pub(crate) fn build_update(
1613        &self,
1614        other: &Self,
1615        project_id: u64,
1616        worktree_id: u64,
1617        include_ignored: bool,
1618    ) -> proto::UpdateWorktree {
1619        let mut updated_entries = Vec::new();
1620        let mut removed_entries = Vec::new();
1621        let mut self_entries = self
1622            .entries_by_id
1623            .cursor::<()>()
1624            .filter(|e| include_ignored || !e.is_ignored)
1625            .peekable();
1626        let mut other_entries = other
1627            .entries_by_id
1628            .cursor::<()>()
1629            .filter(|e| include_ignored || !e.is_ignored)
1630            .peekable();
1631        loop {
1632            match (self_entries.peek(), other_entries.peek()) {
1633                (Some(self_entry), Some(other_entry)) => {
1634                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1635                        Ordering::Less => {
1636                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1637                            updated_entries.push(entry);
1638                            self_entries.next();
1639                        }
1640                        Ordering::Equal => {
1641                            if self_entry.scan_id != other_entry.scan_id {
1642                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1643                                updated_entries.push(entry);
1644                            }
1645
1646                            self_entries.next();
1647                            other_entries.next();
1648                        }
1649                        Ordering::Greater => {
1650                            removed_entries.push(other_entry.id.to_proto());
1651                            other_entries.next();
1652                        }
1653                    }
1654                }
1655                (Some(self_entry), None) => {
1656                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1657                    updated_entries.push(entry);
1658                    self_entries.next();
1659                }
1660                (None, Some(other_entry)) => {
1661                    removed_entries.push(other_entry.id.to_proto());
1662                    other_entries.next();
1663                }
1664                (None, None) => break,
1665            }
1666        }
1667
1668        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1669        let mut removed_repositories = Vec::new();
1670        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1671        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1672        loop {
1673            match (self_repos.peek(), other_repos.peek()) {
1674                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1675                    match Ord::cmp(self_work_dir, other_work_dir) {
1676                        Ordering::Less => {
1677                            updated_repositories.push((*self_repo).into());
1678                            self_repos.next();
1679                        }
1680                        Ordering::Equal => {
1681                            if self_repo != other_repo {
1682                                updated_repositories.push((*self_repo).into());
1683                            }
1684
1685                            self_repos.next();
1686                            other_repos.next();
1687                        }
1688                        Ordering::Greater => {
1689                            removed_repositories.push(other_repo.work_directory.to_proto());
1690                            other_repos.next();
1691                        }
1692                    }
1693                }
1694                (Some((_, self_repo)), None) => {
1695                    updated_repositories.push((*self_repo).into());
1696                    self_repos.next();
1697                }
1698                (None, Some((_, other_repo))) => {
1699                    removed_repositories.push(other_repo.work_directory.to_proto());
1700                    other_repos.next();
1701                }
1702                (None, None) => break,
1703            }
1704        }
1705
1706        proto::UpdateWorktree {
1707            project_id,
1708            worktree_id,
1709            abs_path: self.abs_path().to_string_lossy().into(),
1710            root_name: self.root_name().to_string(),
1711            updated_entries,
1712            removed_entries,
1713            scan_id: self.scan_id as u64,
1714            is_last_update: self.completed_scan_id == self.scan_id,
1715            updated_repositories,
1716            removed_repositories,
1717        }
1718    }
1719
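        // Insert `entry` into the snapshot, reloading the corresponding `.gitignore` when the
        // entry is an ignore file, and reusing existing entry ids where possible.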
1720    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1721        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1722            let abs_path = self.abs_path.join(&entry.path);
1723            match smol::block_on(build_gitignore(&abs_path, fs)) {
1724                Ok(ignore) => {
1725                    self.ignores_by_parent_abs_path.insert(
1726                        abs_path.parent().unwrap().into(),
1727                        (Arc::new(ignore), self.scan_id),
1728                    );
1729                }
1730                Err(error) => {
1731                    log::error!(
1732                        "error loading .gitignore file {:?} - {:?}",
1733                        &entry.path,
1734                        error
1735                    );
1736                }
1737            }
1738        }
1739
1740        self.reuse_entry_id(&mut entry);
1741
1742        if entry.kind == EntryKind::PendingDir {
1743            if let Some(existing_entry) =
1744                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1745            {
1746                entry.kind = existing_entry.kind;
1747            }
1748        }
1749
1750        let scan_id = self.scan_id;
1751        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1752        if let Some(removed) = removed {
1753            if removed.id != entry.id {
1754                self.entries_by_id.remove(&removed.id, &());
1755            }
1756        }
1757        self.entries_by_id.insert_or_replace(
1758            PathEntry {
1759                id: entry.id,
1760                path: entry.path.clone(),
1761                is_ignored: entry.is_ignored,
1762                scan_id,
1763            },
1764            &(),
1765        );
1766
1767        entry
1768    }
1769
1770    fn populate_dir(
1771        &mut self,
1772        parent_path: Arc<Path>,
1773        entries: impl IntoIterator<Item = Entry>,
1774        ignore: Option<Arc<Gitignore>>,
1775        fs: &dyn Fs,
1776    ) {
1777        let mut parent_entry = if let Some(parent_entry) =
1778            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1779        {
1780            parent_entry.clone()
1781        } else {
1782            log::warn!(
1783                "populating a directory {:?} that has been removed",
1784                parent_path
1785            );
1786            return;
1787        };
1788
1789        match parent_entry.kind {
1790            EntryKind::PendingDir => {
1791                parent_entry.kind = EntryKind::Dir;
1792            }
1793            EntryKind::Dir => {}
1794            _ => return,
1795        }
1796
1797        if let Some(ignore) = ignore {
1798            self.ignores_by_parent_abs_path.insert(
1799                self.abs_path.join(&parent_path).into(),
1800                (ignore, self.scan_id),
1801            );
1802        }
1803
1804        if parent_path.file_name() == Some(&DOT_GIT) {
1805            self.build_repo(parent_path, fs);
1806        }
1807
1808        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1809        let mut entries_by_id_edits = Vec::new();
1810
1811        for mut entry in entries {
1812            self.reuse_entry_id(&mut entry);
1813            entries_by_id_edits.push(Edit::Insert(PathEntry {
1814                id: entry.id,
1815                path: entry.path.clone(),
1816                is_ignored: entry.is_ignored,
1817                scan_id: self.scan_id,
1818            }));
1819            entries_by_path_edits.push(Edit::Insert(entry));
1820        }
1821
1822        self.entries_by_path.edit(entries_by_path_edits, &());
1823        self.entries_by_id.edit(entries_by_id_edits, &());
1824    }
1825
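        // Record the git repository whose `.git` directory lives at `parent_path`, creating both
        // the shared `RepositoryEntry` and the local handle used for status scans.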
1826    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1827        let abs_path = self.abs_path.join(&parent_path);
1828        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1829
1830        // Guard against repositories nested inside another repository's `.git` metadata
1831        if work_dir
1832            .components()
1833            .any(|component| component.as_os_str() == *DOT_GIT)
1834        {
1835            return None;
1836        }
1838
1839        let work_dir_id = self
1840            .entry_for_path(work_dir.clone())
1841            .map(|entry| entry.id)?;
1842
1843        if self.git_repositories.get(&work_dir_id).is_none() {
1844            let repo = fs.open_repo(abs_path.as_path())?;
1845            let work_directory = RepositoryWorkDirectory(work_dir.clone());
1846            let scan_id = self.scan_id;
1847
1848            let repo_lock = repo.lock();
1849            self.repository_entries.insert(
1850                work_directory,
1851                RepositoryEntry {
1852                    work_directory: work_dir_id.into(),
1853                    branch: repo_lock.branch_name().map(Into::into),
1854                    statuses: repo_lock.statuses().unwrap_or_default(),
1855                },
1856            );
1857            drop(repo_lock);
1858
1859            self.git_repositories.insert(
1860                work_dir_id,
1861                LocalRepositoryEntry {
1862                    scan_id,
1863                    full_scan_id: scan_id,
1864                    repo_ptr: repo,
1865                    git_dir_path: parent_path.clone(),
1866                },
1867            )
1868        }
1869
1870        Some(())
1871    }

1872    fn reuse_entry_id(&mut self, entry: &mut Entry) {
1873        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1874            entry.id = removed_entry_id;
1875        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1876            entry.id = existing_entry.id;
1877        }
1878    }
1879
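        // Remove the entry at `path` and all of its descendants, remembering removed ids by inode
        // so they can be reused if the same files reappear (e.g. after a rename).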
1880    fn remove_path(&mut self, path: &Path) {
1881        let mut new_entries;
1882        let removed_entries;
1883        {
1884            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1885            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1886            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1887            new_entries.push_tree(cursor.suffix(&()), &());
1888        }
1889        self.entries_by_path = new_entries;
1890
1891        let mut entries_by_id_edits = Vec::new();
1892        for entry in removed_entries.cursor::<()>() {
1893            let removed_entry_id = self
1894                .removed_entry_ids
1895                .entry(entry.inode)
1896                .or_insert(entry.id);
1897            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1898            entries_by_id_edits.push(Edit::Remove(entry.id));
1899        }
1900        self.entries_by_id.edit(entries_by_id_edits, &());
1901
1902        if path.file_name() == Some(&GITIGNORE) {
1903            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1904            if let Some((_, scan_id)) = self
1905                .ignores_by_parent_abs_path
1906                .get_mut(abs_parent_path.as_path())
1907            {
1908                *scan_id = self.snapshot.scan_id;
1909            }
1910        }
1911    }
1912
1913    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1914        let mut inodes = TreeSet::default();
1915        for ancestor in path.ancestors().skip(1) {
1916            if let Some(entry) = self.entry_for_path(ancestor) {
1917                inodes.insert(entry.inode);
1918            }
1919        }
1920        inodes
1921    }
1922
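        // Build the stack of `.gitignore` files that apply to `abs_path` by walking its ancestors
        // from the outermost directory inward.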
1923    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1924        let mut new_ignores = Vec::new();
1925        for ancestor in abs_path.ancestors().skip(1) {
1926            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1927                new_ignores.push((ancestor, Some(ignore.clone())));
1928            } else {
1929                new_ignores.push((ancestor, None));
1930            }
1931        }
1932
1933        let mut ignore_stack = IgnoreStack::none();
1934        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1935            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1936                ignore_stack = IgnoreStack::all();
1937                break;
1938            } else if let Some(ignore) = ignore {
1939                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1940            }
1941        }
1942
1943        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1944            ignore_stack = IgnoreStack::all();
1945        }
1946
1947        ignore_stack
1948    }
1949}
1950
1951async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1952    let contents = fs.load(abs_path).await?;
1953    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1954    let mut builder = GitignoreBuilder::new(parent);
1955    for line in contents.lines() {
1956        builder.add_line(Some(abs_path.into()), line)?;
1957    }
1958    Ok(builder.build()?)
1959}
1960
1961impl WorktreeId {
1962    pub fn from_usize(handle_id: usize) -> Self {
1963        Self(handle_id)
1964    }
1965
1966    pub(crate) fn from_proto(id: u64) -> Self {
1967        Self(id as usize)
1968    }
1969
1970    pub fn to_proto(&self) -> u64 {
1971        self.0 as u64
1972    }
1973
1974    pub fn to_usize(&self) -> usize {
1975        self.0
1976    }
1977}
1978
1979impl fmt::Display for WorktreeId {
1980    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1981        self.0.fmt(f)
1982    }
1983}
1984
1985impl Deref for Worktree {
1986    type Target = Snapshot;
1987
1988    fn deref(&self) -> &Self::Target {
1989        match self {
1990            Worktree::Local(worktree) => &worktree.snapshot,
1991            Worktree::Remote(worktree) => &worktree.snapshot,
1992        }
1993    }
1994}
1995
1996impl Deref for LocalWorktree {
1997    type Target = LocalSnapshot;
1998
1999    fn deref(&self) -> &Self::Target {
2000        &self.snapshot
2001    }
2002}
2003
2004impl Deref for RemoteWorktree {
2005    type Target = Snapshot;
2006
2007    fn deref(&self) -> &Self::Target {
2008        &self.snapshot
2009    }
2010}
2011
2012impl fmt::Debug for LocalWorktree {
2013    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2014        self.snapshot.fmt(f)
2015    }
2016}
2017
2018impl fmt::Debug for Snapshot {
2019    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2020        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2021        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2022
2023        impl<'a> fmt::Debug for EntriesByPath<'a> {
2024            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2025                f.debug_map()
2026                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2027                    .finish()
2028            }
2029        }
2030
2031        impl<'a> fmt::Debug for EntriesById<'a> {
2032            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2033                f.debug_list().entries(self.0.iter()).finish()
2034            }
2035        }
2036
2037        f.debug_struct("Snapshot")
2038            .field("id", &self.id)
2039            .field("root_name", &self.root_name)
2040            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2041            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2042            .finish()
2043    }
2044}
2045
2046#[derive(Clone, PartialEq)]
2047pub struct File {
2048    pub worktree: ModelHandle<Worktree>,
2049    pub path: Arc<Path>,
2050    pub mtime: SystemTime,
2051    pub(crate) entry_id: ProjectEntryId,
2052    pub(crate) is_local: bool,
2053    pub(crate) is_deleted: bool,
2054}
2055
2056impl language::File for File {
2057    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2058        if self.is_local {
2059            Some(self)
2060        } else {
2061            None
2062        }
2063    }
2064
2065    fn mtime(&self) -> SystemTime {
2066        self.mtime
2067    }
2068
2069    fn path(&self) -> &Arc<Path> {
2070        &self.path
2071    }
2072
2073    fn full_path(&self, cx: &AppContext) -> PathBuf {
2074        let mut full_path = PathBuf::new();
2075        let worktree = self.worktree.read(cx);
2076
2077        if worktree.is_visible() {
2078            full_path.push(worktree.root_name());
2079        } else {
2080            let path = worktree.abs_path();
2081
2082            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2083                full_path.push("~");
2084                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2085            } else {
2086                full_path.push(path)
2087            }
2088        }
2089
2090        if self.path.components().next().is_some() {
2091            full_path.push(&self.path);
2092        }
2093
2094        full_path
2095    }
2096
2097    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2098    /// of its worktree, then this method will return the name of the worktree itself.
2099    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2100        self.path
2101            .file_name()
2102            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2103    }
2104
2105    fn is_deleted(&self) -> bool {
2106        self.is_deleted
2107    }
2108
2109    fn as_any(&self) -> &dyn Any {
2110        self
2111    }
2112
2113    fn to_proto(&self) -> rpc::proto::File {
2114        rpc::proto::File {
2115            worktree_id: self.worktree.id() as u64,
2116            entry_id: self.entry_id.to_proto(),
2117            path: self.path.to_string_lossy().into(),
2118            mtime: Some(self.mtime.into()),
2119            is_deleted: self.is_deleted,
2120        }
2121    }
2122}
2123
2124impl language::LocalFile for File {
2125    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2126        self.worktree
2127            .read(cx)
2128            .as_local()
2129            .unwrap()
2130            .abs_path
2131            .join(&self.path)
2132    }
2133
2134    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2135        let worktree = self.worktree.read(cx).as_local().unwrap();
2136        let abs_path = worktree.absolutize(&self.path);
2137        let fs = worktree.fs.clone();
2138        cx.background()
2139            .spawn(async move { fs.load(&abs_path).await })
2140    }
2141
2142    fn buffer_reloaded(
2143        &self,
2144        buffer_id: u64,
2145        version: &clock::Global,
2146        fingerprint: RopeFingerprint,
2147        line_ending: LineEnding,
2148        mtime: SystemTime,
2149        cx: &mut AppContext,
2150    ) {
2151        let worktree = self.worktree.read(cx).as_local().unwrap();
2152        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2153            worktree
2154                .client
2155                .send(proto::BufferReloaded {
2156                    project_id,
2157                    buffer_id,
2158                    version: serialize_version(version),
2159                    mtime: Some(mtime.into()),
2160                    fingerprint: serialize_fingerprint(fingerprint),
2161                    line_ending: serialize_line_ending(line_ending) as i32,
2162                })
2163                .log_err();
2164        }
2165    }
2166}
2167
2168impl File {
2169    pub fn from_proto(
2170        proto: rpc::proto::File,
2171        worktree: ModelHandle<Worktree>,
2172        cx: &AppContext,
2173    ) -> Result<Self> {
2174        let worktree_id = worktree
2175            .read(cx)
2176            .as_remote()
2177            .ok_or_else(|| anyhow!("not remote"))?
2178            .id();
2179
2180        if worktree_id.to_proto() != proto.worktree_id {
2181            return Err(anyhow!("worktree id does not match file"));
2182        }
2183
2184        Ok(Self {
2185            worktree,
2186            path: Path::new(&proto.path).into(),
2187            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2188            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2189            is_local: false,
2190            is_deleted: proto.is_deleted,
2191        })
2192    }
2193
2194    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2195        file.and_then(|f| f.as_any().downcast_ref())
2196    }
2197
2198    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2199        self.worktree.read(cx).id()
2200    }
2201
2202    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2203        if self.is_deleted {
2204            None
2205        } else {
2206            Some(self.entry_id)
2207        }
2208    }
2209}
2210
2211#[derive(Clone, Debug, PartialEq, Eq)]
2212pub struct Entry {
2213    pub id: ProjectEntryId,
2214    pub kind: EntryKind,
2215    pub path: Arc<Path>,
2216    pub inode: u64,
2217    pub mtime: SystemTime,
2218    pub is_symlink: bool,
2219    pub is_ignored: bool,
2220}
2221
2222#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2223pub enum EntryKind {
2224    PendingDir,
2225    Dir,
2226    File(CharBag),
2227}
2228
2229#[derive(Clone, Copy, Debug)]
2230pub enum PathChange {
2231    Added,
2232    Removed,
2233    Updated,
2234    AddedOrUpdated,
2235}
2236
2237impl Entry {
2238    fn new(
2239        path: Arc<Path>,
2240        metadata: &fs::Metadata,
2241        next_entry_id: &AtomicUsize,
2242        root_char_bag: CharBag,
2243    ) -> Self {
2244        Self {
2245            id: ProjectEntryId::new(next_entry_id),
2246            kind: if metadata.is_dir {
2247                EntryKind::PendingDir
2248            } else {
2249                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2250            },
2251            path,
2252            inode: metadata.inode,
2253            mtime: metadata.mtime,
2254            is_symlink: metadata.is_symlink,
2255            is_ignored: false,
2256        }
2257    }
2258
2259    pub fn is_dir(&self) -> bool {
2260        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2261    }
2262
2263    pub fn is_file(&self) -> bool {
2264        matches!(self.kind, EntryKind::File(_))
2265    }
2266}
2267
2268impl sum_tree::Item for Entry {
2269    type Summary = EntrySummary;
2270
2271    fn summary(&self) -> Self::Summary {
2272        let visible_count = if self.is_ignored { 0 } else { 1 };
2273        let file_count;
2274        let visible_file_count;
2275        if self.is_file() {
2276            file_count = 1;
2277            visible_file_count = visible_count;
2278        } else {
2279            file_count = 0;
2280            visible_file_count = 0;
2281        }
2282
2283        EntrySummary {
2284            max_path: self.path.clone(),
2285            count: 1,
2286            visible_count,
2287            file_count,
2288            visible_file_count,
2289        }
2290    }
2291}
2292
2293impl sum_tree::KeyedItem for Entry {
2294    type Key = PathKey;
2295
2296    fn key(&self) -> Self::Key {
2297        PathKey(self.path.clone())
2298    }
2299}
2300
2301#[derive(Clone, Debug)]
2302pub struct EntrySummary {
2303    max_path: Arc<Path>,
2304    count: usize,
2305    visible_count: usize,
2306    file_count: usize,
2307    visible_file_count: usize,
2308}
2309
2310impl Default for EntrySummary {
2311    fn default() -> Self {
2312        Self {
2313            max_path: Arc::from(Path::new("")),
2314            count: 0,
2315            visible_count: 0,
2316            file_count: 0,
2317            visible_file_count: 0,
2318        }
2319    }
2320}
2321
2322impl sum_tree::Summary for EntrySummary {
2323    type Context = ();
2324
2325    fn add_summary(&mut self, rhs: &Self, _: &()) {
2326        self.max_path = rhs.max_path.clone();
2327        self.count += rhs.count;
2328        self.visible_count += rhs.visible_count;
2329        self.file_count += rhs.file_count;
2330        self.visible_file_count += rhs.visible_file_count;
2331    }
2332}
2333
2334#[derive(Clone, Debug)]
2335struct PathEntry {
2336    id: ProjectEntryId,
2337    path: Arc<Path>,
2338    is_ignored: bool,
2339    scan_id: usize,
2340}
2341
2342impl sum_tree::Item for PathEntry {
2343    type Summary = PathEntrySummary;
2344
2345    fn summary(&self) -> Self::Summary {
2346        PathEntrySummary { max_id: self.id }
2347    }
2348}
2349
2350impl sum_tree::KeyedItem for PathEntry {
2351    type Key = ProjectEntryId;
2352
2353    fn key(&self) -> Self::Key {
2354        self.id
2355    }
2356}
2357
2358#[derive(Clone, Debug, Default)]
2359struct PathEntrySummary {
2360    max_id: ProjectEntryId,
2361}
2362
2363impl sum_tree::Summary for PathEntrySummary {
2364    type Context = ();
2365
2366    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2367        self.max_id = summary.max_id;
2368    }
2369}
2370
2371impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2372    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2373        *self = summary.max_id;
2374    }
2375}
2376
2377#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2378pub struct PathKey(Arc<Path>);
2379
2380impl Default for PathKey {
2381    fn default() -> Self {
2382        Self(Path::new("").into())
2383    }
2384}
2385
2386impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2387    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2388        self.0 = summary.max_path.clone();
2389    }
2390}
2391
2392struct BackgroundScanner {
2393    snapshot: Mutex<LocalSnapshot>,
2394    fs: Arc<dyn Fs>,
2395    status_updates_tx: UnboundedSender<ScanState>,
2396    executor: Arc<executor::Background>,
2397    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2398    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
2399    finished_initial_scan: bool,
2400}
2401
2402impl BackgroundScanner {
2403    fn new(
2404        snapshot: LocalSnapshot,
2405        fs: Arc<dyn Fs>,
2406        status_updates_tx: UnboundedSender<ScanState>,
2407        executor: Arc<executor::Background>,
2408        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2409    ) -> Self {
2410        Self {
2411            fs,
2412            status_updates_tx,
2413            executor,
2414            refresh_requests_rx,
2415            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2416            snapshot: Mutex::new(snapshot),
2417            finished_initial_scan: false,
2418        }
2419    }
2420
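        // Main loop of the background scanner: perform the initial recursive scan, then keep
        // applying file system events and explicit refresh requests until the worktree is dropped.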
2421    async fn run(
2422        &mut self,
2423        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2424    ) {
2425        use futures::FutureExt as _;
2426
2427        let (root_abs_path, root_inode) = {
2428            let snapshot = self.snapshot.lock();
2429            (
2430                snapshot.abs_path.clone(),
2431                snapshot.root_entry().map(|e| e.inode),
2432            )
2433        };
2434
2435        // Populate ignores above the root.
2436        let ignore_stack;
2437        for ancestor in root_abs_path.ancestors().skip(1) {
2438            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2439            {
2440                self.snapshot
2441                    .lock()
2442                    .ignores_by_parent_abs_path
2443                    .insert(ancestor.into(), (ignore.into(), 0));
2444            }
2445        }
2446        {
2447            let mut snapshot = self.snapshot.lock();
2448            snapshot.scan_id += 1;
2449            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2450            if ignore_stack.is_all() {
2451                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2452                    root_entry.is_ignored = true;
2453                    snapshot.insert_entry(root_entry, self.fs.as_ref());
2454                }
2455            }
2456        };
2457
2458        // Perform an initial scan of the directory.
2459        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2460        smol::block_on(scan_job_tx.send(ScanJob {
2461            abs_path: root_abs_path,
2462            path: Arc::from(Path::new("")),
2463            ignore_stack,
2464            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2465            scan_queue: scan_job_tx.clone(),
2466        }))
2467        .unwrap();
2468        drop(scan_job_tx);
2469        self.scan_dirs(true, scan_job_rx).await;
2470        {
2471            let mut snapshot = self.snapshot.lock();
2472            snapshot.completed_scan_id = snapshot.scan_id;
2473        }
2474        self.send_status_update(false, None);
2475
2476        // Process any FS events that occurred while performing the initial scan.
2477        // For these events, the reported changes cannot be as precise, because we
2478        // didn't have the previous state loaded yet.
2479        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2480            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2481            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2482                paths.extend(more_events.into_iter().map(|e| e.path));
2483            }
2484            self.process_events(paths).await;
2485        }
2486
2487        self.finished_initial_scan = true;
2488
2489        // Continue processing events until the worktree is dropped.
2490        loop {
2491            select_biased! {
2492                // Process any path refresh requests from the worktree. Prioritize
2493                // these before handling changes reported by the filesystem.
2494                request = self.refresh_requests_rx.recv().fuse() => {
2495                    let Ok((paths, barrier)) = request else { break };
2496                    if !self.process_refresh_request(paths, barrier).await {
2497                        return;
2498                    }
2499                }
2500
2501                events = events_rx.next().fuse() => {
2502                    let Some(events) = events else { break };
2503                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2504                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2505                        paths.extend(more_events.into_iter().map(|e| e.path));
2506                    }
2507                    self.process_events(paths).await;
2508                }
2509            }
2510        }
2511    }
2512
2513    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2514        self.reload_entries_for_paths(paths, None).await;
2515        self.send_status_update(false, Some(barrier))
2516    }
2517
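        // Apply a batch of file system events: reload the affected entries, rescan any changed
        // directories, refresh ignore statuses, and prune git repositories whose `.git`
        // directories no longer exist.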
2518    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2519        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2520        if let Some(mut paths) = self
2521            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2522            .await
2523        {
2524            paths.sort_unstable();
2525            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
2526        }
2527        drop(scan_job_tx);
2528        self.scan_dirs(false, scan_job_rx).await;
2529
2530        self.update_ignore_statuses().await;
2531
2532        let mut snapshot = self.snapshot.lock();
2533
2534        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2535        git_repositories.retain(|work_directory_id, _| {
2536            snapshot
2537                .entry_for_id(*work_directory_id)
2538                .map_or(false, |entry| {
2539                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2540                })
2541        });
2542        snapshot.git_repositories = git_repositories;
2543
2544        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2545        git_repository_entries.retain(|_, entry| {
2546            snapshot
2547                .git_repositories
2548                .get(&entry.work_directory.0)
2549                .is_some()
2550        });
2551        snapshot.snapshot.repository_entries = git_repository_entries;
2552
2553        snapshot.removed_entry_ids.clear();
2554        snapshot.completed_scan_id = snapshot.scan_id;
2555
2556        drop(snapshot);
2557
2558        self.send_status_update(false, None);
2559    }
2560
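        // Drain the scan queue on one worker task per CPU, periodically reporting progress and
        // giving priority to any pending refresh requests.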
2561    async fn scan_dirs(
2562        &self,
2563        enable_progress_updates: bool,
2564        scan_jobs_rx: channel::Receiver<ScanJob>,
2565    ) {
2566        use futures::FutureExt as _;
2567
2568        if self
2569            .status_updates_tx
2570            .unbounded_send(ScanState::Started)
2571            .is_err()
2572        {
2573            return;
2574        }
2575
2576        let progress_update_count = AtomicUsize::new(0);
2577        self.executor
2578            .scoped(|scope| {
2579                for _ in 0..self.executor.num_cpus() {
2580                    scope.spawn(async {
2581                        let mut last_progress_update_count = 0;
2582                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2583                        futures::pin_mut!(progress_update_timer);
2584
2585                        loop {
2586                            select_biased! {
2587                                // Process any path refresh requests before moving on to process
2588                                // the scan queue, so that user operations are prioritized.
2589                                request = self.refresh_requests_rx.recv().fuse() => {
2590                                    let Ok((paths, barrier)) = request else { break };
2591                                    if !self.process_refresh_request(paths, barrier).await {
2592                                        return;
2593                                    }
2594                                }
2595
2596                                // Send periodic progress updates to the worktree. Use an atomic counter
2597                                // to ensure that only one of the workers sends a progress update after
2598                                // the update interval elapses.
2599                                _ = progress_update_timer => {
2600                                    match progress_update_count.compare_exchange(
2601                                        last_progress_update_count,
2602                                        last_progress_update_count + 1,
2603                                        SeqCst,
2604                                        SeqCst
2605                                    ) {
2606                                        Ok(_) => {
2607                                            last_progress_update_count += 1;
2608                                            self.send_status_update(true, None);
2609                                        }
2610                                        Err(count) => {
2611                                            last_progress_update_count = count;
2612                                        }
2613                                    }
2614                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2615                                }
2616
2617                                // Recursively load directories from the file system.
2618                                job = scan_jobs_rx.recv().fuse() => {
2619                                    let Ok(job) = job else { break };
2620                                    if let Err(err) = self.scan_dir(&job).await {
2621                                        if job.path.as_ref() != Path::new("") {
2622                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2623                                        }
2624                                    }
2625                                }
2626                            }
2627                        }
2628                    })
2629                }
2630            })
2631            .await;
2632    }
2633
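        // Publish the current snapshot to the worktree, along with the set of paths that changed
        // since the previous update.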
2634    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2635        let mut prev_state = self.prev_state.lock();
2636        let snapshot = self.snapshot.lock().clone();
2637        let mut old_snapshot = snapshot.snapshot.clone();
2638        mem::swap(&mut old_snapshot, &mut prev_state.0);
2639        let changed_paths = mem::take(&mut prev_state.1);
2640        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
2641        self.status_updates_tx
2642            .unbounded_send(ScanState::Updated {
2643                snapshot,
2644                changes,
2645                scanning,
2646                barrier,
2647            })
2648            .is_ok()
2649    }
2650
2651    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2652        let mut new_entries: Vec<Entry> = Vec::new();
2653        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2654        let mut ignore_stack = job.ignore_stack.clone();
2655        let mut new_ignore = None;
2656        let (root_abs_path, root_char_bag, next_entry_id) = {
2657            let snapshot = self.snapshot.lock();
2658            (
2659                snapshot.abs_path().clone(),
2660                snapshot.root_char_bag,
2661                snapshot.next_entry_id.clone(),
2662            )
2663        };
2664        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2665        while let Some(child_abs_path) = child_paths.next().await {
2666            let child_abs_path: Arc<Path> = match child_abs_path {
2667                Ok(child_abs_path) => child_abs_path.into(),
2668                Err(error) => {
2669                    log::error!("error processing entry {:?}", error);
2670                    continue;
2671                }
2672            };
2673
2674            let child_name = child_abs_path.file_name().unwrap();
2675            let child_path: Arc<Path> = job.path.join(child_name).into();
2676            let child_metadata = match self.fs.metadata(&child_abs_path).await {
2677                Ok(Some(metadata)) => metadata,
2678                Ok(None) => continue,
2679                Err(err) => {
2680                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
2681                    continue;
2682                }
2683            };
2684
2685            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2686            if child_name == *GITIGNORE {
2687                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2688                    Ok(ignore) => {
2689                        let ignore = Arc::new(ignore);
2690                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2691                        new_ignore = Some(ignore);
2692                    }
2693                    Err(error) => {
2694                        log::error!(
2695                            "error loading .gitignore file {:?} - {:?}",
2696                            child_name,
2697                            error
2698                        );
2699                    }
2700                }
2701
2702                // Update the ignore status of any child entries we've already processed to reflect
2703                // the ignore file in the current directory. Because `.gitignore` starts with a `.`,
2704                // it is usually encountered early, so there should rarely be many entries to fix up.
2705                // Update the ignore stack associated with any new jobs as well.
2706                let mut new_jobs = new_jobs.iter_mut();
2707                for entry in &mut new_entries {
2708                    let entry_abs_path = root_abs_path.join(&entry.path);
2709                    entry.is_ignored =
2710                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2711
2712                    if entry.is_dir() {
2713                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2714                            job.ignore_stack = if entry.is_ignored {
2715                                IgnoreStack::all()
2716                            } else {
2717                                ignore_stack.clone()
2718                            };
2719                        }
2720                    }
2721                }
2722            }
2723
2724            let mut child_entry = Entry::new(
2725                child_path.clone(),
2726                &child_metadata,
2727                &next_entry_id,
2728                root_char_bag,
2729            );
2730
2731            if child_entry.is_dir() {
2732                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2733                child_entry.is_ignored = is_ignored;
2734
2735                // Avoid recursing forever (and eventually crashing) when a symlink creates a cycle
2736                if !job.ancestor_inodes.contains(&child_entry.inode) {
2737                    let mut ancestor_inodes = job.ancestor_inodes.clone();
2738                    ancestor_inodes.insert(child_entry.inode);
2739
2740                    new_jobs.push(Some(ScanJob {
2741                        abs_path: child_abs_path,
2742                        path: child_path,
2743                        ignore_stack: if is_ignored {
2744                            IgnoreStack::all()
2745                        } else {
2746                            ignore_stack.clone()
2747                        },
2748                        ancestor_inodes,
2749                        scan_queue: job.scan_queue.clone(),
2750                    }));
2751                } else {
2752                    new_jobs.push(None);
2753                }
2754            } else {
2755                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2756            }
2757
2758            new_entries.push(child_entry);
2759        }
2760
2761        self.snapshot.lock().populate_dir(
2762            job.path.clone(),
2763            new_entries,
2764            new_ignore,
2765            self.fs.as_ref(),
2766        );
2767
2768        for new_job in new_jobs.into_iter().flatten() {
2769            job.scan_queue.send(new_job).await.unwrap();
2770        }
2773
2774        Ok(())
2775    }
2776
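        // Refresh the snapshot entries for the given absolute paths, returning the corresponding
        // worktree-relative paths. When `scan_queue_tx` is provided, changed directories are
        // queued for a recursive rescan.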
2777    async fn reload_entries_for_paths(
2778        &self,
2779        mut abs_paths: Vec<PathBuf>,
2780        scan_queue_tx: Option<Sender<ScanJob>>,
2781    ) -> Option<Vec<Arc<Path>>> {
2782        let doing_recursive_update = scan_queue_tx.is_some();
2783
2784        abs_paths.sort_unstable();
2785        abs_paths.dedup_by(|a, b| a.starts_with(&b));
2786
2787        let root_abs_path = self.snapshot.lock().abs_path.clone();
2788        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
2789        let metadata = futures::future::join_all(
2790            abs_paths
2791                .iter()
2792                .map(|abs_path| self.fs.metadata(&abs_path))
2793                .collect::<Vec<_>>(),
2794        )
2795        .await;
2796
2797        let mut snapshot = self.snapshot.lock();
2798        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
2799        snapshot.scan_id += 1;
2800        if is_idle && !doing_recursive_update {
2801            snapshot.completed_scan_id = snapshot.scan_id;
2802        }
2803
2804        // Remove any entries for paths that no longer exist or are being recursively
2805        // refreshed. Do this before adding any new entries, so that renames can be
2806        // detected regardless of the order of the paths.
2807        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
2808        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
2809            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
2810                if matches!(metadata, Ok(None)) || doing_recursive_update {
2811                    snapshot.remove_path(path);
2812                }
2813                event_paths.push(path.into());
2814            } else {
2815                log::error!(
2816                    "unexpected event {:?} for root path {:?}",
2817                    abs_path,
2818                    root_canonical_path
2819                );
2820            }
2821        }
2822
2823        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
2824            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
2825
2826            match metadata {
2827                Ok(Some(metadata)) => {
2828                    let ignore_stack =
2829                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2830                    let mut fs_entry = Entry::new(
2831                        path.clone(),
2832                        &metadata,
2833                        snapshot.next_entry_id.as_ref(),
2834                        snapshot.root_char_bag,
2835                    );
2836                    fs_entry.is_ignored = ignore_stack.is_all();
2837                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
2838
2839                    self.reload_repo_for_path(&path, &mut snapshot);
2840
2841                    if let Some(scan_queue_tx) = &scan_queue_tx {
2842                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2843                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2844                            ancestor_inodes.insert(metadata.inode);
2845                            smol::block_on(scan_queue_tx.send(ScanJob {
2846                                abs_path,
2847                                path,
2848                                ignore_stack,
2849                                ancestor_inodes,
2850                                scan_queue: scan_queue_tx.clone(),
2851                            }))
2852                            .unwrap();
2853                        }
2854                    }
2855                }
2856                Ok(None) => {}
2857                Err(err) => {
2858                    // TODO - create a special 'error' entry in the entries tree to mark this
2859                    log::error!("error reading file on event {:?}", err);
2860                }
2861            }
2862        }
2863
2864        Some(event_paths)
2865    }
2866
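        // Update git state for a changed path: reload the whole repository when something inside a
        // `.git` directory changed, otherwise refresh the status of the single changed file.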
2867    fn reload_repo_for_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
2868        let scan_id = snapshot.scan_id;
2869
2870        if path
2871            .components()
2872            .any(|component| component.as_os_str() == *DOT_GIT)
2873        {
2874            let (entry_id, repo) = snapshot.repo_for_metadata(&path)?;
2875
2876            let work_dir = snapshot
2877                .entry_for_id(entry_id)
2878                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
2879
2880            let repo = repo.lock();
2881            repo.reload_index();
2882            let branch = repo.branch_name();
2883            let statuses = repo.statuses().unwrap_or_default();
2884
2885            snapshot.git_repositories.update(&entry_id, |entry| {
2886                entry.scan_id = scan_id;
2887                entry.full_scan_id = scan_id;
2888            });
2889
2890            snapshot.repository_entries.update(&work_dir, |entry| {
2891                entry.branch = branch.map(Into::into);
2892                entry.statuses = statuses;
2893            });
2894        } else if let Some(repo) = snapshot.repo_for(&path) {
2895            let status = {
2896                let local_repo = snapshot.get_local_repo(&repo)?;
2897                // Short circuit if we've already scanned everything
2898                if local_repo.full_scan_id == scan_id {
2899                    return None;
2900                }
2901
2902                let repo_path = repo.work_directory.relativize(&snapshot, &path)?;
2903                let git_ptr = local_repo.repo_ptr.lock();
2904                git_ptr.file_status(&repo_path)?
2905            };
2906
2907            if status != GitStatus::Untracked {
2908                let work_dir = repo.work_directory(snapshot)?;
2909                let work_dir_id = repo.work_directory;
2910
2911                snapshot
2912                    .git_repositories
2913                    .update(&work_dir_id, |entry| entry.scan_id = scan_id);
2914
2915                snapshot
2916                    .repository_entries
2917                    .update(&work_dir, |entry| entry.statuses.insert(repo_path, status));
2918            }
2919        }
2920
2921        Some(())
2922    }
2923
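        // Recompute ignore statuses for directories whose `.gitignore` files changed since the
        // last completed scan, and drop cached ignores whose `.gitignore` files were deleted.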
2924    async fn update_ignore_statuses(&self) {
2925        use futures::FutureExt as _;
2926
2927        let mut snapshot = self.snapshot.lock().clone();
2928        let mut ignores_to_update = Vec::new();
2929        let mut ignores_to_delete = Vec::new();
2930        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2931            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2932                if *scan_id > snapshot.completed_scan_id
2933                    && snapshot.entry_for_path(parent_path).is_some()
2934                {
2935                    ignores_to_update.push(parent_abs_path.clone());
2936                }
2937
2938                let ignore_path = parent_path.join(&*GITIGNORE);
2939                if snapshot.entry_for_path(ignore_path).is_none() {
2940                    ignores_to_delete.push(parent_abs_path.clone());
2941                }
2942            }
2943        }
2944
2945        for parent_abs_path in ignores_to_delete {
2946            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2947            self.snapshot
2948                .lock()
2949                .ignores_by_parent_abs_path
2950                .remove(&parent_abs_path);
2951        }
2952
2953        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2954        ignores_to_update.sort_unstable();
2955        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2956        while let Some(parent_abs_path) = ignores_to_update.next() {
2957            while ignores_to_update
2958                .peek()
2959                .map_or(false, |p| p.starts_with(&parent_abs_path))
2960            {
2961                ignores_to_update.next().unwrap();
2962            }
2963
2964            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2965            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
2966                abs_path: parent_abs_path,
2967                ignore_stack,
2968                ignore_queue: ignore_queue_tx.clone(),
2969            }))
2970            .unwrap();
2971        }
2972        drop(ignore_queue_tx);
2973
2974        self.executor
2975            .scoped(|scope| {
2976                for _ in 0..self.executor.num_cpus() {
2977                    scope.spawn(async {
2978                        loop {
2979                            select_biased! {
2980                                // Process any path refresh requests before moving on to process
2981                                // the queue of ignore statuses.
2982                                request = self.refresh_requests_rx.recv().fuse() => {
2983                                    let Ok((paths, barrier)) = request else { break };
2984                                    if !self.process_refresh_request(paths, barrier).await {
2985                                        return;
2986                                    }
2987                                }
2988
2989                                // Recursively process directories whose ignores have changed.
2990                                job = ignore_queue_rx.recv().fuse() => {
2991                                    let Ok(job) = job else { break };
2992                                    self.update_ignore_status(job, &snapshot).await;
2993                                }
2994                            }
2995                        }
2996                    });
2997                }
2998            })
2999            .await;
3000    }
3001
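        // Processes a single `UpdateIgnoreStatusJob`: extends the ignore stack with
        // this directory's own `.gitignore` (if any), recomputes `is_ignored` for
        // each child entry, enqueues child directories for the same treatment, and
        // writes any entries whose status changed back into the shared snapshot.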
3002    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
3003        let mut ignore_stack = job.ignore_stack;
3004        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
3005            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
3006        }
3007
3008        let mut entries_by_id_edits = Vec::new();
3009        let mut entries_by_path_edits = Vec::new();
3010        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
3011        for mut entry in snapshot.child_entries(path).cloned() {
3012            let was_ignored = entry.is_ignored;
3013            let abs_path = snapshot.abs_path().join(&entry.path);
3014            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
3015            if entry.is_dir() {
3016                let child_ignore_stack = if entry.is_ignored {
3017                    IgnoreStack::all()
3018                } else {
3019                    ignore_stack.clone()
3020                };
3021                job.ignore_queue
3022                    .send(UpdateIgnoreStatusJob {
3023                        abs_path: abs_path.into(),
3024                        ignore_stack: child_ignore_stack,
3025                        ignore_queue: job.ignore_queue.clone(),
3026                    })
3027                    .await
3028                    .unwrap();
3029            }
3030
3031            if entry.is_ignored != was_ignored {
3032                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
3033                path_entry.scan_id = snapshot.scan_id;
3034                path_entry.is_ignored = entry.is_ignored;
3035                entries_by_id_edits.push(Edit::Insert(path_entry));
3036                entries_by_path_edits.push(Edit::Insert(entry));
3037            }
3038        }
3039
3040        let mut snapshot = self.snapshot.lock();
3041        snapshot.entries_by_path.edit(entries_by_path_edits, &());
3042        snapshot.entries_by_id.edit(entries_by_id_edits, &());
3043    }
3044
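        // Diffs the old and new snapshots around each event path, walking both
        // entry trees in lockstep and classifying entries as Added, Removed, or
        // Updated. Events received before the initial scan finished are reported
        // as AddedOrUpdated, since added vs. updated can't be distinguished then.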
3045    fn build_change_set(
3046        &self,
3047        old_snapshot: &Snapshot,
3048        new_snapshot: &Snapshot,
3049        event_paths: Vec<Arc<Path>>,
3050    ) -> HashMap<Arc<Path>, PathChange> {
3051        use PathChange::{Added, AddedOrUpdated, Removed, Updated};
3052
3053        let mut changes = HashMap::default();
3054        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3055        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3056        let received_before_initialized = !self.finished_initial_scan;
3057
3058        for path in event_paths {
3059            let path = PathKey(path);
3060            old_paths.seek(&path, Bias::Left, &());
3061            new_paths.seek(&path, Bias::Left, &());
3062
3063            loop {
3064                match (old_paths.item(), new_paths.item()) {
3065                    (Some(old_entry), Some(new_entry)) => {
3066                        if old_entry.path > path.0
3067                            && new_entry.path > path.0
3068                            && !old_entry.path.starts_with(&path.0)
3069                            && !new_entry.path.starts_with(&path.0)
3070                        {
3071                            break;
3072                        }
3073
3074                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3075                            Ordering::Less => {
3076                                changes.insert(old_entry.path.clone(), Removed);
3077                                old_paths.next(&());
3078                            }
3079                            Ordering::Equal => {
3080                                if received_before_initialized {
3081                                    // If the worktree was not fully initialized when this event was generated,
3082                                    // we can't know whether this entry was added during the scan or whether
3083                                    // it was merely updated.
3084                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
3085                                } else if old_entry.mtime != new_entry.mtime {
3086                                    changes.insert(new_entry.path.clone(), Updated);
3087                                }
3088                                old_paths.next(&());
3089                                new_paths.next(&());
3090                            }
3091                            Ordering::Greater => {
3092                                changes.insert(new_entry.path.clone(), Added);
3093                                new_paths.next(&());
3094                            }
3095                        }
3096                    }
3097                    (Some(old_entry), None) => {
3098                        changes.insert(old_entry.path.clone(), Removed);
3099                        old_paths.next(&());
3100                    }
3101                    (None, Some(new_entry)) => {
3102                        changes.insert(new_entry.path.clone(), Added);
3103                        new_paths.next(&());
3104                    }
3105                    (None, None) => break,
3106                }
3107            }
3108        }
3109        changes
3110    }
3111
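        // Used to emit periodic progress notifications while a scan is running:
        // resolves after ~100ms when `running` is true and never resolves otherwise.
        // Under the fake filesystem in tests it awaits a simulated random delay.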
3112    async fn progress_timer(&self, running: bool) {
3113        if !running {
3114            return futures::future::pending().await;
3115        }
3116
3117        #[cfg(any(test, feature = "test-support"))]
3118        if self.fs.is_fake() {
3119            return self.executor.simulate_random_delay().await;
3120        }
3121
3122        smol::Timer::after(Duration::from_millis(100)).await;
3123    }
3124}
3125
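    // Builds the character bag used for fuzzy-matching an entry: the worktree
    // root's bag extended with the lowercased characters of the entry's path.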
3126fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3127    let mut result = root_char_bag;
3128    result.extend(
3129        path.to_string_lossy()
3130            .chars()
3131            .map(|c| c.to_ascii_lowercase()),
3132    );
3133    result
3134}
3135
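    // A unit of work for the background scanner: one directory to scan, the ignore
    // stack in effect there, a sender for enqueueing subdirectories, and the inodes
    // of ancestor directories, used to avoid re-descending into a directory that is
    // already on the current path (e.g. through circular symlinks).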
3136struct ScanJob {
3137    abs_path: Arc<Path>,
3138    path: Arc<Path>,
3139    ignore_stack: Arc<IgnoreStack>,
3140    scan_queue: Sender<ScanJob>,
3141    ancestor_inodes: TreeSet<u64>,
3142}
3143
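    // A unit of work for `update_ignore_statuses`: one directory whose entries need
    // their ignore status recomputed, plus a sender for enqueueing its children.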
3144struct UpdateIgnoreStatusJob {
3145    abs_path: Arc<Path>,
3146    ignore_stack: Arc<IgnoreStack>,
3147    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3148}
3149
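    // Test-only extension methods on worktree handles. A typical use (sketch,
    // mirroring the tests below; the path is illustrative) is to flush pending FS
    // events before asserting on the worktree's entries:
    //
    //     tree.flush_fs_events(cx).await;
    //     tree.read_with(cx, |tree, _| {
    //         assert!(tree.entry_for_path("some/path").is_some());
    //     });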
3150pub trait WorktreeHandle {
3151    #[cfg(any(test, feature = "test-support"))]
3152    fn flush_fs_events<'a>(
3153        &self,
3154        cx: &'a gpui::TestAppContext,
3155    ) -> futures::future::LocalBoxFuture<'a, ()>;
3156}
3157
3158impl WorktreeHandle for ModelHandle<Worktree> {
3159    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes
3160    // that occurred before the worktree was constructed. These events can cause the worktree
3161    // to perform extra directory scans and emit extra scan-state notifications.
3162    //
3163    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3164    // to ensure that all redundant FS events have already been processed.
3165    #[cfg(any(test, feature = "test-support"))]
3166    fn flush_fs_events<'a>(
3167        &self,
3168        cx: &'a gpui::TestAppContext,
3169    ) -> futures::future::LocalBoxFuture<'a, ()> {
3170        use smol::future::FutureExt;
3171
3172        let filename = "fs-event-sentinel";
3173        let tree = self.clone();
3174        let (fs, root_path) = self.read_with(cx, |tree, _| {
3175            let tree = tree.as_local().unwrap();
3176            (tree.fs.clone(), tree.abs_path().clone())
3177        });
3178
3179        async move {
3180            fs.create_file(&root_path.join(filename), Default::default())
3181                .await
3182                .unwrap();
3183            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3184                .await;
3185
3186            fs.remove_file(&root_path.join(filename), Default::default())
3187                .await
3188                .unwrap();
3189            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3190                .await;
3191
3192            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3193                .await;
3194        }
3195        .boxed_local()
3196    }
3197}
3198
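// A sum-tree dimension accumulated while traversing entries: the maximum path seen
// so far, plus counts of all entries, visible (non-ignored) entries, files, and
// visible files up to the cursor's position.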
3199#[derive(Clone, Debug)]
3200struct TraversalProgress<'a> {
3201    max_path: &'a Path,
3202    count: usize,
3203    visible_count: usize,
3204    file_count: usize,
3205    visible_file_count: usize,
3206}
3207
3208impl<'a> TraversalProgress<'a> {
3209    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3210        match (include_ignored, include_dirs) {
3211            (true, true) => self.count,
3212            (true, false) => self.file_count,
3213            (false, true) => self.visible_count,
3214            (false, false) => self.visible_file_count,
3215        }
3216    }
3217}
3218
3219impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3220    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3221        self.max_path = summary.max_path.as_ref();
3222        self.count += summary.count;
3223        self.visible_count += summary.visible_count;
3224        self.file_count += summary.file_count;
3225        self.visible_file_count += summary.visible_file_count;
3226    }
3227}
3228
3229impl<'a> Default for TraversalProgress<'a> {
3230    fn default() -> Self {
3231        Self {
3232            max_path: Path::new(""),
3233            count: 0,
3234            visible_count: 0,
3235            file_count: 0,
3236            visible_file_count: 0,
3237        }
3238    }
3239}
3240
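// A cursor-based iterator over worktree entries that can optionally skip
// directories and/or ignored entries, and supports seeking by entry offset or
// skipping ahead to the next sibling of the current entry.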
3241pub struct Traversal<'a> {
3242    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3243    include_ignored: bool,
3244    include_dirs: bool,
3245}
3246
3247impl<'a> Traversal<'a> {
3248    pub fn advance(&mut self) -> bool {
3249        self.advance_to_offset(self.offset() + 1)
3250    }
3251
3252    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
3253        self.cursor.seek_forward(
3254            &TraversalTarget::Count {
3255                count: offset,
3256                include_dirs: self.include_dirs,
3257                include_ignored: self.include_ignored,
3258            },
3259            Bias::Right,
3260            &(),
3261        )
3262    }
3263
3264    pub fn advance_to_sibling(&mut self) -> bool {
3265        while let Some(entry) = self.cursor.item() {
3266            self.cursor.seek_forward(
3267                &TraversalTarget::PathSuccessor(&entry.path),
3268                Bias::Left,
3269                &(),
3270            );
3271            if let Some(entry) = self.cursor.item() {
3272                if (self.include_dirs || !entry.is_dir())
3273                    && (self.include_ignored || !entry.is_ignored)
3274                {
3275                    return true;
3276                }
3277            }
3278        }
3279        false
3280    }
3281
3282    pub fn entry(&self) -> Option<&'a Entry> {
3283        self.cursor.item()
3284    }
3285
3286    pub fn offset(&self) -> usize {
3287        self.cursor
3288            .start()
3289            .count(self.include_dirs, self.include_ignored)
3290    }
3291}
3292
3293impl<'a> Iterator for Traversal<'a> {
3294    type Item = &'a Entry;
3295
3296    fn next(&mut self) -> Option<Self::Item> {
3297        if let Some(item) = self.entry() {
3298            self.advance();
3299            Some(item)
3300        } else {
3301            None
3302        }
3303    }
3304}
3305
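// Seek targets for a `Traversal`'s cursor: an exact path, the first path that is
// not a descendant of the given path (`PathSuccessor`), or an entry count under
// the traversal's directory/ignored filters.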
3306#[derive(Debug)]
3307enum TraversalTarget<'a> {
3308    Path(&'a Path),
3309    PathSuccessor(&'a Path),
3310    Count {
3311        count: usize,
3312        include_ignored: bool,
3313        include_dirs: bool,
3314    },
3315}
3316
3317impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3318    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3319        match self {
3320            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3321            TraversalTarget::PathSuccessor(path) => {
3322                if !cursor_location.max_path.starts_with(path) {
3323                    Ordering::Equal
3324                } else {
3325                    Ordering::Greater
3326                }
3327            }
3328            TraversalTarget::Count {
3329                count,
3330                include_dirs,
3331                include_ignored,
3332            } => Ord::cmp(
3333                count,
3334                &cursor_location.count(*include_dirs, *include_ignored),
3335            ),
3336        }
3337    }
3338}
3339
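// Iterates the direct children of `parent_path` by repeatedly advancing the
// underlying traversal to the next sibling entry.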
3340struct ChildEntriesIter<'a> {
3341    parent_path: &'a Path,
3342    traversal: Traversal<'a>,
3343}
3344
3345impl<'a> Iterator for ChildEntriesIter<'a> {
3346    type Item = &'a Entry;
3347
3348    fn next(&mut self) -> Option<Self::Item> {
3349        if let Some(item) = self.traversal.entry() {
3350            if item.path.starts_with(&self.parent_path) {
3351                self.traversal.advance_to_sibling();
3352                return Some(item);
3353            }
3354        }
3355        None
3356    }
3357}
3358
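// Conversions between the in-memory `Entry` and its protobuf representation,
// used when syncing worktree snapshots with remote peers. The reverse conversion
// fails if the remote entry is missing an mtime.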
3359impl<'a> From<&'a Entry> for proto::Entry {
3360    fn from(entry: &'a Entry) -> Self {
3361        Self {
3362            id: entry.id.to_proto(),
3363            is_dir: entry.is_dir(),
3364            path: entry.path.to_string_lossy().into(),
3365            inode: entry.inode,
3366            mtime: Some(entry.mtime.into()),
3367            is_symlink: entry.is_symlink,
3368            is_ignored: entry.is_ignored,
3369        }
3370    }
3371}
3372
3373impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3374    type Error = anyhow::Error;
3375
3376    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3377        if let Some(mtime) = entry.mtime {
3378            let kind = if entry.is_dir {
3379                EntryKind::Dir
3380            } else {
3381                let mut char_bag = *root_char_bag;
3382                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3383                EntryKind::File(char_bag)
3384            };
3385            let path: Arc<Path> = PathBuf::from(entry.path).into();
3386            Ok(Entry {
3387                id: ProjectEntryId::from_proto(entry.id),
3388                kind,
3389                path,
3390                inode: entry.inode,
3391                mtime: mtime.into(),
3392                is_symlink: entry.is_symlink,
3393                is_ignored: entry.is_ignored,
3394            })
3395        } else {
3396            Err(anyhow!(
3397                "missing mtime in remote worktree entry {:?}",
3398                entry.path
3399            ))
3400        }
3401    }
3402}
3403
3404#[cfg(test)]
3405mod tests {
3406    use super::*;
3407    use fs::{FakeFs, RealFs};
3408    use gpui::{executor::Deterministic, TestAppContext};
3409    use pretty_assertions::assert_eq;
3410    use rand::prelude::*;
3411    use serde_json::json;
3412    use std::{env, fmt::Write};
3413    use util::{http::FakeHttpClient, test::temp_tree};
3414
3415    #[gpui::test]
3416    async fn test_traversal(cx: &mut TestAppContext) {
3417        let fs = FakeFs::new(cx.background());
3418        fs.insert_tree(
3419            "/root",
3420            json!({
3421               ".gitignore": "a/b\n",
3422               "a": {
3423                   "b": "",
3424                   "c": "",
3425               }
3426            }),
3427        )
3428        .await;
3429
3430        let http_client = FakeHttpClient::with_404_response();
3431        let client = cx.read(|cx| Client::new(http_client, cx));
3432
3433        let tree = Worktree::local(
3434            client,
3435            Path::new("/root"),
3436            true,
3437            fs,
3438            Default::default(),
3439            &mut cx.to_async(),
3440        )
3441        .await
3442        .unwrap();
3443        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3444            .await;
3445
3446        tree.read_with(cx, |tree, _| {
3447            assert_eq!(
3448                tree.entries(false)
3449                    .map(|entry| entry.path.as_ref())
3450                    .collect::<Vec<_>>(),
3451                vec![
3452                    Path::new(""),
3453                    Path::new(".gitignore"),
3454                    Path::new("a"),
3455                    Path::new("a/c"),
3456                ]
3457            );
3458            assert_eq!(
3459                tree.entries(true)
3460                    .map(|entry| entry.path.as_ref())
3461                    .collect::<Vec<_>>(),
3462                vec![
3463                    Path::new(""),
3464                    Path::new(".gitignore"),
3465                    Path::new("a"),
3466                    Path::new("a/b"),
3467                    Path::new("a/c"),
3468                ]
3469            );
3470        })
3471    }
3472
3473    #[gpui::test(iterations = 10)]
3474    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3475        let fs = FakeFs::new(cx.background());
3476        fs.insert_tree(
3477            "/root",
3478            json!({
3479                "lib": {
3480                    "a": {
3481                        "a.txt": ""
3482                    },
3483                    "b": {
3484                        "b.txt": ""
3485                    }
3486                }
3487            }),
3488        )
3489        .await;
3490        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3491        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3492
3493        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3494        let tree = Worktree::local(
3495            client,
3496            Path::new("/root"),
3497            true,
3498            fs.clone(),
3499            Default::default(),
3500            &mut cx.to_async(),
3501        )
3502        .await
3503        .unwrap();
3504
3505        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3506            .await;
3507
3508        tree.read_with(cx, |tree, _| {
3509            assert_eq!(
3510                tree.entries(false)
3511                    .map(|entry| entry.path.as_ref())
3512                    .collect::<Vec<_>>(),
3513                vec![
3514                    Path::new(""),
3515                    Path::new("lib"),
3516                    Path::new("lib/a"),
3517                    Path::new("lib/a/a.txt"),
3518                    Path::new("lib/a/lib"),
3519                    Path::new("lib/b"),
3520                    Path::new("lib/b/b.txt"),
3521                    Path::new("lib/b/lib"),
3522                ]
3523            );
3524        });
3525
3526        fs.rename(
3527            Path::new("/root/lib/a/lib"),
3528            Path::new("/root/lib/a/lib-2"),
3529            Default::default(),
3530        )
3531        .await
3532        .unwrap();
3533        executor.run_until_parked();
3534        tree.read_with(cx, |tree, _| {
3535            assert_eq!(
3536                tree.entries(false)
3537                    .map(|entry| entry.path.as_ref())
3538                    .collect::<Vec<_>>(),
3539                vec![
3540                    Path::new(""),
3541                    Path::new("lib"),
3542                    Path::new("lib/a"),
3543                    Path::new("lib/a/a.txt"),
3544                    Path::new("lib/a/lib-2"),
3545                    Path::new("lib/b"),
3546                    Path::new("lib/b/b.txt"),
3547                    Path::new("lib/b/lib"),
3548                ]
3549            );
3550        });
3551    }
3552
3553    #[gpui::test]
3554    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3555        let parent_dir = temp_tree(json!({
3556            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3557            "tree": {
3558                ".git": {},
3559                ".gitignore": "ignored-dir\n",
3560                "tracked-dir": {
3561                    "tracked-file1": "",
3562                    "ancestor-ignored-file1": "",
3563                },
3564                "ignored-dir": {
3565                    "ignored-file1": ""
3566                }
3567            }
3568        }));
3569        let dir = parent_dir.path().join("tree");
3570
3571        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3572
3573        let tree = Worktree::local(
3574            client,
3575            dir.as_path(),
3576            true,
3577            Arc::new(RealFs),
3578            Default::default(),
3579            &mut cx.to_async(),
3580        )
3581        .await
3582        .unwrap();
3583        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3584            .await;
3585        tree.flush_fs_events(cx).await;
3586        cx.read(|cx| {
3587            let tree = tree.read(cx);
3588            assert!(
3589                !tree
3590                    .entry_for_path("tracked-dir/tracked-file1")
3591                    .unwrap()
3592                    .is_ignored
3593            );
3594            assert!(
3595                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3596                    .unwrap()
3597                    .is_ignored
3598            );
3599            assert!(
3600                tree.entry_for_path("ignored-dir/ignored-file1")
3601                    .unwrap()
3602                    .is_ignored
3603            );
3604        });
3605
3606        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3607        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3608        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3609        tree.flush_fs_events(cx).await;
3610        cx.read(|cx| {
3611            let tree = tree.read(cx);
3612            assert!(
3613                !tree
3614                    .entry_for_path("tracked-dir/tracked-file2")
3615                    .unwrap()
3616                    .is_ignored
3617            );
3618            assert!(
3619                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3620                    .unwrap()
3621                    .is_ignored
3622            );
3623            assert!(
3624                tree.entry_for_path("ignored-dir/ignored-file2")
3625                    .unwrap()
3626                    .is_ignored
3627            );
3628            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3629        });
3630    }
3631
3632    #[gpui::test]
3633    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3634        let root = temp_tree(json!({
3635            "dir1": {
3636                ".git": {},
3637                "deps": {
3638                    "dep1": {
3639                        ".git": {},
3640                        "src": {
3641                            "a.txt": ""
3642                        }
3643                    }
3644                },
3645                "src": {
3646                    "b.txt": ""
3647                }
3648            },
3649            "c.txt": "",
3650        }));
3651
3652        let http_client = FakeHttpClient::with_404_response();
3653        let client = cx.read(|cx| Client::new(http_client, cx));
3654        let tree = Worktree::local(
3655            client,
3656            root.path(),
3657            true,
3658            Arc::new(RealFs),
3659            Default::default(),
3660            &mut cx.to_async(),
3661        )
3662        .await
3663        .unwrap();
3664
3665        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3666            .await;
3667        tree.flush_fs_events(cx).await;
3668
3669        tree.read_with(cx, |tree, _cx| {
3670            let tree = tree.as_local().unwrap();
3671
3672            assert!(tree.repo_for("c.txt".as_ref()).is_none());
3673
3674            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3675            assert_eq!(
3676                entry
3677                    .work_directory(tree)
3678                    .map(|directory| directory.as_ref().to_owned()),
3679                Some(Path::new("dir1").to_owned())
3680            );
3681
3682            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3683            assert_eq!(
3684                entry
3685                    .work_directory(tree)
3686                    .map(|directory| directory.as_ref().to_owned()),
3687                Some(Path::new("dir1/deps/dep1").to_owned())
3688            );
3689        });
3690
3691        let repo_update_events = Arc::new(Mutex::new(vec![]));
3692        tree.update(cx, |_, cx| {
3693            let repo_update_events = repo_update_events.clone();
3694            cx.subscribe(&tree, move |_, _, event, _| {
3695                if let Event::UpdatedGitRepositories(update) = event {
3696                    repo_update_events.lock().push(update.clone());
3697                }
3698            })
3699            .detach();
3700        });
3701
3702        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3703        tree.flush_fs_events(cx).await;
3704
3705        assert_eq!(
3706            repo_update_events.lock()[0]
3707                .keys()
3708                .cloned()
3709                .collect::<Vec<Arc<Path>>>(),
3710            vec![Path::new("dir1").into()]
3711        );
3712
3713        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3714        tree.flush_fs_events(cx).await;
3715
3716        tree.read_with(cx, |tree, _cx| {
3717            let tree = tree.as_local().unwrap();
3718
3719            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3720        });
3721    }
3722
3723    #[gpui::test]
3724    async fn test_write_file(cx: &mut TestAppContext) {
3725        let dir = temp_tree(json!({
3726            ".git": {},
3727            ".gitignore": "ignored-dir\n",
3728            "tracked-dir": {},
3729            "ignored-dir": {}
3730        }));
3731
3732        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3733
3734        let tree = Worktree::local(
3735            client,
3736            dir.path(),
3737            true,
3738            Arc::new(RealFs),
3739            Default::default(),
3740            &mut cx.to_async(),
3741        )
3742        .await
3743        .unwrap();
3744        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3745            .await;
3746        tree.flush_fs_events(cx).await;
3747
3748        tree.update(cx, |tree, cx| {
3749            tree.as_local().unwrap().write_file(
3750                Path::new("tracked-dir/file.txt"),
3751                "hello".into(),
3752                Default::default(),
3753                cx,
3754            )
3755        })
3756        .await
3757        .unwrap();
3758        tree.update(cx, |tree, cx| {
3759            tree.as_local().unwrap().write_file(
3760                Path::new("ignored-dir/file.txt"),
3761                "world".into(),
3762                Default::default(),
3763                cx,
3764            )
3765        })
3766        .await
3767        .unwrap();
3768
3769        tree.read_with(cx, |tree, _| {
3770            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3771            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3772            assert!(!tracked.is_ignored);
3773            assert!(ignored.is_ignored);
3774        });
3775    }
3776
3777    #[gpui::test(iterations = 30)]
3778    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3779        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3780
3781        let fs = FakeFs::new(cx.background());
3782        fs.insert_tree(
3783            "/root",
3784            json!({
3785                "b": {},
3786                "c": {},
3787                "d": {},
3788            }),
3789        )
3790        .await;
3791
3792        let tree = Worktree::local(
3793            client,
3794            "/root".as_ref(),
3795            true,
3796            fs,
3797            Default::default(),
3798            &mut cx.to_async(),
3799        )
3800        .await
3801        .unwrap();
3802
3803        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3804
3805        let entry = tree
3806            .update(cx, |tree, cx| {
3807                tree.as_local_mut()
3808                    .unwrap()
3809                    .create_entry("a/e".as_ref(), true, cx)
3810            })
3811            .await
3812            .unwrap();
3813        assert!(entry.is_dir());
3814
3815        cx.foreground().run_until_parked();
3816        tree.read_with(cx, |tree, _| {
3817            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3818        });
3819
3820        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3821        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3822        snapshot1.apply_remote_update(update).unwrap();
3823        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3824    }
3825
3826    #[gpui::test(iterations = 100)]
3827    async fn test_random_worktree_operations_during_initial_scan(
3828        cx: &mut TestAppContext,
3829        mut rng: StdRng,
3830    ) {
3831        let operations = env::var("OPERATIONS")
3832            .map(|o| o.parse().unwrap())
3833            .unwrap_or(5);
3834        let initial_entries = env::var("INITIAL_ENTRIES")
3835            .map(|o| o.parse().unwrap())
3836            .unwrap_or(20);
3837
3838        let root_dir = Path::new("/test");
3839        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3840        fs.as_fake().insert_tree(root_dir, json!({})).await;
3841        for _ in 0..initial_entries {
3842            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3843        }
3844        log::info!("generated initial tree");
3845
3846        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3847        let worktree = Worktree::local(
3848            client.clone(),
3849            root_dir,
3850            true,
3851            fs.clone(),
3852            Default::default(),
3853            &mut cx.to_async(),
3854        )
3855        .await
3856        .unwrap();
3857
3858        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3859
3860        for _ in 0..operations {
3861            worktree
3862                .update(cx, |worktree, cx| {
3863                    randomly_mutate_worktree(worktree, &mut rng, cx)
3864                })
3865                .await
3866                .log_err();
3867            worktree.read_with(cx, |tree, _| {
3868                tree.as_local().unwrap().snapshot.check_invariants()
3869            });
3870
3871            if rng.gen_bool(0.6) {
3872                let new_snapshot =
3873                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3874                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
3875                snapshot.apply_remote_update(update.clone()).unwrap();
3876                assert_eq!(
3877                    snapshot.to_vec(true),
3878                    new_snapshot.to_vec(true),
3879                    "incorrect snapshot after update {:?}",
3880                    update
3881                );
3882            }
3883        }
3884
3885        worktree
3886            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3887            .await;
3888        worktree.read_with(cx, |tree, _| {
3889            tree.as_local().unwrap().snapshot.check_invariants()
3890        });
3891
3892        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3893        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
3894        snapshot.apply_remote_update(update.clone()).unwrap();
3895        assert_eq!(
3896            snapshot.to_vec(true),
3897            new_snapshot.to_vec(true),
3898            "incorrect snapshot after update {:?}",
3899            update
3900        );
3901    }
3902
3903    #[gpui::test(iterations = 100)]
3904    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3905        let operations = env::var("OPERATIONS")
3906            .map(|o| o.parse().unwrap())
3907            .unwrap_or(40);
3908        let initial_entries = env::var("INITIAL_ENTRIES")
3909            .map(|o| o.parse().unwrap())
3910            .unwrap_or(20);
3911
3912        let root_dir = Path::new("/test");
3913        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3914        fs.as_fake().insert_tree(root_dir, json!({})).await;
3915        for _ in 0..initial_entries {
3916            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3917        }
3918        log::info!("generated initial tree");
3919
3920        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3921        let worktree = Worktree::local(
3922            client.clone(),
3923            root_dir,
3924            true,
3925            fs.clone(),
3926            Default::default(),
3927            &mut cx.to_async(),
3928        )
3929        .await
3930        .unwrap();
3931
3932        worktree
3933            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3934            .await;
3935
3936        // After the initial scan is complete, the `UpdatedEntries` event can
3937        // be used to follow along with all changes to the worktree's snapshot.
3938        worktree.update(cx, |tree, cx| {
3939            let mut paths = tree
3940                .as_local()
3941                .unwrap()
3942                .paths()
3943                .cloned()
3944                .collect::<Vec<_>>();
3945
3946            cx.subscribe(&worktree, move |tree, _, event, _| {
3947                if let Event::UpdatedEntries(changes) = event {
3948                    for (path, change_type) in changes.iter() {
3949                        let path = path.clone();
3950                        let ix = match paths.binary_search(&path) {
3951                            Ok(ix) | Err(ix) => ix,
3952                        };
3953                        match change_type {
3954                            PathChange::Added => {
3955                                assert_ne!(paths.get(ix), Some(&path));
3956                                paths.insert(ix, path);
3957                            }
3958                            PathChange::Removed => {
3959                                assert_eq!(paths.get(ix), Some(&path));
3960                                paths.remove(ix);
3961                            }
3962                            PathChange::Updated => {
3963                                assert_eq!(paths.get(ix), Some(&path));
3964                            }
3965                            PathChange::AddedOrUpdated => {
3966                                if paths[ix] != path {
3967                                    paths.insert(ix, path);
3968                                }
3969                            }
3970                        }
3971                    }
3972                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
3973                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
3974                }
3975            })
3976            .detach();
3977        });
3978
3979        let mut snapshots = Vec::new();
3980        let mut mutations_len = operations;
3981        while mutations_len > 1 {
3982            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3983            let buffered_event_count = fs.as_fake().buffered_event_count().await;
3984            if buffered_event_count > 0 && rng.gen_bool(0.3) {
3985                let len = rng.gen_range(0..=buffered_event_count);
3986                log::info!("flushing {} events", len);
3987                fs.as_fake().flush_events(len).await;
3988            } else {
3989                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
3990                mutations_len -= 1;
3991            }
3992
3993            cx.foreground().run_until_parked();
3994            if rng.gen_bool(0.2) {
3995                log::info!("storing snapshot {}", snapshots.len());
3996                let snapshot =
3997                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3998                snapshots.push(snapshot);
3999            }
4000        }
4001
4002        log::info!("quiescing");
4003        fs.as_fake().flush_events(usize::MAX).await;
4004        cx.foreground().run_until_parked();
4005        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4006        snapshot.check_invariants();
4007
4008        {
4009            let new_worktree = Worktree::local(
4010                client.clone(),
4011                root_dir,
4012                true,
4013                fs.clone(),
4014                Default::default(),
4015                &mut cx.to_async(),
4016            )
4017            .await
4018            .unwrap();
4019            new_worktree
4020                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4021                .await;
4022            let new_snapshot =
4023                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4024            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4025        }
4026
4027        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4028            let include_ignored = rng.gen::<bool>();
4029            if !include_ignored {
4030                let mut entries_by_path_edits = Vec::new();
4031                let mut entries_by_id_edits = Vec::new();
4032                for entry in prev_snapshot
4033                    .entries_by_id
4034                    .cursor::<()>()
4035                    .filter(|e| e.is_ignored)
4036                {
4037                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4038                    entries_by_id_edits.push(Edit::Remove(entry.id));
4039                }
4040
4041                prev_snapshot
4042                    .entries_by_path
4043                    .edit(entries_by_path_edits, &());
4044                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4045            }
4046
4047            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4048            prev_snapshot.apply_remote_update(update.clone()).unwrap();
4049            assert_eq!(
4050                prev_snapshot.to_vec(include_ignored),
4051                snapshot.to_vec(include_ignored),
4052                "wrong update for snapshot {i}. update: {:?}",
4053                update
4054            );
4055        }
4056    }
4057
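        // Applies one random mutation through the worktree's public API: delete a
        // random entry, rename it to a new location, or create / overwrite a child
        // entry, returning the resulting task.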
4058    fn randomly_mutate_worktree(
4059        worktree: &mut Worktree,
4060        rng: &mut impl Rng,
4061        cx: &mut ModelContext<Worktree>,
4062    ) -> Task<Result<()>> {
4063        let worktree = worktree.as_local_mut().unwrap();
4064        let snapshot = worktree.snapshot();
4065        let entry = snapshot.entries(false).choose(rng).unwrap();
4066
4067        match rng.gen_range(0_u32..100) {
4068            0..=33 if entry.path.as_ref() != Path::new("") => {
4069                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4070                worktree.delete_entry(entry.id, cx).unwrap()
4071            }
4072            ..=66 if entry.path.as_ref() != Path::new("") => {
4073                let other_entry = snapshot.entries(false).choose(rng).unwrap();
4074                let new_parent_path = if other_entry.is_dir() {
4075                    other_entry.path.clone()
4076                } else {
4077                    other_entry.path.parent().unwrap().into()
4078                };
4079                let mut new_path = new_parent_path.join(gen_name(rng));
4080                if new_path.starts_with(&entry.path) {
4081                    new_path = gen_name(rng).into();
4082                }
4083
4084                log::info!(
4085                    "renaming entry {:?} ({}) to {:?}",
4086                    entry.path,
4087                    entry.id.0,
4088                    new_path
4089                );
4090                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4091                cx.foreground().spawn(async move {
4092                    task.await?;
4093                    Ok(())
4094                })
4095            }
4096            _ => {
4097                let task = if entry.is_dir() {
4098                    let child_path = entry.path.join(gen_name(rng));
4099                    let is_dir = rng.gen_bool(0.3);
4100                    log::info!(
4101                        "creating {} at {:?}",
4102                        if is_dir { "dir" } else { "file" },
4103                        child_path,
4104                    );
4105                    worktree.create_entry(child_path, is_dir, cx)
4106                } else {
4107                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4108                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4109                };
4110                cx.foreground().spawn(async move {
4111                    task.await?;
4112                    Ok(())
4113                })
4114            }
4115        }
4116    }
4117
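        // Mutates the fake filesystem directly (this helper requires `FakeFs`):
        // it creates new files and directories, occasionally writes a random
        // `.gitignore`, and otherwise renames or deletes an existing path, so that
        // the scanner's change tracking gets exercised.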
4118    async fn randomly_mutate_fs(
4119        fs: &Arc<dyn Fs>,
4120        root_path: &Path,
4121        insertion_probability: f64,
4122        rng: &mut impl Rng,
4123    ) {
4124        let mut files = Vec::new();
4125        let mut dirs = Vec::new();
4126        for path in fs.as_fake().paths() {
4127            if path.starts_with(root_path) {
4128                if fs.is_file(&path).await {
4129                    files.push(path);
4130                } else {
4131                    dirs.push(path);
4132                }
4133            }
4134        }
4135
4136        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4137            let path = dirs.choose(rng).unwrap();
4138            let new_path = path.join(gen_name(rng));
4139
4140            if rng.gen() {
4141                log::info!(
4142                    "creating dir {:?}",
4143                    new_path.strip_prefix(root_path).unwrap()
4144                );
4145                fs.create_dir(&new_path).await.unwrap();
4146            } else {
4147                log::info!(
4148                    "creating file {:?}",
4149                    new_path.strip_prefix(root_path).unwrap()
4150                );
4151                fs.create_file(&new_path, Default::default()).await.unwrap();
4152            }
4153        } else if rng.gen_bool(0.05) {
4154            let ignore_dir_path = dirs.choose(rng).unwrap();
4155            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4156
4157            let subdirs = dirs
4158                .iter()
4159                .filter(|d| d.starts_with(&ignore_dir_path))
4160                .cloned()
4161                .collect::<Vec<_>>();
4162            let subfiles = files
4163                .iter()
4164                .filter(|d| d.starts_with(&ignore_dir_path))
4165                .cloned()
4166                .collect::<Vec<_>>();
4167            let files_to_ignore = {
4168                let len = rng.gen_range(0..=subfiles.len());
4169                subfiles.choose_multiple(rng, len)
4170            };
4171            let dirs_to_ignore = {
4172                let len = rng.gen_range(0..subdirs.len());
4173                subdirs.choose_multiple(rng, len)
4174            };
4175
4176            let mut ignore_contents = String::new();
4177            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4178                writeln!(
4179                    ignore_contents,
4180                    "{}",
4181                    path_to_ignore
4182                        .strip_prefix(&ignore_dir_path)
4183                        .unwrap()
4184                        .to_str()
4185                        .unwrap()
4186                )
4187                .unwrap();
4188            }
4189            log::info!(
4190                "creating gitignore {:?} with contents:\n{}",
4191                ignore_path.strip_prefix(&root_path).unwrap(),
4192                ignore_contents
4193            );
4194            fs.save(
4195                &ignore_path,
4196                &ignore_contents.as_str().into(),
4197                Default::default(),
4198            )
4199            .await
4200            .unwrap();
4201        } else {
4202            let old_path = {
4203                let file_path = files.choose(rng);
4204                let dir_path = dirs[1..].choose(rng);
4205                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4206            };
4207
4208            let is_rename = rng.gen();
4209            if is_rename {
4210                let new_path_parent = dirs
4211                    .iter()
4212                    .filter(|d| !d.starts_with(old_path))
4213                    .choose(rng)
4214                    .unwrap();
4215
4216                let overwrite_existing_dir =
4217                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4218                let new_path = if overwrite_existing_dir {
4219                    fs.remove_dir(
4220                        &new_path_parent,
4221                        RemoveOptions {
4222                            recursive: true,
4223                            ignore_if_not_exists: true,
4224                        },
4225                    )
4226                    .await
4227                    .unwrap();
4228                    new_path_parent.to_path_buf()
4229                } else {
4230                    new_path_parent.join(gen_name(rng))
4231                };
4232
4233                log::info!(
4234                    "renaming {:?} to {}{:?}",
4235                    old_path.strip_prefix(&root_path).unwrap(),
4236                    if overwrite_existing_dir {
4237                        "overwrite "
4238                    } else {
4239                        ""
4240                    },
4241                    new_path.strip_prefix(&root_path).unwrap()
4242                );
4243                fs.rename(
4244                    &old_path,
4245                    &new_path,
4246                    fs::RenameOptions {
4247                        overwrite: true,
4248                        ignore_if_exists: true,
4249                    },
4250                )
4251                .await
4252                .unwrap();
4253            } else if fs.is_file(&old_path).await {
4254                log::info!(
4255                    "deleting file {:?}",
4256                    old_path.strip_prefix(&root_path).unwrap()
4257                );
4258                fs.remove_file(old_path, Default::default()).await.unwrap();
4259            } else {
4260                log::info!(
4261                    "deleting dir {:?}",
4262                    old_path.strip_prefix(&root_path).unwrap()
4263                );
4264                fs.remove_dir(
4265                    &old_path,
4266                    RemoveOptions {
4267                        recursive: true,
4268                        ignore_if_not_exists: true,
4269                    },
4270                )
4271                .await
4272                .unwrap();
4273            }
4274        }
4275    }
4276
4277    fn gen_name(rng: &mut impl Rng) -> String {
4278        (0..6)
4279            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4280            .map(char::from)
4281            .collect()
4282    }
4283
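        // Test-only helpers: `check_invariants` verifies that `entries_by_path` and
        // `entries_by_id` agree, that the file iterators match the entry tree, that
        // walking the tree via `child_entries` and via `entries` both reproduce the
        // cursor's ordering, and that every tracked `.gitignore` still has a
        // corresponding entry; `to_vec` flattens a snapshot for test comparisons.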
4284    impl LocalSnapshot {
4285        fn check_invariants(&self) {
4286            assert_eq!(
4287                self.entries_by_path
4288                    .cursor::<()>()
4289                    .map(|e| (&e.path, e.id))
4290                    .collect::<Vec<_>>(),
4291                self.entries_by_id
4292                    .cursor::<()>()
4293                    .map(|e| (&e.path, e.id))
4294                    .collect::<collections::BTreeSet<_>>()
4295                    .into_iter()
4296                    .collect::<Vec<_>>(),
4297                "entries_by_path and entries_by_id are inconsistent"
4298            );
4299
4300            let mut files = self.files(true, 0);
4301            let mut visible_files = self.files(false, 0);
4302            for entry in self.entries_by_path.cursor::<()>() {
4303                if entry.is_file() {
4304                    assert_eq!(files.next().unwrap().inode, entry.inode);
4305                    if !entry.is_ignored {
4306                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4307                    }
4308                }
4309            }
4310
4311            assert!(files.next().is_none());
4312            assert!(visible_files.next().is_none());
4313
4314            let mut bfs_paths = Vec::new();
4315            let mut stack = vec![Path::new("")];
4316            while let Some(path) = stack.pop() {
4317                bfs_paths.push(path);
4318                let ix = stack.len();
4319                for child_entry in self.child_entries(path) {
4320                    stack.insert(ix, &child_entry.path);
4321                }
4322            }
4323
4324            let dfs_paths_via_iter = self
4325                .entries_by_path
4326                .cursor::<()>()
4327                .map(|e| e.path.as_ref())
4328                .collect::<Vec<_>>();
4329            assert_eq!(bfs_paths, dfs_paths_via_iter);
4330
4331            let dfs_paths_via_traversal = self
4332                .entries(true)
4333                .map(|e| e.path.as_ref())
4334                .collect::<Vec<_>>();
4335            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4336
4337            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4338                let ignore_parent_path =
4339                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4340                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4341                assert!(self
4342                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4343                    .is_some());
4344            }
4345        }
4346
4347        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4348            let mut paths = Vec::new();
4349            for entry in self.entries_by_path.cursor::<()>() {
4350                if include_ignored || !entry.is_ignored {
4351                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4352                }
4353            }
4354            paths.sort_by(|a, b| a.0.cmp(b.0));
4355            paths
4356        }
4357    }
4358}