worktree.rs

use crate::{
    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
};
use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context, Result};
use client::{proto, Client};
use clock::ReplicaId;
use collections::{HashMap, VecDeque};
use fs::{repository::{GitRepository, RepoPath, GitStatus}, Fs, LineEnding};
use futures::{
    channel::{
        mpsc::{self, UnboundedSender},
        oneshot,
    },
    select_biased,
    task::Poll,
    Stream, StreamExt,
};
use fuzzy::CharBag;
use git::{DOT_GIT, GITIGNORE};
use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
use language::{
    proto::{
        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
        serialize_version,
    },
    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
};
use lsp::LanguageServerId;
use parking_lot::Mutex;
use postage::{
    barrier,
    prelude::{Sink as _, Stream as _},
    watch,
};
use smol::channel::{self, Sender};
use std::{
    any::Any,
    cmp::{self, Ordering},
    convert::TryFrom,
    ffi::OsStr,
    fmt,
    future::Future,
    mem,
    ops::{Deref, DerefMut},
    path::{Path, PathBuf},
    pin::Pin,
    sync::{
        atomic::{AtomicUsize, Ordering::SeqCst},
        Arc,
    },
    time::{Duration, SystemTime},
};
use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
use util::{paths::HOME, ResultExt, TryFutureExt};

#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);

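/// A worktree is either backed by the local filesystem (`Local`) or mirrored
/// from a collaborator's machine over RPC (`Remote`).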
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}

pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    share: Option<ShareState>,
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}

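/// A worktree mirrored from a remote collaborator. Its snapshot is kept up to
/// date by applying `proto::UpdateWorktree` messages received from the host.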
pub struct RemoteWorktree {
    snapshot: Snapshot,
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}

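/// A point-in-time view of the worktree's entries, indexed both by path and by
/// id, along with any git repositories discovered in the tree.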
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}

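/// A lightweight description of a git repository in the worktree: the project
/// entry of its work directory and the branch it currently has checked out.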
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    pub(crate) work_directory: WorkDirectoryEntry,
    pub(crate) branch: Option<Arc<str>>,
    // pub(crate) statuses: TreeMap<RepoPath, GitStatus>
}

impl RepositoryEntry {
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
        self.work_directory.contains(snapshot, path)
    }
}

impl From<&RepositoryEntry> for proto::RepositoryEntry {
    fn from(value: &RepositoryEntry) -> Self {
        proto::RepositoryEntry {
            work_directory_id: value.work_directory.to_proto(),
            branch: value.branch.as_ref().map(|str| str.to_string()),
        }
    }
}

/// This path corresponds to the 'content path' (the folder that contains the .git)
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);

impl Default for RepositoryWorkDirectory {
    fn default() -> Self {
        RepositoryWorkDirectory(Arc::from(Path::new("")))
    }
}

impl AsRef<Path> for RepositoryWorkDirectory {
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}

#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);

impl WorkDirectoryEntry {
    // Note that these paths should be relative to the worktree root.
    pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
        snapshot
            .entry_for_id(self.0)
            .map(|entry| path.starts_with(&entry.path))
            .unwrap_or(false)
    }

    pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
        worktree.entry_for_id(self.0).and_then(|entry| {
            path.strip_prefix(&entry.path)
                .ok()
                .map(move |path| path.into())
        })
    }
}

impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl From<ProjectEntryId> for WorkDirectoryEntry {
    fn from(value: ProjectEntryId) -> Self {
        WorkDirectoryEntry(value)
    }
}

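/// A `Snapshot` plus the local-only state needed to keep it up to date:
/// loaded gitignore files, open git repositories, and entry-id bookkeeping.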
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId keys correspond to the entries for the repositories'
    // work directories (i.e. each repository's `work_directory_id`).
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}

#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    pub(crate) scan_id: usize,
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}

impl LocalRepositoryEntry {
    // Note that this path should be relative to the worktree root.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}

impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}

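/// Messages sent from the background scanner to the `LocalWorktree`.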
enum ScanState {
    Started,
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        barrier: Option<barrier::Sender>,
        scanning: bool,
    },
}

struct ShareState {
    project_id: u64,
    snapshots_tx: watch::Sender<LocalSnapshot>,
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}

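/// Events emitted by a worktree when its entries or git repositories change.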
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}

impl Entity for Worktree {
    type Event = Event;
}

impl Worktree {
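    /// Creates a worktree backed by the local filesystem, rooted at `path`.
    ///
    /// The root is stat'ed up front; a `BackgroundScanner` is then spawned to
    /// populate the snapshot and watch for filesystem events. An illustrative
    /// call-site sketch (the `client`, `fs`, `next_entry_id`, and `cx` handles
    /// here are hypothetical, not defined in this file):
    ///
    /// ```ignore
    /// let worktree =
    ///     Worktree::local(client, Path::new("/some/project"), true, fs, next_entry_id, &mut cx)
    ///         .await?;
    /// ```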
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

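    /// Creates a worktree that mirrors a collaborator's worktree. Updates
    /// received from the host are applied to a background snapshot first and
    /// then copied into the foreground snapshot on the main thread.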
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}

impl LocalWorktree {
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }

    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.abs_path.join(path)
        } else {
            self.abs_path.to_path_buf()
        }
    }

    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }

    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Vec<(
        LanguageServerId,
        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
    )> {
        self.diagnostics.get(path).cloned().unwrap_or_default()
    }

    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }

    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }

    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        Ordering::Less => {
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        Ordering::Equal => {
                            if old_repo.scan_id != new_repo.scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        Ordering::Greater => {
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }

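    /// Completes once the background scanner reports that it is no longer
    /// scanning.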
    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut is_scanning_rx = self.is_scanning.1.clone();
        async move {
            let mut is_scanning = is_scanning_rx.borrow().clone();
            while is_scanning {
                if let Some(value) = is_scanning_rx.recv().await {
                    is_scanning = value;
                } else {
                    break;
                }
            }
        }
    }

    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }

    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }

    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }

    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: Arc<Path>,
        has_changed_file: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let handle = cx.handle();
        let buffer = buffer_handle.read(cx);

        let rpc = self.client.clone();
        let buffer_id = buffer.remote_id();
        let project_id = self.share.as_ref().map(|share| share.project_id);

        let text = buffer.as_rope().clone();
        let fingerprint = text.fingerprint();
        let version = buffer.version();
        let save = self.write_file(path, text, buffer.line_ending(), cx);

        cx.as_mut().spawn(|mut cx| async move {
            let entry = save.await?;

            if has_changed_file {
                let new_file = Arc::new(File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                });

                if let Some(project_id) = project_id {
                    rpc.send(proto::UpdateBufferFile {
                        project_id,
                        buffer_id,
                        file: Some(new_file.to_proto()),
                    })
                    .log_err();
                }

                buffer_handle.update(&mut cx, |buffer, cx| {
                    if has_changed_file {
                        buffer.file_updated(new_file, cx).detach();
                    }
                });
            }

            if let Some(project_id) = project_id {
                rpc.send(proto::BufferSaved {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                    mtime: Some(entry.mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                })?;
            }

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
            });

            Ok((version, fingerprint, entry.mtime))
        })
    }

    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }

    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }

    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }

    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }

    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }

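    /// Asks the background scanner to rescan the given path (and, for renames,
    /// the old path), then resolves with the refreshed entry once the scan of
    /// those paths has completed.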
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }

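    /// Starts streaming snapshots of this worktree to the remote project
    /// identified by `project_id`. Each snapshot is diffed against the
    /// previously sent one and sent as chunked `UpdateWorktree` messages; the
    /// returned task resolves once the first snapshot has been sent.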
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }

    pub fn unshare(&mut self) {
        self.share.take();
    }

    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
}

impl RemoteWorktree {
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

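    /// Resolves once a snapshot whose `completed_scan_id` is at least `scan_id`
    /// has been observed, or fails if the worktree disconnects first.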
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}

impl Snapshot {
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

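    /// Applies an `UpdateWorktree` message received from the host: removed and
    /// updated entries are edited into both entry trees, repository entries are
    /// reconciled, and the scan ids are advanced.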
    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        update.removed_repositories.sort_unstable();
        self.repository_entries.retain(|_, entry| {
            update
                .removed_repositories
                .binary_search(&entry.work_directory.to_proto())
                .is_err()
        });
1410
1411        for repository in update.updated_repositories {
1412            let repository = RepositoryEntry {
1413                work_directory: ProjectEntryId::from_proto(repository.work_directory_id).into(),
1414                branch: repository.branch.map(Into::into),
1415            };
1416            if let Some(entry) = self.entry_for_id(repository.work_directory_id()) {
1417                self.repository_entries
1418                    .insert(RepositoryWorkDirectory(entry.path.clone()), repository)
1419            } else {
1420                log::error!("no work directory entry for repository {:?}", repository)
1421            }
1422        }
1423
1424        self.scan_id = update.scan_id as usize;
1425        if update.is_last_update {
1426            self.completed_scan_id = update.scan_id as usize;
1427        }
1428
1429        Ok(())
1430    }
1431
1432    pub fn file_count(&self) -> usize {
1433        self.entries_by_path.summary().file_count
1434    }
1435
1436    pub fn visible_file_count(&self) -> usize {
1437        self.entries_by_path.summary().visible_file_count
1438    }
1439
1440    fn traverse_from_offset(
1441        &self,
1442        include_dirs: bool,
1443        include_ignored: bool,
1444        start_offset: usize,
1445    ) -> Traversal {
1446        let mut cursor = self.entries_by_path.cursor();
1447        cursor.seek(
1448            &TraversalTarget::Count {
1449                count: start_offset,
1450                include_dirs,
1451                include_ignored,
1452            },
1453            Bias::Right,
1454            &(),
1455        );
1456        Traversal {
1457            cursor,
1458            include_dirs,
1459            include_ignored,
1460        }
1461    }
1462
1463    fn traverse_from_path(
1464        &self,
1465        include_dirs: bool,
1466        include_ignored: bool,
1467        path: &Path,
1468    ) -> Traversal {
1469        let mut cursor = self.entries_by_path.cursor();
1470        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1471        Traversal {
1472            cursor,
1473            include_dirs,
1474            include_ignored,
1475        }
1476    }
1477
1478    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1479        self.traverse_from_offset(false, include_ignored, start)
1480    }
1481
1482    pub fn entries(&self, include_ignored: bool) -> Traversal {
1483        self.traverse_from_offset(true, include_ignored, 0)
1484    }
1485
1486    pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1487        self.repository_entries.values()
1488    }
1489
1490    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1491        let empty_path = Path::new("");
1492        self.entries_by_path
1493            .cursor::<()>()
1494            .filter(move |entry| entry.path.as_ref() != empty_path)
1495            .map(|entry| &entry.path)
1496    }
1497
1498    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1499        let mut cursor = self.entries_by_path.cursor();
1500        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1501        let traversal = Traversal {
1502            cursor,
1503            include_dirs: true,
1504            include_ignored: true,
1505        };
1506        ChildEntriesIter {
1507            traversal,
1508            parent_path,
1509        }
1510    }
1511
1512    pub fn root_entry(&self) -> Option<&Entry> {
1513        self.entry_for_path("")
1514    }
1515
1516    pub fn root_name(&self) -> &str {
1517        &self.root_name
1518    }
1519
1520    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1521        self.repository_entries
1522            .get(&RepositoryWorkDirectory(Path::new("").into()))
1523            .map(|entry| entry.to_owned())
1524    }
1525
1526    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1527        self.repository_entries.values()
1528    }
1529
1530    pub fn scan_id(&self) -> usize {
1531        self.scan_id
1532    }
1533
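        /// Returns the entry whose worktree-relative path is exactly `path`, if one exists.
        ///
        /// Illustrative sketch (hypothetical path; assumes the snapshot contains it):
        ///
        /// ```ignore
        /// if let Some(entry) = snapshot.entry_for_path("src/lib.rs") {
        ///     assert!(entry.is_file());
        /// }
        /// ```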
1534    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1535        let path = path.as_ref();
1536        self.traverse_from_path(true, true, path)
1537            .entry()
1538            .and_then(|entry| {
1539                if entry.path.as_ref() == path {
1540                    Some(entry)
1541                } else {
1542                    None
1543                }
1544            })
1545    }
1546
1547    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1548        let entry = self.entries_by_id.get(&id, &())?;
1549        self.entry_for_path(&entry.path)
1550    }
1551
1552    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1553        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1554    }
1555}
1556
1557impl LocalSnapshot {
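        /// Returns the repository whose work directory is the deepest one containing `path`,
        /// comparing candidates by the length of their work directory paths.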
1558    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
1559        let mut max_len = 0;
1560        let mut current_candidate = None;
1561        for (work_directory, repo) in self.repository_entries.iter() {
1562            if repo.contains(self, path) {
1563                if work_directory.0.as_os_str().len() >= max_len {
1564                    current_candidate = Some(repo);
1565                    max_len = work_directory.0.as_os_str().len();
1566                } else {
1567                    break;
1568                }
1569            }
1570        }
1571
1572        current_candidate.map(|entry| entry.to_owned())
1573    }
1574
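        /// Returns the project entry id of the work directory and a handle to the repository
        /// whose `.git` directory contains `path`.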
1575    pub(crate) fn repo_for_metadata(
1576        &self,
1577        path: &Path,
1578    ) -> Option<(ProjectEntryId, Arc<Mutex<dyn GitRepository>>)> {
1579        let (entry_id, local_repo) = self
1580            .git_repositories
1581            .iter()
1582            .find(|(_, repo)| repo.in_dot_git(path))?;
1583        Some((*entry_id, local_repo.repo_ptr.to_owned()))
1584    }
1585
1586    #[cfg(test)]
1587    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1588        let root_name = self.root_name.clone();
1589        proto::UpdateWorktree {
1590            project_id,
1591            worktree_id: self.id().to_proto(),
1592            abs_path: self.abs_path().to_string_lossy().into(),
1593            root_name,
1594            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1595            removed_entries: Default::default(),
1596            scan_id: self.scan_id as u64,
1597            is_last_update: true,
1598            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1599            removed_repositories: Default::default(),
1600        }
1601    }
1602
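        /// Builds an `UpdateWorktree` message describing how `self` differs from `other`.
        /// Entries are merged in id order and repositories in work-directory order, emitting
        /// updates for entries that are new or whose scan id changed, and removals for
        /// entries that no longer exist.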
1603    pub(crate) fn build_update(
1604        &self,
1605        other: &Self,
1606        project_id: u64,
1607        worktree_id: u64,
1608        include_ignored: bool,
1609    ) -> proto::UpdateWorktree {
1610        let mut updated_entries = Vec::new();
1611        let mut removed_entries = Vec::new();
1612        let mut self_entries = self
1613            .entries_by_id
1614            .cursor::<()>()
1615            .filter(|e| include_ignored || !e.is_ignored)
1616            .peekable();
1617        let mut other_entries = other
1618            .entries_by_id
1619            .cursor::<()>()
1620            .filter(|e| include_ignored || !e.is_ignored)
1621            .peekable();
1622        loop {
1623            match (self_entries.peek(), other_entries.peek()) {
1624                (Some(self_entry), Some(other_entry)) => {
1625                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1626                        Ordering::Less => {
1627                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1628                            updated_entries.push(entry);
1629                            self_entries.next();
1630                        }
1631                        Ordering::Equal => {
1632                            if self_entry.scan_id != other_entry.scan_id {
1633                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1634                                updated_entries.push(entry);
1635                            }
1636
1637                            self_entries.next();
1638                            other_entries.next();
1639                        }
1640                        Ordering::Greater => {
1641                            removed_entries.push(other_entry.id.to_proto());
1642                            other_entries.next();
1643                        }
1644                    }
1645                }
1646                (Some(self_entry), None) => {
1647                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1648                    updated_entries.push(entry);
1649                    self_entries.next();
1650                }
1651                (None, Some(other_entry)) => {
1652                    removed_entries.push(other_entry.id.to_proto());
1653                    other_entries.next();
1654                }
1655                (None, None) => break,
1656            }
1657        }
1658
1659        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1660        let mut removed_repositories = Vec::new();
1661        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
1662        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
1663        loop {
1664            match (self_repos.peek(), other_repos.peek()) {
1665                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
1666                    match Ord::cmp(self_work_dir, other_work_dir) {
1667                        Ordering::Less => {
1668                            updated_repositories.push((*self_repo).into());
1669                            self_repos.next();
1670                        }
1671                        Ordering::Equal => {
1672                            if self_repo != other_repo {
1673                                updated_repositories.push((*self_repo).into());
1674                            }
1675
1676                            self_repos.next();
1677                            other_repos.next();
1678                        }
1679                        Ordering::Greater => {
1680                            removed_repositories.push(other_repo.work_directory.to_proto());
1681                            other_repos.next();
1682                        }
1683                    }
1684                }
1685                (Some((_, self_repo)), None) => {
1686                    updated_repositories.push((*self_repo).into());
1687                    self_repos.next();
1688                }
1689                (None, Some((_, other_repo))) => {
1690                    removed_repositories.push(other_repo.work_directory.to_proto());
1691                    other_repos.next();
1692                }
1693                (None, None) => break,
1694            }
1695        }
1696
1697        proto::UpdateWorktree {
1698            project_id,
1699            worktree_id,
1700            abs_path: self.abs_path().to_string_lossy().into(),
1701            root_name: self.root_name().to_string(),
1702            updated_entries,
1703            removed_entries,
1704            scan_id: self.scan_id as u64,
1705            is_last_update: self.completed_scan_id == self.scan_id,
1706            updated_repositories,
1707            removed_repositories,
1708        }
1709    }
1710
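        /// Inserts `entry` into both entry trees, reusing the id of any existing entry at the
        /// same path or inode and preserving the kind of an already-populated directory. If
        /// the entry is a `.gitignore` file, the ignore data for its parent directory is
        /// reloaded as well.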
1711    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1712        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1713            let abs_path = self.abs_path.join(&entry.path);
1714            match smol::block_on(build_gitignore(&abs_path, fs)) {
1715                Ok(ignore) => {
1716                    self.ignores_by_parent_abs_path.insert(
1717                        abs_path.parent().unwrap().into(),
1718                        (Arc::new(ignore), self.scan_id),
1719                    );
1720                }
1721                Err(error) => {
1722                    log::error!(
1723                        "error loading .gitignore file {:?} - {:?}",
1724                        &entry.path,
1725                        error
1726                    );
1727                }
1728            }
1729        }
1730
1731        self.reuse_entry_id(&mut entry);
1732
1733        if entry.kind == EntryKind::PendingDir {
1734            if let Some(existing_entry) =
1735                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1736            {
1737                entry.kind = existing_entry.kind;
1738            }
1739        }
1740
1741        let scan_id = self.scan_id;
1742        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1743        if let Some(removed) = removed {
1744            if removed.id != entry.id {
1745                self.entries_by_id.remove(&removed.id, &());
1746            }
1747        }
1748        self.entries_by_id.insert_or_replace(
1749            PathEntry {
1750                id: entry.id,
1751                path: entry.path.clone(),
1752                is_ignored: entry.is_ignored,
1753                scan_id,
1754            },
1755            &(),
1756        );
1757
1758        entry
1759    }
1760
1761    fn populate_dir(
1762        &mut self,
1763        parent_path: Arc<Path>,
1764        entries: impl IntoIterator<Item = Entry>,
1765        ignore: Option<Arc<Gitignore>>,
1766        fs: &dyn Fs,
1767    ) {
1768        let mut parent_entry = if let Some(parent_entry) =
1769            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1770        {
1771            parent_entry.clone()
1772        } else {
1773            log::warn!(
1774                "populating a directory {:?} that has been removed",
1775                parent_path
1776            );
1777            return;
1778        };
1779
1780        match parent_entry.kind {
1781            EntryKind::PendingDir => {
1782                parent_entry.kind = EntryKind::Dir;
1783            }
1784            EntryKind::Dir => {}
1785            _ => return,
1786        }
1787
1788        if let Some(ignore) = ignore {
1789            self.ignores_by_parent_abs_path.insert(
1790                self.abs_path.join(&parent_path).into(),
1791                (ignore, self.scan_id),
1792            );
1793        }
1794
1795        if parent_path.file_name() == Some(&DOT_GIT) {
1796            self.build_repo(parent_path, fs);
1797        }
1798
1799        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1800        let mut entries_by_id_edits = Vec::new();
1801
1802        for mut entry in entries {
1803            self.reuse_entry_id(&mut entry);
1804            entries_by_id_edits.push(Edit::Insert(PathEntry {
1805                id: entry.id,
1806                path: entry.path.clone(),
1807                is_ignored: entry.is_ignored,
1808                scan_id: self.scan_id,
1809            }));
1810            entries_by_path_edits.push(Edit::Insert(entry));
1811        }
1812
1813        self.entries_by_path.edit(entries_by_path_edits, &());
1814        self.entries_by_id.edit(entries_by_id_edits, &());
1815    }
1816
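        /// Registers a git repository when a `.git` directory is populated: `parent_path` is
        /// the `.git` directory itself and its parent becomes the repository's work directory.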
1817    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
1818        let abs_path = self.abs_path.join(&parent_path);
1819        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
1820
1821        // Guard against nested repositories: skip `.git` directories that live inside
1822        // another repository's metadata.
1823        if work_dir
1824            .components()
1825            .any(|component| component.as_os_str() == *DOT_GIT)
1826        {
1827            return None;
1828        }
1829
1830        let work_dir_id = self
1831            .entry_for_path(work_dir.clone())
1832            .map(|entry| entry.id)?;
1833
1834        if self.git_repositories.get(&work_dir_id).is_none() {
1835            let repo = fs.open_repo(abs_path.as_path())?;
1836            let work_directory = RepositoryWorkDirectory(work_dir.clone());
1837            let scan_id = self.scan_id;
1838
1839            let repo_lock = repo.lock();
1840            self.repository_entries.insert(
1841                work_directory,
1842                RepositoryEntry {
1843                    work_directory: work_dir_id.into(),
1844                    branch: repo_lock.branch_name().map(Into::into),
1845                },
1846            );
1847            drop(repo_lock);
1848
1849            self.git_repositories.insert(
1850                work_dir_id,
1851                LocalRepositoryEntry {
1852                    scan_id,
1853                    repo_ptr: repo,
1854                    git_dir_path: parent_path.clone(),
1855                },
1856            )
1857        }
1858
1859        Some(())
1860    }

1861    fn reuse_entry_id(&mut self, entry: &mut Entry) {
1862        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1863            entry.id = removed_entry_id;
1864        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1865            entry.id = existing_entry.id;
1866        }
1867    }
1868
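        /// Removes the entry at `path` and all of its descendants by splicing them out of the
        /// path-ordered tree, remembering the removed ids (keyed by inode) so they can be
        /// reused if the same files reappear.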
1869    fn remove_path(&mut self, path: &Path) {
1870        let mut new_entries;
1871        let removed_entries;
1872        {
1873            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1874            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1875            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1876            new_entries.push_tree(cursor.suffix(&()), &());
1877        }
1878        self.entries_by_path = new_entries;
1879
1880        let mut entries_by_id_edits = Vec::new();
1881        for entry in removed_entries.cursor::<()>() {
1882            let removed_entry_id = self
1883                .removed_entry_ids
1884                .entry(entry.inode)
1885                .or_insert(entry.id);
1886            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1887            entries_by_id_edits.push(Edit::Remove(entry.id));
1888        }
1889        self.entries_by_id.edit(entries_by_id_edits, &());
1890
1891        if path.file_name() == Some(&GITIGNORE) {
1892            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1893            if let Some((_, scan_id)) = self
1894                .ignores_by_parent_abs_path
1895                .get_mut(abs_parent_path.as_path())
1896            {
1897                *scan_id = self.snapshot.scan_id;
1898            }
1899        }
1900    }
1901
1902    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1903        let mut inodes = TreeSet::default();
1904        for ancestor in path.ancestors().skip(1) {
1905            if let Some(entry) = self.entry_for_path(ancestor) {
1906                inodes.insert(entry.inode);
1907            }
1908        }
1909        inodes
1910    }
1911
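        /// Builds the stack of `.gitignore` files that applies to `abs_path` by walking its
        /// ancestors from the outermost directory inward. If any ancestor is itself ignored,
        /// the whole subtree is treated as ignored.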
1912    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1913        let mut new_ignores = Vec::new();
1914        for ancestor in abs_path.ancestors().skip(1) {
1915            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1916                new_ignores.push((ancestor, Some(ignore.clone())));
1917            } else {
1918                new_ignores.push((ancestor, None));
1919            }
1920        }
1921
1922        let mut ignore_stack = IgnoreStack::none();
1923        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1924            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1925                ignore_stack = IgnoreStack::all();
1926                break;
1927            } else if let Some(ignore) = ignore {
1928                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1929            }
1930        }
1931
1932        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1933            ignore_stack = IgnoreStack::all();
1934        }
1935
1936        ignore_stack
1937    }
1938}
1939
1940async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1941    let contents = fs.load(abs_path).await?;
1942    let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1943    let mut builder = GitignoreBuilder::new(parent);
1944    for line in contents.lines() {
1945        builder.add_line(Some(abs_path.into()), line)?;
1946    }
1947    Ok(builder.build()?)
1948}
1949
1950impl WorktreeId {
1951    pub fn from_usize(handle_id: usize) -> Self {
1952        Self(handle_id)
1953    }
1954
1955    pub(crate) fn from_proto(id: u64) -> Self {
1956        Self(id as usize)
1957    }
1958
1959    pub fn to_proto(&self) -> u64 {
1960        self.0 as u64
1961    }
1962
1963    pub fn to_usize(&self) -> usize {
1964        self.0
1965    }
1966}
1967
1968impl fmt::Display for WorktreeId {
1969    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1970        self.0.fmt(f)
1971    }
1972}
1973
1974impl Deref for Worktree {
1975    type Target = Snapshot;
1976
1977    fn deref(&self) -> &Self::Target {
1978        match self {
1979            Worktree::Local(worktree) => &worktree.snapshot,
1980            Worktree::Remote(worktree) => &worktree.snapshot,
1981        }
1982    }
1983}
1984
1985impl Deref for LocalWorktree {
1986    type Target = LocalSnapshot;
1987
1988    fn deref(&self) -> &Self::Target {
1989        &self.snapshot
1990    }
1991}
1992
1993impl Deref for RemoteWorktree {
1994    type Target = Snapshot;
1995
1996    fn deref(&self) -> &Self::Target {
1997        &self.snapshot
1998    }
1999}
2000
2001impl fmt::Debug for LocalWorktree {
2002    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2003        self.snapshot.fmt(f)
2004    }
2005}
2006
2007impl fmt::Debug for Snapshot {
2008    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2009        struct EntriesById<'a>(&'a SumTree<PathEntry>);
2010        struct EntriesByPath<'a>(&'a SumTree<Entry>);
2011
2012        impl<'a> fmt::Debug for EntriesByPath<'a> {
2013            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2014                f.debug_map()
2015                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2016                    .finish()
2017            }
2018        }
2019
2020        impl<'a> fmt::Debug for EntriesById<'a> {
2021            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2022                f.debug_list().entries(self.0.iter()).finish()
2023            }
2024        }
2025
2026        f.debug_struct("Snapshot")
2027            .field("id", &self.id)
2028            .field("root_name", &self.root_name)
2029            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2030            .field("entries_by_id", &EntriesById(&self.entries_by_id))
2031            .finish()
2032    }
2033}
2034
2035#[derive(Clone, PartialEq)]
2036pub struct File {
2037    pub worktree: ModelHandle<Worktree>,
2038    pub path: Arc<Path>,
2039    pub mtime: SystemTime,
2040    pub(crate) entry_id: ProjectEntryId,
2041    pub(crate) is_local: bool,
2042    pub(crate) is_deleted: bool,
2043}
2044
2045impl language::File for File {
2046    fn as_local(&self) -> Option<&dyn language::LocalFile> {
2047        if self.is_local {
2048            Some(self)
2049        } else {
2050            None
2051        }
2052    }
2053
2054    fn mtime(&self) -> SystemTime {
2055        self.mtime
2056    }
2057
2058    fn path(&self) -> &Arc<Path> {
2059        &self.path
2060    }
2061
2062    fn full_path(&self, cx: &AppContext) -> PathBuf {
2063        let mut full_path = PathBuf::new();
2064        let worktree = self.worktree.read(cx);
2065
2066        if worktree.is_visible() {
2067            full_path.push(worktree.root_name());
2068        } else {
2069            let path = worktree.abs_path();
2070
2071            if worktree.is_local() && path.starts_with(HOME.as_path()) {
2072                full_path.push("~");
2073                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2074            } else {
2075                full_path.push(path)
2076            }
2077        }
2078
2079        if self.path.components().next().is_some() {
2080            full_path.push(&self.path);
2081        }
2082
2083        full_path
2084    }
2085
2086    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2087    /// of its worktree, then this method will return the name of the worktree itself.
2088    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2089        self.path
2090            .file_name()
2091            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2092    }
2093
2094    fn is_deleted(&self) -> bool {
2095        self.is_deleted
2096    }
2097
2098    fn as_any(&self) -> &dyn Any {
2099        self
2100    }
2101
2102    fn to_proto(&self) -> rpc::proto::File {
2103        rpc::proto::File {
2104            worktree_id: self.worktree.id() as u64,
2105            entry_id: self.entry_id.to_proto(),
2106            path: self.path.to_string_lossy().into(),
2107            mtime: Some(self.mtime.into()),
2108            is_deleted: self.is_deleted,
2109        }
2110    }
2111}
2112
2113impl language::LocalFile for File {
2114    fn abs_path(&self, cx: &AppContext) -> PathBuf {
2115        self.worktree
2116            .read(cx)
2117            .as_local()
2118            .unwrap()
2119            .abs_path
2120            .join(&self.path)
2121    }
2122
2123    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2124        let worktree = self.worktree.read(cx).as_local().unwrap();
2125        let abs_path = worktree.absolutize(&self.path);
2126        let fs = worktree.fs.clone();
2127        cx.background()
2128            .spawn(async move { fs.load(&abs_path).await })
2129    }
2130
2131    fn buffer_reloaded(
2132        &self,
2133        buffer_id: u64,
2134        version: &clock::Global,
2135        fingerprint: RopeFingerprint,
2136        line_ending: LineEnding,
2137        mtime: SystemTime,
2138        cx: &mut AppContext,
2139    ) {
2140        let worktree = self.worktree.read(cx).as_local().unwrap();
2141        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2142            worktree
2143                .client
2144                .send(proto::BufferReloaded {
2145                    project_id,
2146                    buffer_id,
2147                    version: serialize_version(version),
2148                    mtime: Some(mtime.into()),
2149                    fingerprint: serialize_fingerprint(fingerprint),
2150                    line_ending: serialize_line_ending(line_ending) as i32,
2151                })
2152                .log_err();
2153        }
2154    }
2155}
2156
2157impl File {
2158    pub fn from_proto(
2159        proto: rpc::proto::File,
2160        worktree: ModelHandle<Worktree>,
2161        cx: &AppContext,
2162    ) -> Result<Self> {
2163        let worktree_id = worktree
2164            .read(cx)
2165            .as_remote()
2166            .ok_or_else(|| anyhow!("not remote"))?
2167            .id();
2168
2169        if worktree_id.to_proto() != proto.worktree_id {
2170            return Err(anyhow!("worktree id does not match file"));
2171        }
2172
2173        Ok(Self {
2174            worktree,
2175            path: Path::new(&proto.path).into(),
2176            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2177            entry_id: ProjectEntryId::from_proto(proto.entry_id),
2178            is_local: false,
2179            is_deleted: proto.is_deleted,
2180        })
2181    }
2182
2183    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2184        file.and_then(|f| f.as_any().downcast_ref())
2185    }
2186
2187    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2188        self.worktree.read(cx).id()
2189    }
2190
2191    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2192        if self.is_deleted {
2193            None
2194        } else {
2195            Some(self.entry_id)
2196        }
2197    }
2198}
2199
2200#[derive(Clone, Debug, PartialEq, Eq)]
2201pub struct Entry {
2202    pub id: ProjectEntryId,
2203    pub kind: EntryKind,
2204    pub path: Arc<Path>,
2205    pub inode: u64,
2206    pub mtime: SystemTime,
2207    pub is_symlink: bool,
2208    pub is_ignored: bool,
2209}
2210
2211#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2212pub enum EntryKind {
2213    PendingDir,
2214    Dir,
2215    File(CharBag),
2216}
2217
2218#[derive(Clone, Copy, Debug)]
2219pub enum PathChange {
2220    Added,
2221    Removed,
2222    Updated,
2223    AddedOrUpdated,
2224}
2225
2226impl Entry {
2227    fn new(
2228        path: Arc<Path>,
2229        metadata: &fs::Metadata,
2230        next_entry_id: &AtomicUsize,
2231        root_char_bag: CharBag,
2232    ) -> Self {
2233        Self {
2234            id: ProjectEntryId::new(next_entry_id),
2235            kind: if metadata.is_dir {
2236                EntryKind::PendingDir
2237            } else {
2238                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2239            },
2240            path,
2241            inode: metadata.inode,
2242            mtime: metadata.mtime,
2243            is_symlink: metadata.is_symlink,
2244            is_ignored: false,
2245        }
2246    }
2247
2248    pub fn is_dir(&self) -> bool {
2249        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2250    }
2251
2252    pub fn is_file(&self) -> bool {
2253        matches!(self.kind, EntryKind::File(_))
2254    }
2255}
2256
2257impl sum_tree::Item for Entry {
2258    type Summary = EntrySummary;
2259
2260    fn summary(&self) -> Self::Summary {
2261        let visible_count = if self.is_ignored { 0 } else { 1 };
2262        let file_count;
2263        let visible_file_count;
2264        if self.is_file() {
2265            file_count = 1;
2266            visible_file_count = visible_count;
2267        } else {
2268            file_count = 0;
2269            visible_file_count = 0;
2270        }
2271
2272        EntrySummary {
2273            max_path: self.path.clone(),
2274            count: 1,
2275            visible_count,
2276            file_count,
2277            visible_file_count,
2278        }
2279    }
2280}
2281
2282impl sum_tree::KeyedItem for Entry {
2283    type Key = PathKey;
2284
2285    fn key(&self) -> Self::Key {
2286        PathKey(self.path.clone())
2287    }
2288}
2289
2290#[derive(Clone, Debug)]
2291pub struct EntrySummary {
2292    max_path: Arc<Path>,
2293    count: usize,
2294    visible_count: usize,
2295    file_count: usize,
2296    visible_file_count: usize,
2297}
2298
2299impl Default for EntrySummary {
2300    fn default() -> Self {
2301        Self {
2302            max_path: Arc::from(Path::new("")),
2303            count: 0,
2304            visible_count: 0,
2305            file_count: 0,
2306            visible_file_count: 0,
2307        }
2308    }
2309}
2310
2311impl sum_tree::Summary for EntrySummary {
2312    type Context = ();
2313
2314    fn add_summary(&mut self, rhs: &Self, _: &()) {
2315        self.max_path = rhs.max_path.clone();
2316        self.count += rhs.count;
2317        self.visible_count += rhs.visible_count;
2318        self.file_count += rhs.file_count;
2319        self.visible_file_count += rhs.visible_file_count;
2320    }
2321}
2322
2323#[derive(Clone, Debug)]
2324struct PathEntry {
2325    id: ProjectEntryId,
2326    path: Arc<Path>,
2327    is_ignored: bool,
2328    scan_id: usize,
2329}
2330
2331impl sum_tree::Item for PathEntry {
2332    type Summary = PathEntrySummary;
2333
2334    fn summary(&self) -> Self::Summary {
2335        PathEntrySummary { max_id: self.id }
2336    }
2337}
2338
2339impl sum_tree::KeyedItem for PathEntry {
2340    type Key = ProjectEntryId;
2341
2342    fn key(&self) -> Self::Key {
2343        self.id
2344    }
2345}
2346
2347#[derive(Clone, Debug, Default)]
2348struct PathEntrySummary {
2349    max_id: ProjectEntryId,
2350}
2351
2352impl sum_tree::Summary for PathEntrySummary {
2353    type Context = ();
2354
2355    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2356        self.max_id = summary.max_id;
2357    }
2358}
2359
2360impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2361    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2362        *self = summary.max_id;
2363    }
2364}
2365
2366#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2367pub struct PathKey(Arc<Path>);
2368
2369impl Default for PathKey {
2370    fn default() -> Self {
2371        Self(Path::new("").into())
2372    }
2373}
2374
2375impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2376    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2377        self.0 = summary.max_path.clone();
2378    }
2379}
2380
2381struct BackgroundScanner {
2382    snapshot: Mutex<LocalSnapshot>,
2383    fs: Arc<dyn Fs>,
2384    status_updates_tx: UnboundedSender<ScanState>,
2385    executor: Arc<executor::Background>,
2386    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2387    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
2388    finished_initial_scan: bool,
2389}
2390
2391impl BackgroundScanner {
2392    fn new(
2393        snapshot: LocalSnapshot,
2394        fs: Arc<dyn Fs>,
2395        status_updates_tx: UnboundedSender<ScanState>,
2396        executor: Arc<executor::Background>,
2397        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2398    ) -> Self {
2399        Self {
2400            fs,
2401            status_updates_tx,
2402            executor,
2403            refresh_requests_rx,
2404            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2405            snapshot: Mutex::new(snapshot),
2406            finished_initial_scan: false,
2407        }
2408    }
2409
2410    async fn run(
2411        &mut self,
2412        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
2413    ) {
2414        use futures::FutureExt as _;
2415
2416        let (root_abs_path, root_inode) = {
2417            let snapshot = self.snapshot.lock();
2418            (
2419                snapshot.abs_path.clone(),
2420                snapshot.root_entry().map(|e| e.inode),
2421            )
2422        };
2423
2424        // Populate ignores above the root.
2425        let ignore_stack;
2426        for ancestor in root_abs_path.ancestors().skip(1) {
2427            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2428            {
2429                self.snapshot
2430                    .lock()
2431                    .ignores_by_parent_abs_path
2432                    .insert(ancestor.into(), (ignore.into(), 0));
2433            }
2434        }
2435        {
2436            let mut snapshot = self.snapshot.lock();
2437            snapshot.scan_id += 1;
2438            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2439            if ignore_stack.is_all() {
2440                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2441                    root_entry.is_ignored = true;
2442                    snapshot.insert_entry(root_entry, self.fs.as_ref());
2443                }
2444            }
2445        };
2446
2447        // Perform an initial scan of the directory.
2448        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2449        smol::block_on(scan_job_tx.send(ScanJob {
2450            abs_path: root_abs_path,
2451            path: Arc::from(Path::new("")),
2452            ignore_stack,
2453            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
2454            scan_queue: scan_job_tx.clone(),
2455        }))
2456        .unwrap();
2457        drop(scan_job_tx);
2458        self.scan_dirs(true, scan_job_rx).await;
2459        {
2460            let mut snapshot = self.snapshot.lock();
2461            snapshot.completed_scan_id = snapshot.scan_id;
2462        }
2463        self.send_status_update(false, None);
2464
2465        // Process any FS events that occurred while performing the initial scan.
2466        // For these events, the reported changes cannot be as precise, because we didn't
2467        // have the previous state loaded yet.
2468        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
2469            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2470            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2471                paths.extend(more_events.into_iter().map(|e| e.path));
2472            }
2473            self.process_events(paths).await;
2474        }
2475
2476        self.finished_initial_scan = true;
2477
2478        // Continue processing events until the worktree is dropped.
2479        loop {
2480            select_biased! {
2481                // Process any path refresh requests from the worktree. Prioritize
2482                // these before handling changes reported by the filesystem.
2483                request = self.refresh_requests_rx.recv().fuse() => {
2484                    let Ok((paths, barrier)) = request else { break };
2485                    if !self.process_refresh_request(paths, barrier).await {
2486                        return;
2487                    }
2488                }
2489
2490                events = events_rx.next().fuse() => {
2491                    let Some(events) = events else { break };
2492                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
2493                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
2494                        paths.extend(more_events.into_iter().map(|e| e.path));
2495                    }
2496                    self.process_events(paths).await;
2497                }
2498            }
2499        }
2500    }
2501
2502    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2503        self.reload_entries_for_paths(paths, None).await;
2504        self.send_status_update(false, Some(barrier))
2505    }
2506
2507    async fn process_events(&mut self, paths: Vec<PathBuf>) {
2508        let (scan_job_tx, scan_job_rx) = channel::unbounded();
2509        if let Some(mut paths) = self
2510            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2511            .await
2512        {
2513            paths.sort_unstable();
2514            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
2515        }
2516        drop(scan_job_tx);
2517        self.scan_dirs(false, scan_job_rx).await;
2518
2519        self.update_ignore_statuses().await;
2520
2521        let mut snapshot = self.snapshot.lock();
2522
2523        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2524        git_repositories.retain(|work_directory_id, _| {
2525            snapshot
2526                .entry_for_id(*work_directory_id)
2527                .map_or(false, |entry| {
2528                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
2529                })
2530        });
2531        snapshot.git_repositories = git_repositories;
2532
2533        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
2534        git_repository_entries.retain(|_, entry| {
2535            snapshot
2536                .git_repositories
2537                .get(&entry.work_directory.0)
2538                .is_some()
2539        });
2540        snapshot.snapshot.repository_entries = git_repository_entries;
2541
2542        snapshot.removed_entry_ids.clear();
2543        snapshot.completed_scan_id = snapshot.scan_id;
2544
2545        drop(snapshot);
2546
2547        self.send_status_update(false, None);
2548    }
2549
2550    async fn scan_dirs(
2551        &self,
2552        enable_progress_updates: bool,
2553        scan_jobs_rx: channel::Receiver<ScanJob>,
2554    ) {
2555        use futures::FutureExt as _;
2556
2557        if self
2558            .status_updates_tx
2559            .unbounded_send(ScanState::Started)
2560            .is_err()
2561        {
2562            return;
2563        }
2564
2565        let progress_update_count = AtomicUsize::new(0);
2566        self.executor
2567            .scoped(|scope| {
2568                for _ in 0..self.executor.num_cpus() {
2569                    scope.spawn(async {
2570                        let mut last_progress_update_count = 0;
2571                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
2572                        futures::pin_mut!(progress_update_timer);
2573
2574                        loop {
2575                            select_biased! {
2576                                // Process any path refresh requests before moving on to process
2577                                // the scan queue, so that user operations are prioritized.
2578                                request = self.refresh_requests_rx.recv().fuse() => {
2579                                    let Ok((paths, barrier)) = request else { break };
2580                                    if !self.process_refresh_request(paths, barrier).await {
2581                                        return;
2582                                    }
2583                                }
2584
2585                                // Send periodic progress updates to the worktree. Use an atomic counter
2586                                // to ensure that only one of the workers sends a progress update after
2587                                // the update interval elapses.
2588                                _ = progress_update_timer => {
2589                                    match progress_update_count.compare_exchange(
2590                                        last_progress_update_count,
2591                                        last_progress_update_count + 1,
2592                                        SeqCst,
2593                                        SeqCst
2594                                    ) {
2595                                        Ok(_) => {
2596                                            last_progress_update_count += 1;
2597                                            self.send_status_update(true, None);
2598                                        }
2599                                        Err(count) => {
2600                                            last_progress_update_count = count;
2601                                        }
2602                                    }
2603                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
2604                                }
2605
2606                                // Recursively load directories from the file system.
2607                                job = scan_jobs_rx.recv().fuse() => {
2608                                    let Ok(job) = job else { break };
2609                                    if let Err(err) = self.scan_dir(&job).await {
2610                                        if job.path.as_ref() != Path::new("") {
2611                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
2612                                        }
2613                                    }
2614                                }
2615                            }
2616                        }
2617                    })
2618                }
2619            })
2620            .await;
2621    }
2622
2623    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2624        let mut prev_state = self.prev_state.lock();
2625        let snapshot = self.snapshot.lock().clone();
2626        let mut old_snapshot = snapshot.snapshot.clone();
2627        mem::swap(&mut old_snapshot, &mut prev_state.0);
2628        let changed_paths = mem::take(&mut prev_state.1);
2629        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
2630        self.status_updates_tx
2631            .unbounded_send(ScanState::Updated {
2632                snapshot,
2633                changes,
2634                scanning,
2635                barrier,
2636            })
2637            .is_ok()
2638    }
2639
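        /// Scans a single directory: reads its children, builds entries for them, and enqueues
        /// scan jobs for subdirectories. A `.gitignore` discovered while scanning retroactively
        /// updates the ignore status of entries already processed in this directory.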
2640    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
2641        let mut new_entries: Vec<Entry> = Vec::new();
2642        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2643        let mut ignore_stack = job.ignore_stack.clone();
2644        let mut new_ignore = None;
2645        let (root_abs_path, root_char_bag, next_entry_id) = {
2646            let snapshot = self.snapshot.lock();
2647            (
2648                snapshot.abs_path().clone(),
2649                snapshot.root_char_bag,
2650                snapshot.next_entry_id.clone(),
2651            )
2652        };
2653        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2654        while let Some(child_abs_path) = child_paths.next().await {
2655            let child_abs_path: Arc<Path> = match child_abs_path {
2656                Ok(child_abs_path) => child_abs_path.into(),
2657                Err(error) => {
2658                    log::error!("error processing entry {:?}", error);
2659                    continue;
2660                }
2661            };
2662
2663            let child_name = child_abs_path.file_name().unwrap();
2664            let child_path: Arc<Path> = job.path.join(child_name).into();
2665            let child_metadata = match self.fs.metadata(&child_abs_path).await {
2666                Ok(Some(metadata)) => metadata,
2667                Ok(None) => continue,
2668                Err(err) => {
2669                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
2670                    continue;
2671                }
2672            };
2673
2674            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2675            if child_name == *GITIGNORE {
2676                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2677                    Ok(ignore) => {
2678                        let ignore = Arc::new(ignore);
2679                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2680                        new_ignore = Some(ignore);
2681                    }
2682                    Err(error) => {
2683                        log::error!(
2684                            "error loading .gitignore file {:?} - {:?}",
2685                            child_name,
2686                            error
2687                        );
2688                    }
2689                }
2690
2691                // Update the ignore status of any child entries we've already processed to
2692                // reflect the ignore file in the current directory. Because `.gitignore` starts
2693                // with a `.`, it sorts near the start of the directory, so there should rarely
2694                // be many such entries. Update the ignore stack of any new jobs as well.
2695                let mut new_jobs = new_jobs.iter_mut();
2696                for entry in &mut new_entries {
2697                    let entry_abs_path = root_abs_path.join(&entry.path);
2698                    entry.is_ignored =
2699                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2700
2701                    if entry.is_dir() {
2702                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2703                            job.ignore_stack = if entry.is_ignored {
2704                                IgnoreStack::all()
2705                            } else {
2706                                ignore_stack.clone()
2707                            };
2708                        }
2709                    }
2710                }
2711            }
2712
2713            let mut child_entry = Entry::new(
2714                child_path.clone(),
2715                &child_metadata,
2716                &next_entry_id,
2717                root_char_bag,
2718            );
2719
2720            if child_entry.is_dir() {
2721                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2722                child_entry.is_ignored = is_ignored;
2723
2724                // Avoid recursing endlessly in the case of a recursive symlink
2725                if !job.ancestor_inodes.contains(&child_entry.inode) {
2726                    let mut ancestor_inodes = job.ancestor_inodes.clone();
2727                    ancestor_inodes.insert(child_entry.inode);
2728
2729                    new_jobs.push(Some(ScanJob {
2730                        abs_path: child_abs_path,
2731                        path: child_path,
2732                        ignore_stack: if is_ignored {
2733                            IgnoreStack::all()
2734                        } else {
2735                            ignore_stack.clone()
2736                        },
2737                        ancestor_inodes,
2738                        scan_queue: job.scan_queue.clone(),
2739                    }));
2740                } else {
2741                    new_jobs.push(None);
2742                }
2743            } else {
2744                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2745            }
2746
2747            new_entries.push(child_entry);
2748        }
2749
2750        self.snapshot.lock().populate_dir(
2751            job.path.clone(),
2752            new_entries,
2753            new_ignore,
2754            self.fs.as_ref(),
2755        );
2756
2757        for new_job in new_jobs.into_iter().flatten() {
2758            job.scan_queue.send(new_job).await.unwrap();
2759        }
2762
2763        Ok(())
2764    }
2765
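        /// Refreshes entries for the given absolute paths. Entries for missing paths, or for
        /// paths that are about to be rescanned recursively, are removed before new metadata
        /// is inserted so that renames are detected regardless of event order. Returns the
        /// corresponding worktree-relative paths.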
2766    async fn reload_entries_for_paths(
2767        &self,
2768        mut abs_paths: Vec<PathBuf>,
2769        scan_queue_tx: Option<Sender<ScanJob>>,
2770    ) -> Option<Vec<Arc<Path>>> {
2771        let doing_recursive_update = scan_queue_tx.is_some();
2772
2773        abs_paths.sort_unstable();
2774        abs_paths.dedup_by(|a, b| a.starts_with(&b));
2775
2776        let root_abs_path = self.snapshot.lock().abs_path.clone();
2777        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
2778        let metadata = futures::future::join_all(
2779            abs_paths
2780                .iter()
2781                .map(|abs_path| self.fs.metadata(&abs_path))
2782                .collect::<Vec<_>>(),
2783        )
2784        .await;
2785
2786        let mut snapshot = self.snapshot.lock();
2787        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
2788        snapshot.scan_id += 1;
2789        if is_idle && !doing_recursive_update {
2790            snapshot.completed_scan_id = snapshot.scan_id;
2791        }
2792
2793        // Remove any entries for paths that no longer exist or are being recursively
2794        // refreshed. Do this before adding any new entries, so that renames can be
2795        // detected regardless of the order of the paths.
2796        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
2797        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
2798            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
2799                if matches!(metadata, Ok(None)) || doing_recursive_update {
2800                    snapshot.remove_path(path);
2801                }
2802                event_paths.push(path.into());
2803            } else {
2804                log::error!(
2805                    "unexpected event {:?} for root path {:?}",
2806                    abs_path,
2807                    root_canonical_path
2808                );
2809            }
2810        }
2811
2812        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
2813            let abs_path: Arc<Path> = root_abs_path.join(&path).into();
2814
2815            match metadata {
2816                Ok(Some(metadata)) => {
2817                    let ignore_stack =
2818                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2819                    let mut fs_entry = Entry::new(
2820                        path.clone(),
2821                        &metadata,
2822                        snapshot.next_entry_id.as_ref(),
2823                        snapshot.root_char_bag,
2824                    );
2825                    fs_entry.is_ignored = ignore_stack.is_all();
2826                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
2827
2828                    let scan_id = snapshot.scan_id;
2829
2830                    let repo_with_path_in_dotgit = snapshot.repo_for_metadata(&path);
2831                    if let Some((entry_id, repo)) = repo_with_path_in_dotgit {
2832                        let work_dir = snapshot
2833                            .entry_for_id(entry_id)
2834                            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;
2835
2836                        let repo = repo.lock();
2837                        repo.reload_index();
2838                        let branch = repo.branch_name();
2839
2840                        snapshot.git_repositories.update(&entry_id, |entry| {
2841                            entry.scan_id = scan_id;
2842                        });
2843
2844                        snapshot
2845                            .repository_entries
2846                            .update(&work_dir, |entry| entry.branch = branch.map(Into::into));
2847                    }
2848
2849                    if let Some(scan_queue_tx) = &scan_queue_tx {
2850                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2851                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2852                            ancestor_inodes.insert(metadata.inode);
2853                            smol::block_on(scan_queue_tx.send(ScanJob {
2854                                abs_path,
2855                                path,
2856                                ignore_stack,
2857                                ancestor_inodes,
2858                                scan_queue: scan_queue_tx.clone(),
2859                            }))
2860                            .unwrap();
2861                        }
2862                    }
2863                }
2864                Ok(None) => {}
2865                Err(err) => {
2866                    // TODO - create a special 'error' entry in the entries tree to mark this
2867                    log::error!("error reading file on event {:?}", err);
2868                }
2869            }
2870        }
2871
2872        Some(event_paths)
2873    }
2874
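        /// Recomputes ignore status for directories whose `.gitignore` files changed in the
        /// latest scan, dropping ignore data for `.gitignore` files that no longer exist and
        /// fanning the recomputation out across the executor's worker threads.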
2875    async fn update_ignore_statuses(&self) {
2876        use futures::FutureExt as _;
2877
2878        let mut snapshot = self.snapshot.lock().clone();
2879        let mut ignores_to_update = Vec::new();
2880        let mut ignores_to_delete = Vec::new();
2881        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2882            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2883                if *scan_id > snapshot.completed_scan_id
2884                    && snapshot.entry_for_path(parent_path).is_some()
2885                {
2886                    ignores_to_update.push(parent_abs_path.clone());
2887                }
2888
2889                let ignore_path = parent_path.join(&*GITIGNORE);
2890                if snapshot.entry_for_path(ignore_path).is_none() {
2891                    ignores_to_delete.push(parent_abs_path.clone());
2892                }
2893            }
2894        }
2895
2896        for parent_abs_path in ignores_to_delete {
2897            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2898            self.snapshot
2899                .lock()
2900                .ignores_by_parent_abs_path
2901                .remove(&parent_abs_path);
2902        }
2903
2904        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2905        ignores_to_update.sort_unstable();
2906        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2907        while let Some(parent_abs_path) = ignores_to_update.next() {
2908            while ignores_to_update
2909                .peek()
2910                .map_or(false, |p| p.starts_with(&parent_abs_path))
2911            {
2912                ignores_to_update.next().unwrap();
2913            }
2914
2915            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2916            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
2917                abs_path: parent_abs_path,
2918                ignore_stack,
2919                ignore_queue: ignore_queue_tx.clone(),
2920            }))
2921            .unwrap();
2922        }
2923        drop(ignore_queue_tx);
2924
2925        self.executor
2926            .scoped(|scope| {
2927                for _ in 0..self.executor.num_cpus() {
2928                    scope.spawn(async {
2929                        loop {
2930                            select_biased! {
2931                                // Process any path refresh requests before moving on to process
2932                                // the queue of ignore statuses.
2933                                request = self.refresh_requests_rx.recv().fuse() => {
2934                                    let Ok((paths, barrier)) = request else { break };
2935                                    if !self.process_refresh_request(paths, barrier).await {
2936                                        return;
2937                                    }
2938                                }
2939
2940                                // Recursively process directories whose ignores have changed.
2941                                job = ignore_queue_rx.recv().fuse() => {
2942                                    let Ok(job) = job else { break };
2943                                    self.update_ignore_status(job, &snapshot).await;
2944                                }
2945                            }
2946                        }
2947                    });
2948                }
2949            })
2950            .await;
2951    }
2952
2953    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2954        let mut ignore_stack = job.ignore_stack;
2955        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2956            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2957        }
2958
2959        let mut entries_by_id_edits = Vec::new();
2960        let mut entries_by_path_edits = Vec::new();
2961        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2962        for mut entry in snapshot.child_entries(path).cloned() {
2963            let was_ignored = entry.is_ignored;
2964            let abs_path = snapshot.abs_path().join(&entry.path);
2965            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2966            if entry.is_dir() {
2967                let child_ignore_stack = if entry.is_ignored {
2968                    IgnoreStack::all()
2969                } else {
2970                    ignore_stack.clone()
2971                };
2972                job.ignore_queue
2973                    .send(UpdateIgnoreStatusJob {
2974                        abs_path: abs_path.into(),
2975                        ignore_stack: child_ignore_stack,
2976                        ignore_queue: job.ignore_queue.clone(),
2977                    })
2978                    .await
2979                    .unwrap();
2980            }
2981
2982            if entry.is_ignored != was_ignored {
2983                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2984                path_entry.scan_id = snapshot.scan_id;
2985                path_entry.is_ignored = entry.is_ignored;
2986                entries_by_id_edits.push(Edit::Insert(path_entry));
2987                entries_by_path_edits.push(Edit::Insert(entry));
2988            }
2989        }
2990
2991        let mut snapshot = self.snapshot.lock();
2992        snapshot.entries_by_path.edit(entries_by_path_edits, &());
2993        snapshot.entries_by_id.edit(entries_by_id_edits, &());
2994    }
2995
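        // Compare the old and new snapshots along the given event paths, producing a map
        // from each affected path to the kind of change that occurred (added, removed,
        // updated, or added-or-updated when the event predates the initial scan).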
2996    fn build_change_set(
2997        &self,
2998        old_snapshot: &Snapshot,
2999        new_snapshot: &Snapshot,
3000        event_paths: Vec<Arc<Path>>,
3001    ) -> HashMap<Arc<Path>, PathChange> {
3002        use PathChange::{Added, AddedOrUpdated, Removed, Updated};
3003
3004        let mut changes = HashMap::default();
3005        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
3006        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
3007        let received_before_initialized = !self.finished_initial_scan;
3008
3009        for path in event_paths {
3010            let path = PathKey(path);
3011            old_paths.seek(&path, Bias::Left, &());
3012            new_paths.seek(&path, Bias::Left, &());
3013
3014            loop {
3015                match (old_paths.item(), new_paths.item()) {
3016                    (Some(old_entry), Some(new_entry)) => {
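                            // Both cursors have moved past this event path and everything
                            // beneath it; move on to the next event path.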
3017                        if old_entry.path > path.0
3018                            && new_entry.path > path.0
3019                            && !old_entry.path.starts_with(&path.0)
3020                            && !new_entry.path.starts_with(&path.0)
3021                        {
3022                            break;
3023                        }
3024
3025                        match Ord::cmp(&old_entry.path, &new_entry.path) {
3026                            Ordering::Less => {
3027                                changes.insert(old_entry.path.clone(), Removed);
3028                                old_paths.next(&());
3029                            }
3030                            Ordering::Equal => {
3031                                if received_before_initialized {
3032                                    // If the worktree was not fully initialized when this event was generated,
3033                                    // we can't know whether this entry was added during the scan or whether
3034                                    // it was merely updated.
3035                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
3036                                } else if old_entry.mtime != new_entry.mtime {
3037                                    changes.insert(new_entry.path.clone(), Updated);
3038                                }
3039                                old_paths.next(&());
3040                                new_paths.next(&());
3041                            }
3042                            Ordering::Greater => {
3043                                changes.insert(new_entry.path.clone(), Added);
3044                                new_paths.next(&());
3045                            }
3046                        }
3047                    }
3048                    (Some(old_entry), None) => {
3049                        changes.insert(old_entry.path.clone(), Removed);
3050                        old_paths.next(&());
3051                    }
3052                    (None, Some(new_entry)) => {
3053                        changes.insert(new_entry.path.clone(), Added);
3054                        new_paths.next(&());
3055                    }
3056                    (None, None) => break,
3057                }
3058            }
3059        }
3060        changes
3061    }
3062
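        // Throttle for scan-progress notifications: never resolves while scanning is not
        // running, uses a simulated random delay under the fake filesystem in tests, and
        // otherwise waits 100ms.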
3063    async fn progress_timer(&self, running: bool) {
3064        if !running {
3065            return futures::future::pending().await;
3066        }
3067
3068        #[cfg(any(test, feature = "test-support"))]
3069        if self.fs.is_fake() {
3070            return self.executor.simulate_random_delay().await;
3071        }
3072
3073        smol::Timer::after(Duration::from_millis(100)).await;
3074    }
3075}
3076
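    // Extend the root's character bag with the lowercased characters of `path`, producing
    // the bag used when fuzzy-matching entry paths.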
3077fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3078    let mut result = root_char_bag;
3079    result.extend(
3080        path.to_string_lossy()
3081            .chars()
3082            .map(|c| c.to_ascii_lowercase()),
3083    );
3084    result
3085}
3086
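    // A unit of work for the background scanner: one directory to scan, the ignore stack
    // in effect at that directory, and the inodes of its ancestor directories (so the
    // scanner can avoid descending into symlink cycles).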
3087struct ScanJob {
3088    abs_path: Arc<Path>,
3089    path: Arc<Path>,
3090    ignore_stack: Arc<IgnoreStack>,
3091    scan_queue: Sender<ScanJob>,
3092    ancestor_inodes: TreeSet<u64>,
3093}
3094
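    // A unit of work for re-evaluating ignore status: one directory whose entries should
    // be re-checked against the given ignore stack, plus the queue used to enqueue its
    // child directories.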
3095struct UpdateIgnoreStatusJob {
3096    abs_path: Arc<Path>,
3097    ignore_stack: Arc<IgnoreStack>,
3098    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3099}
3100
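    // Test-support extension methods on worktree model handles.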
3101pub trait WorktreeHandle {
3102    #[cfg(any(test, feature = "test-support"))]
3103    fn flush_fs_events<'a>(
3104        &self,
3105        cx: &'a gpui::TestAppContext,
3106    ) -> futures::future::LocalBoxFuture<'a, ()>;
3107}
3108
3109impl WorktreeHandle for ModelHandle<Worktree> {
3110    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
3111    // occurred before the worktree was constructed. These events can cause the worktree to perform
3112    // extra directory scans and emit extra scan-state notifications.
3113    //
3114    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3115    // to ensure that all redundant FS events have already been processed.
3116    #[cfg(any(test, feature = "test-support"))]
3117    fn flush_fs_events<'a>(
3118        &self,
3119        cx: &'a gpui::TestAppContext,
3120    ) -> futures::future::LocalBoxFuture<'a, ()> {
3121        use smol::future::FutureExt;
3122
3123        let filename = "fs-event-sentinel";
3124        let tree = self.clone();
3125        let (fs, root_path) = self.read_with(cx, |tree, _| {
3126            let tree = tree.as_local().unwrap();
3127            (tree.fs.clone(), tree.abs_path().clone())
3128        });
3129
3130        async move {
3131            fs.create_file(&root_path.join(filename), Default::default())
3132                .await
3133                .unwrap();
3134            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
3135                .await;
3136
3137            fs.remove_file(&root_path.join(filename), Default::default())
3138                .await
3139                .unwrap();
3140            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
3141                .await;
3142
3143            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3144                .await;
3145        }
3146        .boxed_local()
3147    }
3148}
3149
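    // Running totals accumulated while walking the entry tree. Used as a sum-tree
    // dimension so a `Traversal` can seek by entry count, with or without directories
    // and ignored entries included.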
3150#[derive(Clone, Debug)]
3151struct TraversalProgress<'a> {
3152    max_path: &'a Path,
3153    count: usize,
3154    visible_count: usize,
3155    file_count: usize,
3156    visible_file_count: usize,
3157}
3158
3159impl<'a> TraversalProgress<'a> {
3160    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3161        match (include_ignored, include_dirs) {
3162            (true, true) => self.count,
3163            (true, false) => self.file_count,
3164            (false, true) => self.visible_count,
3165            (false, false) => self.visible_file_count,
3166        }
3167    }
3168}
3169
3170impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3171    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3172        self.max_path = summary.max_path.as_ref();
3173        self.count += summary.count;
3174        self.visible_count += summary.visible_count;
3175        self.file_count += summary.file_count;
3176        self.visible_file_count += summary.visible_file_count;
3177    }
3178}
3179
3180impl<'a> Default for TraversalProgress<'a> {
3181    fn default() -> Self {
3182        Self {
3183            max_path: Path::new(""),
3184            count: 0,
3185            visible_count: 0,
3186            file_count: 0,
3187            visible_file_count: 0,
3188        }
3189    }
3190}
3191
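    // A cursor-based iterator over a snapshot's entries in path order, optionally
    // skipping directories and/or ignored entries.
    //
    // A minimal usage sketch, assuming a snapshot obtained as in the tests below
    // (`files` is the traversal constructor exercised by `check_invariants`):
    //
    //     let mut traversal = snapshot.files(false, 0);
    //     while let Some(entry) = traversal.entry() {
    //         println!("{:?}", entry.path);
    //         traversal.advance();
    //     }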
3192pub struct Traversal<'a> {
3193    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3194    include_ignored: bool,
3195    include_dirs: bool,
3196}
3197
3198impl<'a> Traversal<'a> {
3199    pub fn advance(&mut self) -> bool {
3200        self.advance_to_offset(self.offset() + 1)
3201    }
3202
3203    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
3204        self.cursor.seek_forward(
3205            &TraversalTarget::Count {
3206                count: offset,
3207                include_dirs: self.include_dirs,
3208                include_ignored: self.include_ignored,
3209            },
3210            Bias::Right,
3211            &(),
3212        )
3213    }
3214
3215    pub fn advance_to_sibling(&mut self) -> bool {
3216        while let Some(entry) = self.cursor.item() {
3217            self.cursor.seek_forward(
3218                &TraversalTarget::PathSuccessor(&entry.path),
3219                Bias::Left,
3220                &(),
3221            );
3222            if let Some(entry) = self.cursor.item() {
3223                if (self.include_dirs || !entry.is_dir())
3224                    && (self.include_ignored || !entry.is_ignored)
3225                {
3226                    return true;
3227                }
3228            }
3229        }
3230        false
3231    }
3232
3233    pub fn entry(&self) -> Option<&'a Entry> {
3234        self.cursor.item()
3235    }
3236
3237    pub fn offset(&self) -> usize {
3238        self.cursor
3239            .start()
3240            .count(self.include_dirs, self.include_ignored)
3241    }
3242}
3243
3244impl<'a> Iterator for Traversal<'a> {
3245    type Item = &'a Entry;
3246
3247    fn next(&mut self) -> Option<Self::Item> {
3248        if let Some(item) = self.entry() {
3249            self.advance();
3250            Some(item)
3251        } else {
3252            None
3253        }
3254    }
3255}
3256
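    // Seek targets for a `Traversal`'s cursor: an exact path, the first entry that does
    // not lie beneath a given path, or an absolute position expressed as an entry count.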
3257#[derive(Debug)]
3258enum TraversalTarget<'a> {
3259    Path(&'a Path),
3260    PathSuccessor(&'a Path),
3261    Count {
3262        count: usize,
3263        include_ignored: bool,
3264        include_dirs: bool,
3265    },
3266}
3267
3268impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3269    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3270        match self {
3271            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3272            TraversalTarget::PathSuccessor(path) => {
3273                if !cursor_location.max_path.starts_with(path) {
3274                    Ordering::Equal
3275                } else {
3276                    Ordering::Greater
3277                }
3278            }
3279            TraversalTarget::Count {
3280                count,
3281                include_dirs,
3282                include_ignored,
3283            } => Ord::cmp(
3284                count,
3285                &cursor_location.count(*include_dirs, *include_ignored),
3286            ),
3287        }
3288    }
3289}
3290
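    // Iterates over the immediate children of `parent_path` by repeatedly advancing an
    // inner traversal to the next sibling.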
3291struct ChildEntriesIter<'a> {
3292    parent_path: &'a Path,
3293    traversal: Traversal<'a>,
3294}
3295
3296impl<'a> Iterator for ChildEntriesIter<'a> {
3297    type Item = &'a Entry;
3298
3299    fn next(&mut self) -> Option<Self::Item> {
3300        if let Some(item) = self.traversal.entry() {
3301            if item.path.starts_with(&self.parent_path) {
3302                self.traversal.advance_to_sibling();
3303                return Some(item);
3304            }
3305        }
3306        None
3307    }
3308}
3309
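    // Conversions between local worktree entries and their protobuf representation; the
    // reverse conversion fails if the remote entry is missing an mtime.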
3310impl<'a> From<&'a Entry> for proto::Entry {
3311    fn from(entry: &'a Entry) -> Self {
3312        Self {
3313            id: entry.id.to_proto(),
3314            is_dir: entry.is_dir(),
3315            path: entry.path.to_string_lossy().into(),
3316            inode: entry.inode,
3317            mtime: Some(entry.mtime.into()),
3318            is_symlink: entry.is_symlink,
3319            is_ignored: entry.is_ignored,
3320        }
3321    }
3322}
3323
3324impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3325    type Error = anyhow::Error;
3326
3327    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3328        if let Some(mtime) = entry.mtime {
3329            let kind = if entry.is_dir {
3330                EntryKind::Dir
3331            } else {
3332                let mut char_bag = *root_char_bag;
3333                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3334                EntryKind::File(char_bag)
3335            };
3336            let path: Arc<Path> = PathBuf::from(entry.path).into();
3337            Ok(Entry {
3338                id: ProjectEntryId::from_proto(entry.id),
3339                kind,
3340                path,
3341                inode: entry.inode,
3342                mtime: mtime.into(),
3343                is_symlink: entry.is_symlink,
3344                is_ignored: entry.is_ignored,
3345            })
3346        } else {
3347            Err(anyhow!(
3348                "missing mtime in remote worktree entry {:?}",
3349                entry.path
3350            ))
3351        }
3352    }
3353}
3354
3355#[cfg(test)]
3356mod tests {
3357    use super::*;
3358    use fs::{FakeFs, RealFs};
3359    use gpui::{executor::Deterministic, TestAppContext};
3360    use pretty_assertions::assert_eq;
3361    use rand::prelude::*;
3362    use serde_json::json;
3363    use std::{env, fmt::Write};
3364    use util::{http::FakeHttpClient, test::temp_tree};
3365
3366    #[gpui::test]
3367    async fn test_traversal(cx: &mut TestAppContext) {
3368        let fs = FakeFs::new(cx.background());
3369        fs.insert_tree(
3370            "/root",
3371            json!({
3372               ".gitignore": "a/b\n",
3373               "a": {
3374                   "b": "",
3375                   "c": "",
3376               }
3377            }),
3378        )
3379        .await;
3380
3381        let http_client = FakeHttpClient::with_404_response();
3382        let client = cx.read(|cx| Client::new(http_client, cx));
3383
3384        let tree = Worktree::local(
3385            client,
3386            Path::new("/root"),
3387            true,
3388            fs,
3389            Default::default(),
3390            &mut cx.to_async(),
3391        )
3392        .await
3393        .unwrap();
3394        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3395            .await;
3396
3397        tree.read_with(cx, |tree, _| {
3398            assert_eq!(
3399                tree.entries(false)
3400                    .map(|entry| entry.path.as_ref())
3401                    .collect::<Vec<_>>(),
3402                vec![
3403                    Path::new(""),
3404                    Path::new(".gitignore"),
3405                    Path::new("a"),
3406                    Path::new("a/c"),
3407                ]
3408            );
3409            assert_eq!(
3410                tree.entries(true)
3411                    .map(|entry| entry.path.as_ref())
3412                    .collect::<Vec<_>>(),
3413                vec![
3414                    Path::new(""),
3415                    Path::new(".gitignore"),
3416                    Path::new("a"),
3417                    Path::new("a/b"),
3418                    Path::new("a/c"),
3419                ]
3420            );
3421        })
3422    }
3423
3424    #[gpui::test(iterations = 10)]
3425    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3426        let fs = FakeFs::new(cx.background());
3427        fs.insert_tree(
3428            "/root",
3429            json!({
3430                "lib": {
3431                    "a": {
3432                        "a.txt": ""
3433                    },
3434                    "b": {
3435                        "b.txt": ""
3436                    }
3437                }
3438            }),
3439        )
3440        .await;
3441        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3442        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3443
3444        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3445        let tree = Worktree::local(
3446            client,
3447            Path::new("/root"),
3448            true,
3449            fs.clone(),
3450            Default::default(),
3451            &mut cx.to_async(),
3452        )
3453        .await
3454        .unwrap();
3455
3456        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3457            .await;
3458
3459        tree.read_with(cx, |tree, _| {
3460            assert_eq!(
3461                tree.entries(false)
3462                    .map(|entry| entry.path.as_ref())
3463                    .collect::<Vec<_>>(),
3464                vec![
3465                    Path::new(""),
3466                    Path::new("lib"),
3467                    Path::new("lib/a"),
3468                    Path::new("lib/a/a.txt"),
3469                    Path::new("lib/a/lib"),
3470                    Path::new("lib/b"),
3471                    Path::new("lib/b/b.txt"),
3472                    Path::new("lib/b/lib"),
3473                ]
3474            );
3475        });
3476
3477        fs.rename(
3478            Path::new("/root/lib/a/lib"),
3479            Path::new("/root/lib/a/lib-2"),
3480            Default::default(),
3481        )
3482        .await
3483        .unwrap();
3484        executor.run_until_parked();
3485        tree.read_with(cx, |tree, _| {
3486            assert_eq!(
3487                tree.entries(false)
3488                    .map(|entry| entry.path.as_ref())
3489                    .collect::<Vec<_>>(),
3490                vec![
3491                    Path::new(""),
3492                    Path::new("lib"),
3493                    Path::new("lib/a"),
3494                    Path::new("lib/a/a.txt"),
3495                    Path::new("lib/a/lib-2"),
3496                    Path::new("lib/b"),
3497                    Path::new("lib/b/b.txt"),
3498                    Path::new("lib/b/lib"),
3499                ]
3500            );
3501        });
3502    }
3503
3504    #[gpui::test]
3505    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3506        let parent_dir = temp_tree(json!({
3507            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3508            "tree": {
3509                ".git": {},
3510                ".gitignore": "ignored-dir\n",
3511                "tracked-dir": {
3512                    "tracked-file1": "",
3513                    "ancestor-ignored-file1": "",
3514                },
3515                "ignored-dir": {
3516                    "ignored-file1": ""
3517                }
3518            }
3519        }));
3520        let dir = parent_dir.path().join("tree");
3521
3522        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3523
3524        let tree = Worktree::local(
3525            client,
3526            dir.as_path(),
3527            true,
3528            Arc::new(RealFs),
3529            Default::default(),
3530            &mut cx.to_async(),
3531        )
3532        .await
3533        .unwrap();
3534        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3535            .await;
3536        tree.flush_fs_events(cx).await;
3537        cx.read(|cx| {
3538            let tree = tree.read(cx);
3539            assert!(
3540                !tree
3541                    .entry_for_path("tracked-dir/tracked-file1")
3542                    .unwrap()
3543                    .is_ignored
3544            );
3545            assert!(
3546                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3547                    .unwrap()
3548                    .is_ignored
3549            );
3550            assert!(
3551                tree.entry_for_path("ignored-dir/ignored-file1")
3552                    .unwrap()
3553                    .is_ignored
3554            );
3555        });
3556
3557        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3558        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3559        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3560        tree.flush_fs_events(cx).await;
3561        cx.read(|cx| {
3562            let tree = tree.read(cx);
3563            assert!(
3564                !tree
3565                    .entry_for_path("tracked-dir/tracked-file2")
3566                    .unwrap()
3567                    .is_ignored
3568            );
3569            assert!(
3570                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3571                    .unwrap()
3572                    .is_ignored
3573            );
3574            assert!(
3575                tree.entry_for_path("ignored-dir/ignored-file2")
3576                    .unwrap()
3577                    .is_ignored
3578            );
3579            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3580        });
3581    }
3582
3583    #[gpui::test]
3584    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3585        let root = temp_tree(json!({
3586            "dir1": {
3587                ".git": {},
3588                "deps": {
3589                    "dep1": {
3590                        ".git": {},
3591                        "src": {
3592                            "a.txt": ""
3593                        }
3594                    }
3595                },
3596                "src": {
3597                    "b.txt": ""
3598                }
3599            },
3600            "c.txt": "",
3601        }));
3602
3603        let http_client = FakeHttpClient::with_404_response();
3604        let client = cx.read(|cx| Client::new(http_client, cx));
3605        let tree = Worktree::local(
3606            client,
3607            root.path(),
3608            true,
3609            Arc::new(RealFs),
3610            Default::default(),
3611            &mut cx.to_async(),
3612        )
3613        .await
3614        .unwrap();
3615
3616        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3617            .await;
3618        tree.flush_fs_events(cx).await;
3619
3620        tree.read_with(cx, |tree, _cx| {
3621            let tree = tree.as_local().unwrap();
3622
3623            assert!(tree.repo_for("c.txt".as_ref()).is_none());
3624
3625            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3626            assert_eq!(
3627                entry
3628                    .work_directory(tree)
3629                    .map(|directory| directory.as_ref().to_owned()),
3630                Some(Path::new("dir1").to_owned())
3631            );
3632
3633            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3634            assert_eq!(
3635                entry
3636                    .work_directory(tree)
3637                    .map(|directory| directory.as_ref().to_owned()),
3638                Some(Path::new("dir1/deps/dep1").to_owned())
3639            );
3640        });
3641
3642        let repo_update_events = Arc::new(Mutex::new(vec![]));
3643        tree.update(cx, |_, cx| {
3644            let repo_update_events = repo_update_events.clone();
3645            cx.subscribe(&tree, move |_, _, event, _| {
3646                if let Event::UpdatedGitRepositories(update) = event {
3647                    repo_update_events.lock().push(update.clone());
3648                }
3649            })
3650            .detach();
3651        });
3652
3653        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3654        tree.flush_fs_events(cx).await;
3655
3656        assert_eq!(
3657            repo_update_events.lock()[0]
3658                .keys()
3659                .cloned()
3660                .collect::<Vec<Arc<Path>>>(),
3661            vec![Path::new("dir1").into()]
3662        );
3663
3664        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3665        tree.flush_fs_events(cx).await;
3666
3667        tree.read_with(cx, |tree, _cx| {
3668            let tree = tree.as_local().unwrap();
3669
3670            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3671        });
3672    }
3673
3674    #[gpui::test]
3675    async fn test_write_file(cx: &mut TestAppContext) {
3676        let dir = temp_tree(json!({
3677            ".git": {},
3678            ".gitignore": "ignored-dir\n",
3679            "tracked-dir": {},
3680            "ignored-dir": {}
3681        }));
3682
3683        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3684
3685        let tree = Worktree::local(
3686            client,
3687            dir.path(),
3688            true,
3689            Arc::new(RealFs),
3690            Default::default(),
3691            &mut cx.to_async(),
3692        )
3693        .await
3694        .unwrap();
3695        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3696            .await;
3697        tree.flush_fs_events(cx).await;
3698
3699        tree.update(cx, |tree, cx| {
3700            tree.as_local().unwrap().write_file(
3701                Path::new("tracked-dir/file.txt"),
3702                "hello".into(),
3703                Default::default(),
3704                cx,
3705            )
3706        })
3707        .await
3708        .unwrap();
3709        tree.update(cx, |tree, cx| {
3710            tree.as_local().unwrap().write_file(
3711                Path::new("ignored-dir/file.txt"),
3712                "world".into(),
3713                Default::default(),
3714                cx,
3715            )
3716        })
3717        .await
3718        .unwrap();
3719
3720        tree.read_with(cx, |tree, _| {
3721            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3722            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3723            assert!(!tracked.is_ignored);
3724            assert!(ignored.is_ignored);
3725        });
3726    }
3727
3728    #[gpui::test(iterations = 30)]
3729    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3730        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3731
3732        let fs = FakeFs::new(cx.background());
3733        fs.insert_tree(
3734            "/root",
3735            json!({
3736                "b": {},
3737                "c": {},
3738                "d": {},
3739            }),
3740        )
3741        .await;
3742
3743        let tree = Worktree::local(
3744            client,
3745            "/root".as_ref(),
3746            true,
3747            fs,
3748            Default::default(),
3749            &mut cx.to_async(),
3750        )
3751        .await
3752        .unwrap();
3753
3754        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3755
3756        let entry = tree
3757            .update(cx, |tree, cx| {
3758                tree.as_local_mut()
3759                    .unwrap()
3760                    .create_entry("a/e".as_ref(), true, cx)
3761            })
3762            .await
3763            .unwrap();
3764        assert!(entry.is_dir());
3765
3766        cx.foreground().run_until_parked();
3767        tree.read_with(cx, |tree, _| {
3768            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3769        });
3770
3771        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3772        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3773        snapshot1.apply_remote_update(update).unwrap();
3774        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3775    }
3776
3777    #[gpui::test(iterations = 100)]
3778    async fn test_random_worktree_operations_during_initial_scan(
3779        cx: &mut TestAppContext,
3780        mut rng: StdRng,
3781    ) {
3782        let operations = env::var("OPERATIONS")
3783            .map(|o| o.parse().unwrap())
3784            .unwrap_or(5);
3785        let initial_entries = env::var("INITIAL_ENTRIES")
3786            .map(|o| o.parse().unwrap())
3787            .unwrap_or(20);
3788
3789        let root_dir = Path::new("/test");
3790        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3791        fs.as_fake().insert_tree(root_dir, json!({})).await;
3792        for _ in 0..initial_entries {
3793            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3794        }
3795        log::info!("generated initial tree");
3796
3797        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3798        let worktree = Worktree::local(
3799            client.clone(),
3800            root_dir,
3801            true,
3802            fs.clone(),
3803            Default::default(),
3804            &mut cx.to_async(),
3805        )
3806        .await
3807        .unwrap();
3808
3809        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3810
3811        for _ in 0..operations {
3812            worktree
3813                .update(cx, |worktree, cx| {
3814                    randomly_mutate_worktree(worktree, &mut rng, cx)
3815                })
3816                .await
3817                .log_err();
3818            worktree.read_with(cx, |tree, _| {
3819                tree.as_local().unwrap().snapshot.check_invariants()
3820            });
3821
3822            if rng.gen_bool(0.6) {
3823                let new_snapshot =
3824                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3825                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
3826                snapshot.apply_remote_update(update.clone()).unwrap();
3827                assert_eq!(
3828                    snapshot.to_vec(true),
3829                    new_snapshot.to_vec(true),
3830                    "incorrect snapshot after update {:?}",
3831                    update
3832                );
3833            }
3834        }
3835
3836        worktree
3837            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3838            .await;
3839        worktree.read_with(cx, |tree, _| {
3840            tree.as_local().unwrap().snapshot.check_invariants()
3841        });
3842
3843        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3844        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
3845        snapshot.apply_remote_update(update.clone()).unwrap();
3846        assert_eq!(
3847            snapshot.to_vec(true),
3848            new_snapshot.to_vec(true),
3849            "incorrect snapshot after update {:?}",
3850            update
3851        );
3852    }
3853
3854    #[gpui::test(iterations = 100)]
3855    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3856        let operations = env::var("OPERATIONS")
3857            .map(|o| o.parse().unwrap())
3858            .unwrap_or(40);
3859        let initial_entries = env::var("INITIAL_ENTRIES")
3860            .map(|o| o.parse().unwrap())
3861            .unwrap_or(20);
3862
3863        let root_dir = Path::new("/test");
3864        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3865        fs.as_fake().insert_tree(root_dir, json!({})).await;
3866        for _ in 0..initial_entries {
3867            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3868        }
3869        log::info!("generated initial tree");
3870
3871        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3872        let worktree = Worktree::local(
3873            client.clone(),
3874            root_dir,
3875            true,
3876            fs.clone(),
3877            Default::default(),
3878            &mut cx.to_async(),
3879        )
3880        .await
3881        .unwrap();
3882
3883        worktree
3884            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3885            .await;
3886
3887        // After the initial scan is complete, the `UpdatedEntries` event can
3888        // be used to follow along with all changes to the worktree's snapshot.
3889        worktree.update(cx, |tree, cx| {
3890            let mut paths = tree
3891                .as_local()
3892                .unwrap()
3893                .paths()
3894                .cloned()
3895                .collect::<Vec<_>>();
3896
3897            cx.subscribe(&worktree, move |tree, _, event, _| {
3898                if let Event::UpdatedEntries(changes) = event {
3899                    for (path, change_type) in changes.iter() {
3900                        let path = path.clone();
3901                        let ix = match paths.binary_search(&path) {
3902                            Ok(ix) | Err(ix) => ix,
3903                        };
3904                        match change_type {
3905                            PathChange::Added => {
3906                                assert_ne!(paths.get(ix), Some(&path));
3907                                paths.insert(ix, path);
3908                            }
3909                            PathChange::Removed => {
3910                                assert_eq!(paths.get(ix), Some(&path));
3911                                paths.remove(ix);
3912                            }
3913                            PathChange::Updated => {
3914                                assert_eq!(paths.get(ix), Some(&path));
3915                            }
3916                            PathChange::AddedOrUpdated => {
3917                                if paths[ix] != path {
3918                                    paths.insert(ix, path);
3919                                }
3920                            }
3921                        }
3922                    }
3923                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
3924                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
3925                }
3926            })
3927            .detach();
3928        });
3929
3930        let mut snapshots = Vec::new();
3931        let mut mutations_len = operations;
3932        while mutations_len > 1 {
3933            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3934            let buffered_event_count = fs.as_fake().buffered_event_count().await;
3935            if buffered_event_count > 0 && rng.gen_bool(0.3) {
3936                let len = rng.gen_range(0..=buffered_event_count);
3937                log::info!("flushing {} events", len);
3938                fs.as_fake().flush_events(len).await;
3939            } else {
3940                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
3941                mutations_len -= 1;
3942            }
3943
3944            cx.foreground().run_until_parked();
3945            if rng.gen_bool(0.2) {
3946                log::info!("storing snapshot {}", snapshots.len());
3947                let snapshot =
3948                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3949                snapshots.push(snapshot);
3950            }
3951        }
3952
3953        log::info!("quiescing");
3954        fs.as_fake().flush_events(usize::MAX).await;
3955        cx.foreground().run_until_parked();
3956        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3957        snapshot.check_invariants();
3958
3959        {
3960            let new_worktree = Worktree::local(
3961                client.clone(),
3962                root_dir,
3963                true,
3964                fs.clone(),
3965                Default::default(),
3966                &mut cx.to_async(),
3967            )
3968            .await
3969            .unwrap();
3970            new_worktree
3971                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3972                .await;
3973            let new_snapshot =
3974                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3975            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
3976        }
3977
3978        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
3979            let include_ignored = rng.gen::<bool>();
3980            if !include_ignored {
3981                let mut entries_by_path_edits = Vec::new();
3982                let mut entries_by_id_edits = Vec::new();
3983                for entry in prev_snapshot
3984                    .entries_by_id
3985                    .cursor::<()>()
3986                    .filter(|e| e.is_ignored)
3987                {
3988                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3989                    entries_by_id_edits.push(Edit::Remove(entry.id));
3990                }
3991
3992                prev_snapshot
3993                    .entries_by_path
3994                    .edit(entries_by_path_edits, &());
3995                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3996            }
3997
3998            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
3999            prev_snapshot.apply_remote_update(update.clone()).unwrap();
4000            assert_eq!(
4001                prev_snapshot.to_vec(include_ignored),
4002                snapshot.to_vec(include_ignored),
4003                "wrong update for snapshot {i}. update: {:?}",
4004                update
4005            );
4006        }
4007    }
4008
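        // Apply one random operation to the worktree through its public API: delete an
        // entry, rename it to a random destination, create a new child entry, or
        // overwrite a file's contents.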
4009    fn randomly_mutate_worktree(
4010        worktree: &mut Worktree,
4011        rng: &mut impl Rng,
4012        cx: &mut ModelContext<Worktree>,
4013    ) -> Task<Result<()>> {
4014        let worktree = worktree.as_local_mut().unwrap();
4015        let snapshot = worktree.snapshot();
4016        let entry = snapshot.entries(false).choose(rng).unwrap();
4017
4018        match rng.gen_range(0_u32..100) {
4019            0..=33 if entry.path.as_ref() != Path::new("") => {
4020                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
4021                worktree.delete_entry(entry.id, cx).unwrap()
4022            }
4023            ..=66 if entry.path.as_ref() != Path::new("") => {
4024                let other_entry = snapshot.entries(false).choose(rng).unwrap();
4025                let new_parent_path = if other_entry.is_dir() {
4026                    other_entry.path.clone()
4027                } else {
4028                    other_entry.path.parent().unwrap().into()
4029                };
4030                let mut new_path = new_parent_path.join(gen_name(rng));
4031                if new_path.starts_with(&entry.path) {
4032                    new_path = gen_name(rng).into();
4033                }
4034
4035                log::info!(
4036                    "renaming entry {:?} ({}) to {:?}",
4037                    entry.path,
4038                    entry.id.0,
4039                    new_path
4040                );
4041                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
4042                cx.foreground().spawn(async move {
4043                    task.await?;
4044                    Ok(())
4045                })
4046            }
4047            _ => {
4048                let task = if entry.is_dir() {
4049                    let child_path = entry.path.join(gen_name(rng));
4050                    let is_dir = rng.gen_bool(0.3);
4051                    log::info!(
4052                        "creating {} at {:?}",
4053                        if is_dir { "dir" } else { "file" },
4054                        child_path,
4055                    );
4056                    worktree.create_entry(child_path, is_dir, cx)
4057                } else {
4058                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
4059                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
4060                };
4061                cx.foreground().spawn(async move {
4062                    task.await?;
4063                    Ok(())
4064                })
4065            }
4066        }
4067    }
4068
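        // Apply one random mutation directly to the fake filesystem: create a file or
        // directory, write out a random `.gitignore`, or rename/delete an existing path.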
4069    async fn randomly_mutate_fs(
4070        fs: &Arc<dyn Fs>,
4071        root_path: &Path,
4072        insertion_probability: f64,
4073        rng: &mut impl Rng,
4074    ) {
4075        let mut files = Vec::new();
4076        let mut dirs = Vec::new();
4077        for path in fs.as_fake().paths() {
4078            if path.starts_with(root_path) {
4079                if fs.is_file(&path).await {
4080                    files.push(path);
4081                } else {
4082                    dirs.push(path);
4083                }
4084            }
4085        }
4086
4087        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4088            let path = dirs.choose(rng).unwrap();
4089            let new_path = path.join(gen_name(rng));
4090
4091            if rng.gen() {
4092                log::info!(
4093                    "creating dir {:?}",
4094                    new_path.strip_prefix(root_path).unwrap()
4095                );
4096                fs.create_dir(&new_path).await.unwrap();
4097            } else {
4098                log::info!(
4099                    "creating file {:?}",
4100                    new_path.strip_prefix(root_path).unwrap()
4101                );
4102                fs.create_file(&new_path, Default::default()).await.unwrap();
4103            }
4104        } else if rng.gen_bool(0.05) {
4105            let ignore_dir_path = dirs.choose(rng).unwrap();
4106            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4107
4108            let subdirs = dirs
4109                .iter()
4110                .filter(|d| d.starts_with(&ignore_dir_path))
4111                .cloned()
4112                .collect::<Vec<_>>();
4113            let subfiles = files
4114                .iter()
4115                .filter(|d| d.starts_with(&ignore_dir_path))
4116                .cloned()
4117                .collect::<Vec<_>>();
4118            let files_to_ignore = {
4119                let len = rng.gen_range(0..=subfiles.len());
4120                subfiles.choose_multiple(rng, len)
4121            };
4122            let dirs_to_ignore = {
4123                let len = rng.gen_range(0..subdirs.len());
4124                subdirs.choose_multiple(rng, len)
4125            };
4126
4127            let mut ignore_contents = String::new();
4128            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4129                writeln!(
4130                    ignore_contents,
4131                    "{}",
4132                    path_to_ignore
4133                        .strip_prefix(&ignore_dir_path)
4134                        .unwrap()
4135                        .to_str()
4136                        .unwrap()
4137                )
4138                .unwrap();
4139            }
4140            log::info!(
4141                "creating gitignore {:?} with contents:\n{}",
4142                ignore_path.strip_prefix(&root_path).unwrap(),
4143                ignore_contents
4144            );
4145            fs.save(
4146                &ignore_path,
4147                &ignore_contents.as_str().into(),
4148                Default::default(),
4149            )
4150            .await
4151            .unwrap();
4152        } else {
4153            let old_path = {
4154                let file_path = files.choose(rng);
4155                let dir_path = dirs[1..].choose(rng);
4156                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4157            };
4158
4159            let is_rename = rng.gen();
4160            if is_rename {
4161                let new_path_parent = dirs
4162                    .iter()
4163                    .filter(|d| !d.starts_with(old_path))
4164                    .choose(rng)
4165                    .unwrap();
4166
4167                let overwrite_existing_dir =
4168                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4169                let new_path = if overwrite_existing_dir {
4170                    fs.remove_dir(
4171                        &new_path_parent,
4172                        RemoveOptions {
4173                            recursive: true,
4174                            ignore_if_not_exists: true,
4175                        },
4176                    )
4177                    .await
4178                    .unwrap();
4179                    new_path_parent.to_path_buf()
4180                } else {
4181                    new_path_parent.join(gen_name(rng))
4182                };
4183
4184                log::info!(
4185                    "renaming {:?} to {}{:?}",
4186                    old_path.strip_prefix(&root_path).unwrap(),
4187                    if overwrite_existing_dir {
4188                        "overwrite "
4189                    } else {
4190                        ""
4191                    },
4192                    new_path.strip_prefix(&root_path).unwrap()
4193                );
4194                fs.rename(
4195                    &old_path,
4196                    &new_path,
4197                    fs::RenameOptions {
4198                        overwrite: true,
4199                        ignore_if_exists: true,
4200                    },
4201                )
4202                .await
4203                .unwrap();
4204            } else if fs.is_file(&old_path).await {
4205                log::info!(
4206                    "deleting file {:?}",
4207                    old_path.strip_prefix(&root_path).unwrap()
4208                );
4209                fs.remove_file(old_path, Default::default()).await.unwrap();
4210            } else {
4211                log::info!(
4212                    "deleting dir {:?}",
4213                    old_path.strip_prefix(&root_path).unwrap()
4214                );
4215                fs.remove_dir(
4216                    &old_path,
4217                    RemoveOptions {
4218                        recursive: true,
4219                        ignore_if_not_exists: true,
4220                    },
4221                )
4222                .await
4223                .unwrap();
4224            }
4225        }
4226    }
4227
4228    fn gen_name(rng: &mut impl Rng) -> String {
4229        (0..6)
4230            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4231            .map(char::from)
4232            .collect()
4233    }
4234
4235    impl LocalSnapshot {
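            // Verify that the two entry trees agree with each other, that file traversals
            // yield exactly the file entries, that a stack-based walk over `child_entries`
            // visits paths in the same order as the entry tree and the traversal API, and
            // that every tracked `.gitignore` still has a corresponding entry.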
4236        fn check_invariants(&self) {
4237            assert_eq!(
4238                self.entries_by_path
4239                    .cursor::<()>()
4240                    .map(|e| (&e.path, e.id))
4241                    .collect::<Vec<_>>(),
4242                self.entries_by_id
4243                    .cursor::<()>()
4244                    .map(|e| (&e.path, e.id))
4245                    .collect::<collections::BTreeSet<_>>()
4246                    .into_iter()
4247                    .collect::<Vec<_>>(),
4248                "entries_by_path and entries_by_id are inconsistent"
4249            );
4250
4251            let mut files = self.files(true, 0);
4252            let mut visible_files = self.files(false, 0);
4253            for entry in self.entries_by_path.cursor::<()>() {
4254                if entry.is_file() {
4255                    assert_eq!(files.next().unwrap().inode, entry.inode);
4256                    if !entry.is_ignored {
4257                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4258                    }
4259                }
4260            }
4261
4262            assert!(files.next().is_none());
4263            assert!(visible_files.next().is_none());
4264
4265            let mut dfs_paths_via_stack = Vec::new();
4266            let mut stack = vec![Path::new("")];
4267            while let Some(path) = stack.pop() {
4268                dfs_paths_via_stack.push(path);
4269                let ix = stack.len();
4270                for child_entry in self.child_entries(path) {
4271                    stack.insert(ix, &child_entry.path);
4272                }
4273            }
4274
4275            let dfs_paths_via_iter = self
4276                .entries_by_path
4277                .cursor::<()>()
4278                .map(|e| e.path.as_ref())
4279                .collect::<Vec<_>>();
4280            assert_eq!(dfs_paths_via_stack, dfs_paths_via_iter);
4281
4282            let dfs_paths_via_traversal = self
4283                .entries(true)
4284                .map(|e| e.path.as_ref())
4285                .collect::<Vec<_>>();
4286            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4287
4288            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4289                let ignore_parent_path =
4290                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4291                assert!(self.entry_for_path(&ignore_parent_path).is_some());
4292                assert!(self
4293                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4294                    .is_some());
4295            }
4296        }
4297
4298        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4299            let mut paths = Vec::new();
4300            for entry in self.entries_by_path.cursor::<()>() {
4301                if include_ignored || !entry.is_ignored {
4302                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4303                }
4304            }
4305            paths.sort_by(|a, b| a.0.cmp(b.0));
4306            paths
4307        }
4308    }
4309}