//! worktree.rs — worktree state: local filesystem scanning and remote replication.

   1use super::{
   2    fs::{self, Fs},
   3    ignore::IgnoreStack,
   4    DiagnosticSummary, ProjectEntry,
   5};
   6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   7use anyhow::{anyhow, Result};
   8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
   9use clock::ReplicaId;
  10use collections::{hash_map, HashMap, HashSet};
  11use futures::{Stream, StreamExt};
  12use fuzzy::CharBag;
  13use gpui::{
  14    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
  15    Task, UpgradeModelHandle, WeakModelHandle,
  16};
  17use language::{
  18    range_from_lsp, Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Operation,
  19    PointUtf16, Rope,
  20};
  21use lazy_static::lazy_static;
  22use parking_lot::Mutex;
  23use postage::{
  24    prelude::{Sink as _, Stream as _},
  25    watch,
  26};
  27use serde::Deserialize;
  28use smol::channel::{self, Sender};
  29use std::{
  30    any::Any,
  31    cmp::{self, Ordering},
  32    convert::{TryFrom, TryInto},
  33    ffi::{OsStr, OsString},
  34    fmt,
  35    future::Future,
  36    ops::Deref,
  37    path::{Path, PathBuf},
  38    sync::{
  39        atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
  40        Arc,
  41    },
  42    time::{Duration, SystemTime},
  43};
  44use sum_tree::{Bias, TreeMap};
  45use sum_tree::{Edit, SeekTarget, SumTree};
  46use util::{post_inc, ResultExt, TryFutureExt};
  47
lazy_static! {
    // Lazily-initialized `OsStr` for the ".gitignore" file name, so it isn't
    // re-created on every comparison. Presumably matched against directory
    // entries during scanning — scanner code is not in this chunk.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
  51
/// State of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan currently in progress.
    Idle,
    /// A background scan is running; `Worktree::poll_snapshot` keeps
    /// re-polling while in this state.
    Scanning,
    /// The last scan failed. The error is `Arc`-wrapped so the state
    /// remains cheap to `Clone`.
    Err(Arc<anyhow::Error>),
}
  58
/// Identifier for a worktree. Locally this wraps the GPUI model id
/// (`from_usize`); for remote worktrees it wraps the id received over the
/// wire (`from_proto`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
  61
/// A worktree is either backed by the local filesystem (`Local`) or
/// replicated from a collaborator over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
  66
impl Entity for Worktree {
    // Worktree models emit no typed events; observers are informed via
    // `cx.notify()` (see `poll_snapshot`).
    type Event = ();
}
  70
impl Worktree {
    /// Creates a local worktree rooted at `path`, then starts the background
    /// scanner task that watches the filesystem (events debounced to 100ms)
    /// and writes into the shared `background_snapshot`.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // Holding the task in `_background_scanner_task` keeps the scanner
            // alive for the lifetime of the worktree.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }

    /// Constructs a remote (replicated) worktree from the initial state
    /// received over RPC. Entry deserialization runs on a background thread;
    /// two detached tasks are then spawned: one applies incoming
    /// `UpdateWorktree` messages to a watch channel of snapshots, the other
    /// polls each new snapshot into the model until the model is dropped.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        // Root name is lowercased into a char bag for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    // Entries that fail to deserialize are logged and skipped
                    // rather than failing the whole worktree.
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                // Remote snapshots have no meaningful absolute path.
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Task 1: apply each protocol update to a copy of the latest
                // snapshot and publish the result. Update errors are logged,
                // not propagated.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                // Task 2: whenever a new snapshot is published, poll it into
                // the model; exits once the model can no longer be upgraded.
                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    user_store,
                    diagnostic_summaries,
                })
            })
        });

        Ok(worktree)
    }

    /// Returns the local variant, or `None` for remote worktrees.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, or `None` for local worktrees.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Worktree::as_local`].
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Worktree::as_remote`].
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the worktree's current snapshot (delegates to the variant).
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The replica id of this copy; local worktrees are always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Removes a departed collaborator. Local worktrees also need the
    /// `peer_id` (to drop shared buffers — see `shared_buffers`); remote
    /// ones only use the replica id.
    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }

    /// The user store shared by both variants.
    pub fn user_store(&self) -> &ModelHandle<UserStore> {
        match self {
            Worktree::Local(worktree) => &worktree.user_store,
            Worktree::Remote(worktree) => &worktree.user_store,
        }
    }

    /// Iterates over per-file diagnostic summaries as `(path, summary)`.
    pub fn diagnostic_summaries<'a>(
        &'a self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), summary.clone()))
    }

    /// The map of in-flight buffer loads, shared by both variants.
    pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
        match self {
            Worktree::Local(worktree) => &mut worktree.loading_buffers,
            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
        }
    }

    /// Opens the buffer at `path`, deduplicating repeated and concurrent
    /// opens. The returned flag is `true` only for the first caller to
    /// observe a newly created buffer (consumed via `fetch_and(false)`).
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<(ModelHandle<Buffer>, bool)>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok((existing_buffer, false)) });
        }

        let is_new = Arc::new(AtomicBool::new(true));
        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    *tx.borrow_mut() = Some(match result {
                        Ok(buffer) => Ok((buffer, is_new)),
                        Err(error) => Err(Arc::new(error)),
                    });
                })
                .detach();
                rx
            }
        };

        // Wait until the watch yields the load result. `fetch_and(false)`
        // returns the prior value, so exactly one waiter sees `true`.
        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return match result {
                        Ok((buf, is_new)) => Ok((buf.clone(), is_new.fetch_and(false, SeqCst))),
                        Err(error) => Err(anyhow!("{}", error)),
                    };
                }
                loading_watch.recv().await;
            }
        })
    }

    /// Test-only: reports whether a live open buffer exists for `path`.
    #[cfg(feature = "test-support")]
    pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
        let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
            Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
            Worktree::Remote(worktree) => {
                // Remote buffers that are only queued operations don't count.
                Box::new(worktree.open_buffers.values().filter_map(|buf| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some(buf)
                    } else {
                        None
                    }
                }))
            }
        };

        let path = path.as_ref();
        open_buffers
            .find(|buffer| {
                if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
                    file.path().as_ref() == path
                } else {
                    false
                }
            })
            .is_some()
    }

    /// Applies buffer operations received over RPC. On a local worktree the
    /// buffer must already be open (error otherwise). On a remote worktree,
    /// operations for buffers that aren't loaded yet (or whose handle has
    /// died) are queued as `RemoteBuffer::Operations` for later application.
    pub fn handle_update_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateBuffer>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let buffer_id = payload.buffer_id as usize;
        let ops = payload
            .operations
            .into_iter()
            .map(|op| language::proto::deserialize_operation(op))
            .collect::<Result<Vec<_>, _>>()?;

        match self {
            Worktree::Local(worktree) => {
                let buffer = worktree
                    .open_buffers
                    .get(&buffer_id)
                    .and_then(|buf| buf.upgrade(cx))
                    .ok_or_else(|| {
                        anyhow!("invalid buffer {} in update buffer message", buffer_id)
                    })?;
                buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
            }
            Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
                Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
                Some(RemoteBuffer::Loaded(buffer)) => {
                    if let Some(buffer) = buffer.upgrade(cx) {
                        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
                    } else {
                        // Handle is dead: replace the entry with queued ops.
                        worktree
                            .open_buffers
                            .insert(buffer_id, RemoteBuffer::Operations(ops));
                    }
                }
                None => {
                    worktree
                        .open_buffers
                        .insert(buffer_id, RemoteBuffer::Operations(ops));
                }
            },
        }

        Ok(())
    }

    /// Host-side handler for a guest's request to save a shared buffer.
    /// Panics (`as_local().unwrap()`) if invoked on a remote worktree.
    /// Responds asynchronously with `BufferSaved` (version + mtime) once the
    /// save completes; response errors are only logged (`log_err`).
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }

    /// Guest-side handler for the host's `BufferSaved` message: forwards the
    /// saved version and mtime to the open buffer, if it is still alive.
    /// Panics (`as_remote_mut().unwrap()`) if invoked on a local worktree.
    pub fn handle_buffer_saved(
        &mut self,
        envelope: TypedEnvelope<proto::BufferSaved>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let worktree = self.as_remote_mut().unwrap();
        if let Some(buffer) = worktree
            .open_buffers
            .get(&(payload.buffer_id as usize))
            .and_then(|buf| buf.upgrade(cx))
        {
            buffer.update(cx, |buffer, cx| {
                let version = payload.version.try_into()?;
                let mtime = payload
                    .mtime
                    .ok_or_else(|| anyhow!("missing mtime"))?
                    .into();
                buffer.did_save(version, mtime, None, cx);
                Result::<_, anyhow::Error>::Ok(())
            })?;
        }
        Ok(())
    }

    /// Host-side handler for a guest's format request. Formats the shared
    /// buffer, then replies with `Ack` on success or an error response on
    /// failure. Panics if invoked on a remote worktree.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();

        Ok(())
    }

    /// Pulls the latest snapshot into the model. For a local worktree that is
    /// still scanning, schedules a single re-poll (an immediate yield for the
    /// fake fs in tests, a 100ms timer otherwise); once idle, reconciles the
    /// files of open buffers. Remote worktrees just copy the watched
    /// snapshot. Always notifies observers.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // Only one re-poll task is kept in flight at a time.
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }

    /// Re-resolves the `File` of every open buffer against the current
    /// snapshot: first by entry id, then by path; if neither matches, the
    /// file keeps its old path and mtime with `entry_id: None` (the entry no
    /// longer exists in the snapshot). Buffers whose weak handles have been
    /// dropped are pruned from the open-buffer map afterwards.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        // NOTE: `abs_path`, `entry_for_id`, and `entry_for_path` are reached
        // through the snapshot (presumably via a `Deref` impl not in view).
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }

    /// Sends one buffer operation to the server. No-op for a local worktree
    /// that isn't currently shared. If the RPC request fails, the operation
    /// is pushed onto `queued_operations` — NOTE(review): the retry path for
    /// that queue is not visible in this chunk.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
}
 704
 705impl WorktreeId {
 706    pub fn from_usize(handle_id: usize) -> Self {
 707        Self(handle_id)
 708    }
 709
 710    pub(crate) fn from_proto(id: u64) -> Self {
 711        Self(id as usize)
 712    }
 713
 714    pub fn to_proto(&self) -> u64 {
 715        self.0 as u64
 716    }
 717
 718    pub fn to_usize(&self) -> usize {
 719        self.0
 720    }
 721}
 722
 723impl fmt::Display for WorktreeId {
 724    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 725        self.0.fmt(f)
 726    }
 727}
 728
/// A point-in-time view of a worktree's entries. Cheap enough to `Clone`
/// frequently (trees and paths are shared via `Arc`/persistent structures).
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Scan counter; remote snapshots start at 0 (see `Worktree::remote`).
    scan_id: usize,
    // Absolute root path; empty (`Path::new("")`) for remote worktrees.
    abs_path: Arc<Path>,
    // File name of the root; lowercased into `root_char_bag` for fuzzy
    // matching (see `Worktree::remote` / `LocalWorktree::new`).
    root_name: String,
    root_char_bag: CharBag,
    // Parsed gitignore per directory, paired with a scan id —
    // NOTE(review): pairing semantics inferred; scanner not in this chunk.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The entry set, ordered by path, plus an id-keyed index over it.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Entry ids that were removed, mapped to a scan id — TODO confirm
    // against the scanner code (not visible here).
    removed_entry_ids: HashMap<u64, usize>,
    // Shared counter used to allocate ids for new entries.
    next_entry_id: Arc<AtomicUsize>,
}
 742
/// A worktree backed by the local filesystem.
pub struct LocalWorktree {
    // Foreground copy of the scanner's snapshot (refreshed in `poll_snapshot`).
    snapshot: Snapshot,
    // Configuration parsed from `.zed.toml` at the worktree root.
    config: WorktreeConfig,
    // Snapshot shared with (and written by) the background scanner task.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Latest scan state (Idle / Scanning / Err) from the scanner.
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the background scanner alive for the worktree's lifetime.
    _background_scanner_task: Option<Task<()>>,
    // In-flight re-poll task while a scan is running (see `poll_snapshot`).
    poll_task: Option<Task<()>>,
    // Present while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    // In-flight buffer loads by path; deduplicates concurrent opens.
    loading_buffers: LoadingBuffers,
    // Open buffers by id; weak handles so dropped buffers can be pruned.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Buffers shared with each remote peer, keyed by peer then buffer id.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Full diagnostics per file, and the per-file summary counts.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    // Buffer operations that failed to send (see `send_buffer_update`).
    queued_operations: Vec<(u64, Operation)>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
}
 761
/// State held by a local worktree while it is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Sends new snapshots for mirroring to remote peers — NOTE(review): the
    // consuming task lives outside this chunk; confirm.
    snapshots_tx: Sender<Snapshot>,
    // Keeps the snapshot-mirroring task alive while the share exists.
    _maintain_remote_snapshot: Option<Task<()>>,
}
 767
/// A worktree replicated from a remote host over RPC (see `Worktree::remote`).
pub struct RemoteWorktree {
    project_id: u64,
    // Latest snapshot pulled into the model by `poll_snapshot`.
    snapshot: Snapshot,
    // Watch channel fed by the background task that applies updates.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Incoming `UpdateWorktree` messages are sent here for background
    // application (see `Worktree::remote`).
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    // This client's replica id within the collaborative session.
    replica_id: ReplicaId,
    // In-flight buffer loads by path; deduplicates concurrent opens.
    loading_buffers: LoadingBuffers,
    // Either a live buffer handle or operations queued for a buffer that
    // isn't loaded yet (see `handle_update_buffer`).
    open_buffers: HashMap<usize, RemoteBuffer>,
    user_store: ModelHandle<UserStore>,
    // Buffer operations that failed to send (see `send_buffer_update`).
    queued_operations: Vec<(u64, Operation)>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
}
 781
// Map from file path to a watch receiver that yields `None` until the load
// finishes, then `Some(Ok((buffer, is_new_flag)))` or a shared error
// (see `Worktree::open_buffer`).
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<
        Option<Result<(ModelHandle<Buffer>, Arc<AtomicBool>), Arc<anyhow::Error>>>,
    >,
>;
 788
/// Per-worktree configuration, deserialized from a `.zed.toml` file at the
/// worktree root (see `LocalWorktree::new`); defaults apply when the file is
/// absent or fails to parse.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    collaborators: Vec<String>,
}
 793
 794impl LocalWorktree {
    /// Creates a `LocalWorktree` rooted at `path`, wrapped in a `Worktree`
    /// model.
    ///
    /// Returns the model handle together with the sender on which the
    /// background scanner is expected to report `ScanState` transitions.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // Within the worktree, the root entry is addressed by the empty path.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // Optional per-worktree configuration; a missing or malformed
        // `.zed.toml` silently falls back to the default config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry when the path exists on
            // disk (`metadata` is `None` otherwise).
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                client,
                user_store,
                fs,
            };

            // Forward scan-state changes: keep `last_scan_state_rx` current,
            // re-poll the snapshot, and — once a scan settles while the
            // worktree is shared — push the fresh snapshot to the share
            // channel.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        // Send outside the model update so the await doesn't
                        // hold the model borrow.
                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree model was dropped; stop forwarding.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
 899
 900    pub fn authorized_logins(&self) -> Vec<String> {
 901        self.config.collaborators.clone()
 902    }
 903
 904    fn get_open_buffer(
 905        &mut self,
 906        path: &Path,
 907        cx: &mut ModelContext<Worktree>,
 908    ) -> Option<ModelHandle<Buffer>> {
 909        let handle = cx.handle();
 910        let mut result = None;
 911        self.open_buffers.retain(|_buffer_id, buffer| {
 912            if let Some(buffer) = buffer.upgrade(cx) {
 913                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
 914                    if file.worktree == handle && file.path().as_ref() == path {
 915                        result = Some(buffer);
 916                    }
 917                }
 918                true
 919            } else {
 920                false
 921            }
 922        });
 923        result
 924    }
 925
    /// Loads the file at `path` from disk and creates a buffer for it,
    /// applying any diagnostics previously recorded for that path.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Diagnostics that arrived before the buffer was opened.
            let diagnostics = this.update(&mut cx, |this, _| {
                this.as_local_mut().unwrap().diagnostics.get(&path).cloned()
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            // Broadcast the diagnostic operations (when shared) and start
            // tracking the new buffer.
            this.update(&mut cx, |this, cx| {
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
 962
 963    pub fn open_remote_buffer(
 964        &mut self,
 965        peer_id: PeerId,
 966        buffer: ModelHandle<Buffer>,
 967        cx: &mut ModelContext<Worktree>,
 968    ) -> proto::OpenBufferResponse {
 969        self.shared_buffers
 970            .entry(peer_id)
 971            .or_default()
 972            .insert(buffer.id() as u64, buffer.clone());
 973        proto::OpenBufferResponse {
 974            buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
 975        }
 976    }
 977
 978    pub fn close_remote_buffer(
 979        &mut self,
 980        envelope: TypedEnvelope<proto::CloseBuffer>,
 981        cx: &mut ModelContext<Worktree>,
 982    ) -> Result<()> {
 983        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
 984            shared_buffers.remove(&envelope.payload.buffer_id);
 985            cx.notify();
 986        }
 987
 988        Ok(())
 989    }
 990
 991    pub fn remove_collaborator(
 992        &mut self,
 993        peer_id: PeerId,
 994        replica_id: ReplicaId,
 995        cx: &mut ModelContext<Worktree>,
 996    ) {
 997        self.shared_buffers.remove(&peer_id);
 998        for (_, buffer) in &self.open_buffers {
 999            if let Some(buffer) = buffer.upgrade(cx) {
1000                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1001            }
1002        }
1003        cx.notify();
1004    }
1005
    /// Converts an LSP `publishDiagnostics` notification into this worktree's
    /// diagnostic entries and stores them via `update_diagnostic_entries`.
    ///
    /// Diagnostics are grouped: each primary diagnostic gets a fresh group
    /// id, and its related-information locations in the same file become
    /// non-primary members of that group. A diagnostic whose related
    /// information points back at an already-seen primary is treated as
    /// "supporting" and only contributes a severity override.
    pub fn update_diagnostics(
        &mut self,
        worktree_path: Arc<Path>,
        params: lsp::PublishDiagnosticsParams,
        disk_based_sources: &HashSet<String>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut next_group_id = 0;
        let mut diagnostics = Vec::default();
        // Maps (source, code, range) of each primary diagnostic to its group.
        let mut primary_diagnostic_group_ids = HashMap::default();
        let mut sources_by_group_id = HashMap::default();
        // Severity overrides keyed by (source, code, range), collected from
        // supporting diagnostics and applied in the second pass below.
        let mut supporting_diagnostic_severities = HashMap::default();
        for diagnostic in &params.diagnostics {
            let source = diagnostic.source.as_ref();
            let code = diagnostic.code.as_ref().map(|code| match code {
                lsp::NumberOrString::Number(code) => code.to_string(),
                lsp::NumberOrString::String(code) => code.clone(),
            });
            let range = range_from_lsp(diagnostic.range);
            // A diagnostic is "supporting" when any of its related locations
            // matches a previously-seen primary with the same source/code.
            let is_supporting = diagnostic
                .related_information
                .as_ref()
                .map_or(false, |infos| {
                    infos.iter().any(|info| {
                        primary_diagnostic_group_ids.contains_key(&(
                            source,
                            code.clone(),
                            range_from_lsp(info.location.range),
                        ))
                    })
                });

            if is_supporting {
                if let Some(severity) = diagnostic.severity {
                    supporting_diagnostic_severities
                        .insert((source, code.clone(), range), severity);
                }
            } else {
                let group_id = post_inc(&mut next_group_id);
                let is_disk_based =
                    source.map_or(false, |source| disk_based_sources.contains(source));

                sources_by_group_id.insert(group_id, source);
                primary_diagnostic_group_ids
                    .insert((source, code.clone(), range.clone()), group_id);

                diagnostics.push(DiagnosticEntry {
                    range,
                    diagnostic: Diagnostic {
                        code: code.clone(),
                        // LSP severity is optional; default to ERROR.
                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
                        message: diagnostic.message.clone(),
                        group_id,
                        is_primary: true,
                        is_valid: true,
                        is_disk_based,
                    },
                });
                // Related locations within the same file become non-primary
                // entries of this group, initially at INFORMATION severity.
                if let Some(infos) = &diagnostic.related_information {
                    for info in infos {
                        if info.location.uri == params.uri {
                            let range = range_from_lsp(info.location.range);
                            diagnostics.push(DiagnosticEntry {
                                range,
                                diagnostic: Diagnostic {
                                    code: code.clone(),
                                    severity: DiagnosticSeverity::INFORMATION,
                                    message: info.message.clone(),
                                    group_id,
                                    is_primary: false,
                                    is_valid: true,
                                    is_disk_based,
                                },
                            });
                        }
                    }
                }
            }
        }

        // Second pass: apply severity overrides from supporting diagnostics
        // to the matching non-primary group members.
        for entry in &mut diagnostics {
            let diagnostic = &mut entry.diagnostic;
            if !diagnostic.is_primary {
                let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
                if let Some(&severity) = supporting_diagnostic_severities.get(&(
                    source,
                    diagnostic.code.clone(),
                    entry.range.clone(),
                )) {
                    diagnostic.severity = severity;
                }
            }
        }

        self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
        Ok(())
    }
1103
    /// Stores `diagnostics` for `worktree_path`, applies them to the matching
    /// open buffer (if any), and — when the worktree is shared — forwards the
    /// updated summary to the server.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        // At most one open buffer is expected per path, hence the `break`.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        let summary = DiagnosticSummary::new(&diagnostics);
        self.diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        self.diagnostics.insert(worktree_path.clone(), diagnostics);

        // When shared, notify the server of the new per-path summary counts.
        if let Some(share) = self.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = self.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = self.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
1164
    /// Sends a buffer operation to the server when this worktree is shared.
    ///
    /// Returns `None` immediately when the worktree is not shared. On RPC
    /// failure the operation is pushed onto `queued_operations` for later
    /// delivery rather than being lost.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<()> {
        let share = self.share.as_ref()?;
        let project_id = share.project_id;
        let worktree_id = self.id();
        let rpc = self.client.clone();
        cx.spawn(|worktree, mut cx| async move {
            if let Err(error) = rpc
                .request(proto::UpdateBuffer {
                    project_id,
                    worktree_id: worktree_id.0 as u64,
                    buffer_id,
                    operations: vec![language::proto::serialize_operation(&operation)],
                })
                .await
            {
                worktree.update(&mut cx, |worktree, _| {
                    log::error!("error sending buffer operation: {}", error);
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .queued_operations
                        .push((buffer_id, operation));
                });
            }
        })
        .detach();
        None
    }
1198
1199    pub fn scan_complete(&self) -> impl Future<Output = ()> {
1200        let mut scan_state_rx = self.last_scan_state_rx.clone();
1201        async move {
1202            let mut scan_state = Some(scan_state_rx.borrow().clone());
1203            while let Some(ScanState::Scanning) = scan_state {
1204                scan_state = scan_state_rx.recv().await;
1205            }
1206        }
1207    }
1208
1209    fn is_scanning(&self) -> bool {
1210        if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1211            true
1212        } else {
1213            false
1214        }
1215    }
1216
    /// A clone of the current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1220
    /// The absolute filesystem path of the worktree root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1224
    /// Whether the (absolute) `path` lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1228
1229    fn absolutize(&self, path: &Path) -> PathBuf {
1230        if path.file_name().is_some() {
1231            self.snapshot.abs_path.join(path)
1232        } else {
1233            self.snapshot.abs_path.to_path_buf()
1234        }
1235    }
1236
    /// Reads the file at `path` from disk and returns its `File` metadata
    /// along with the text contents.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1262
    /// Saves `buffer_handle`'s current contents to `path` within this
    /// worktree, then points the buffer at the resulting file and marks it
    /// saved at the entry's mtime.
    pub fn save_buffer_as(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        // Capture text and version up front so the save reflects the buffer
        // state at the time of the call.
        let buffer = buffer_handle.read(cx);
        let text = buffer.as_rope().clone();
        let version = buffer.version();
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            let file = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Start tracking the buffer under its (possibly new) path.
                this.open_buffers
                    .insert(buffer_handle.id(), buffer_handle.downgrade());
                File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                }
            });

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version, file.mtime, Some(Box::new(file)), cx);
            });

            Ok(())
        })
    }
1296
    /// Writes `text` to `path` on disk (on the background executor),
    /// refreshes the corresponding snapshot entry, and returns it.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Surface the refreshed background snapshot to the foreground.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1318
    /// Starts sharing this worktree under `project_id`.
    ///
    /// Spawns a background task that diffs each snapshot received on the
    /// share channel against the last successfully-sent one and pushes the
    /// update to the server, then sends the initial `ShareWorktree` request.
    /// A no-op if the worktree is already shared.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        let maintain_remote_snapshot = cx.background().spawn({
            let rpc = rpc.clone();
            let snapshot = snapshot.clone();
            async move {
                let mut prev_snapshot = snapshot;
                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                    let message =
                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                    match rpc.send(message).await {
                        // Only advance the baseline on success so a failed
                        // diff's changes are retried with the next snapshot.
                        Ok(()) => prev_snapshot = snapshot,
                        Err(err) => log::error!("error sending snapshot diff {}", err),
                    }
                }
            }
        });
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
        });

        // Serialize the full initial snapshot off the main thread.
        let diagnostic_summaries = self.diagnostic_summaries.clone();
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto(&diagnostic_summaries)),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1366
1367    pub fn unshare(&mut self) {
1368        self.share.take();
1369    }
1370
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1374}
1375
1376fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1377    let contents = smol::block_on(fs.load(&abs_path))?;
1378    let parent = abs_path.parent().unwrap_or(Path::new("/"));
1379    let mut builder = GitignoreBuilder::new(parent);
1380    for line in contents.lines() {
1381        builder.add_line(Some(abs_path.into()), line)?;
1382    }
1383    Ok(builder.build()?)
1384}
1385
1386impl Deref for Worktree {
1387    type Target = Snapshot;
1388
1389    fn deref(&self) -> &Self::Target {
1390        match self {
1391            Worktree::Local(worktree) => &worktree.snapshot,
1392            Worktree::Remote(worktree) => &worktree.snapshot,
1393        }
1394    }
1395}
1396
impl Deref for LocalWorktree {
    type Target = Snapshot;

    /// Exposes the foreground snapshot's API directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1404
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    /// Exposes the snapshot's API directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1412
impl fmt::Debug for LocalWorktree {
    /// Delegates to the snapshot's `Debug` representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1418
1419impl RemoteWorktree {
1420    fn get_open_buffer(
1421        &mut self,
1422        path: &Path,
1423        cx: &mut ModelContext<Worktree>,
1424    ) -> Option<ModelHandle<Buffer>> {
1425        let handle = cx.handle();
1426        let mut existing_buffer = None;
1427        self.open_buffers.retain(|_buffer_id, buffer| {
1428            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
1429                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1430                    if file.worktree == handle && file.path().as_ref() == path {
1431                        existing_buffer = Some(buffer);
1432                    }
1433                }
1434                true
1435            } else {
1436                false
1437            }
1438        });
1439        existing_buffer
1440    }
1441
    /// Opens the buffer for `path` by requesting its contents from the host
    /// over RPC, then registers it in `open_buffers`, replaying any
    /// operations that arrived while the request was in flight.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            // The worktree may be dropped while the request is outstanding,
            // hence the weak handle and explicit upgrades.
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx).unwrap()
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Replace any queued-operations placeholder with the loaded
                // buffer, replaying the queued operations onto it.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }
1496
1497    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1498        for (_, buffer) in self.open_buffers.drain() {
1499            if let RemoteBuffer::Loaded(buffer) = buffer {
1500                if let Some(buffer) = buffer.upgrade(cx) {
1501                    buffer.update(cx, |buffer, cx| buffer.close(cx))
1502                }
1503            }
1504        }
1505    }
1506
    /// A clone of the current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1510
    /// Forwards a worktree update received from the server to the background
    /// task that maintains this worktree's snapshot.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                // The receiving task is expected to outlive all senders;
                // a closed receiver here would be a bug.
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }
1526
1527    pub fn update_diagnostic_summary(
1528        &mut self,
1529        path: Arc<Path>,
1530        summary: &proto::DiagnosticSummary,
1531    ) {
1532        self.diagnostic_summaries.insert(
1533            PathKey(path.clone()),
1534            DiagnosticSummary {
1535                error_count: summary.error_count as usize,
1536                warning_count: summary.warning_count as usize,
1537                info_count: summary.info_count as usize,
1538                hint_count: summary.hint_count as usize,
1539            },
1540        );
1541    }
1542
1543    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1544        for (_, buffer) in &self.open_buffers {
1545            if let Some(buffer) = buffer.upgrade(cx) {
1546                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1547            }
1548        }
1549        cx.notify();
1550    }
1551}
1552
/// A buffer in a remote worktree: either fully loaded, or a backlog of
/// operations received before the open-buffer request completed (replayed in
/// `RemoteWorktree::open_buffer`).
enum RemoteBuffer {
    Operations(Vec<Operation>),
    Loaded(WeakModelHandle<Buffer>),
}
1557
1558impl RemoteBuffer {
1559    fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1560        match self {
1561            Self::Operations(_) => None,
1562            Self::Loaded(buffer) => buffer.upgrade(cx),
1563        }
1564    }
1565}
1566
1567impl Snapshot {
    /// The identifier of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
1571
1572    pub fn to_proto(
1573        &self,
1574        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
1575    ) -> proto::Worktree {
1576        let root_name = self.root_name.clone();
1577        proto::Worktree {
1578            id: self.id.0 as u64,
1579            root_name,
1580            entries: self
1581                .entries_by_path
1582                .iter()
1583                .filter(|e| !e.is_ignored)
1584                .map(Into::into)
1585                .collect(),
1586            diagnostic_summaries: diagnostic_summaries
1587                .iter()
1588                .map(|(path, summary)| summary.to_proto(path.0.clone()))
1589                .collect(),
1590        }
1591    }
1592
    /// Builds an `UpdateWorktree` message describing how this snapshot
    /// differs from an older snapshot `other`.
    ///
    /// Performs a sorted merge over both snapshots' entry-id cursors
    /// (assumed to yield entries in ascending id order): ids only in `self`
    /// become updated (new) entries, ids only in `other` become removals,
    /// and ids in both are reported as updated only when their `scan_id`s
    /// differ. Ignored entries are skipped unless `include_ignored` is set.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Entry only in `self`: newly added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Entry in both: send only if re-scanned since.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Entry only in `other`: removed.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }
1657
    /// Applies an `UpdateWorktree` message (produced by `build_update` on the
    /// other side) to this snapshot, editing both the by-path and by-id trees.
    /// Bumps `scan_id` so the updated entries are attributed to a new scan.
    ///
    /// # Errors
    /// Fails if a removed entry id is unknown locally, or if an updated entry
    /// cannot be converted from its protobuf representation.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry already exists under a different path (a rename),
            // remove the old path record before inserting the new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }
1692
    /// Total number of file entries in this snapshot (directories excluded).
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }
1696
    /// Number of non-ignored file entries in this snapshot.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }
1700
    /// Returns a `Traversal` positioned at the `start_offset`-th entry, where
    /// offsets count only the entries selected by `include_dirs` /
    /// `include_ignored`.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1723
    /// Returns a `Traversal` positioned at the entry for `path` (left bias:
    /// the cursor lands on the entry itself when present).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }
1738
    /// Iterates over file entries only, starting at file offset `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }
1742
    /// Iterates over all entries (files and directories) from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }
1746
    /// Iterates over the paths of all entries, skipping the worktree root
    /// (which is stored under the empty path).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }
1754
    /// Iterates over the direct children of the directory at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        // Right bias seeks just past the parent entry itself, so iteration
        // starts at its first descendant.
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }
1768
    /// The entry for the worktree root, stored under the empty path.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }
1772
    /// The name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }
1776
1777    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1778        let path = path.as_ref();
1779        self.traverse_from_path(true, true, path)
1780            .entry()
1781            .and_then(|entry| {
1782                if entry.path.as_ref() == path {
1783                    Some(entry)
1784                } else {
1785                    None
1786                }
1787            })
1788    }
1789
    /// Looks up an entry by id: the by-id tree recovers the entry's current
    /// path, which is then resolved in the by-path tree.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }
1794
    /// The inode of the entry at `path`, if one exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
1798
    /// Inserts (or replaces) a single entry in both trees, returning the
    /// entry with its final id. If the entry is a `.gitignore` file, its
    /// parent directory's ignore rules are rebuilt and stamped with the
    /// current `scan_id`. Entry ids are reused where possible so that
    /// removed-and-recreated files keep stable ids.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A malformed .gitignore shouldn't abort the update; log
                    // and continue without its rules.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }
1831
    /// Records the scanned children of the directory at `parent_path`,
    /// transitioning its entry from `PendingDir` to `Dir`. If the directory
    /// contained a `.gitignore`, the prebuilt `ignore` is stored for it.
    ///
    /// Panics if `parent_path` has no entry, or (via `unreachable!`) if its
    /// entry is not in the `PendingDir` state — a directory must only be
    /// populated once.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        // Re-insert the parent (now a Dir) together with all of its children.
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }
1869
    /// Gives `entry` a stable id: prefer the id of a recently removed entry
    /// with the same inode (a delete/recreate or rename), otherwise the id of
    /// an existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }
1877
    /// Removes the entry at `path` together with everything beneath it.
    ///
    /// The by-path tree is rebuilt as (entries before `path`) + (entries
    /// after `path`'s subtree); the sliced-out middle is the removed subtree.
    /// Removed ids are remembered by inode so `reuse_entry_id` can restore
    /// them if the same file reappears. Removing a `.gitignore` marks its
    /// directory's ignore rules with the current scan id.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest removed id seen per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }
1906
1907    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1908        let mut new_ignores = Vec::new();
1909        for ancestor in path.ancestors().skip(1) {
1910            if let Some((ignore, _)) = self.ignores.get(ancestor) {
1911                new_ignores.push((ancestor, Some(ignore.clone())));
1912            } else {
1913                new_ignores.push((ancestor, None));
1914            }
1915        }
1916
1917        let mut ignore_stack = IgnoreStack::none();
1918        for (parent_path, ignore) in new_ignores.into_iter().rev() {
1919            if ignore_stack.is_path_ignored(&parent_path, true) {
1920                ignore_stack = IgnoreStack::all();
1921                break;
1922            } else if let Some(ignore) = ignore {
1923                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
1924            }
1925        }
1926
1927        if ignore_stack.is_path_ignored(path, is_dir) {
1928            ignore_stack = IgnoreStack::all();
1929        }
1930
1931        ignore_stack
1932    }
1933}
1934
1935impl fmt::Debug for Snapshot {
1936    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1937        for entry in self.entries_by_path.cursor::<()>() {
1938            for _ in entry.path.ancestors().skip(1) {
1939                write!(f, " ")?;
1940            }
1941            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
1942        }
1943        Ok(())
1944    }
1945}
1946
/// A handle to a file within a [`Worktree`]; the concrete implementation of
/// the `language::File` trait used by buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    // `None` once the underlying worktree entry has been deleted.
    entry_id: Option<usize>,
    pub worktree: ModelHandle<Worktree>,
    // Path of the worktree root (joined with `path` to form an absolute
    // path for local worktrees).
    worktree_path: Arc<Path>,
    // Path of this file relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    // Whether the owning worktree is local (vs. a remote collaborator's).
    is_local: bool,
}
1956
impl language::File for File {
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// Path of the file relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Absolute path on disk; `None` for files in remote worktrees, which
    /// have no path on this machine.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// Path prefixed with the worktree root's directory name, suitable for
    /// display.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    /// A file is considered deleted once its worktree entry id is gone.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Saves the buffer's contents.
    ///
    /// For a local worktree, the text is written to disk and, if the project
    /// is shared, peers are notified with a `BufferSaved` message. For a
    /// remote worktree, a `SaveBuffer` request asks the host to perform the
    /// save and return the resulting version and mtime.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Only notify peers when the project is actually shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Loads the file's contents from disk; `None` for remote worktrees.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Asks the host to format the buffer; `None` for local worktrees.
    fn format_remote(
        &self,
        buffer_id: u64,
        cx: &mut MutableAppContext,
    ) -> Option<Task<Result<()>>> {
        let worktree = self.worktree.read(cx);
        let worktree_id = worktree.id().to_proto();
        let worktree = worktree.as_remote()?;
        let rpc = worktree.client.clone();
        let project_id = worktree.project_id;
        Some(cx.foreground().spawn(async move {
            rpc.request(proto::FormatBuffer {
                project_id,
                worktree_id,
                buffer_id,
            })
            .await?;
            Ok(())
        }))
    }

    /// Forwards a buffer operation to the worktree for distribution to peers.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// For remote worktrees, notifies the host that this buffer was closed.
    /// Best-effort: a failure to send is logged, not propagated.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.id().to_proto();
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
2111
2112impl File {
2113    pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2114        file.and_then(|f| f.as_any().downcast_ref())
2115    }
2116
2117    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2118        self.worktree.read(cx).id()
2119    }
2120
2121    pub fn project_entry(&self, cx: &AppContext) -> Option<ProjectEntry> {
2122        self.entry_id.map(|entry_id| ProjectEntry {
2123            worktree_id: self.worktree_id(cx),
2124            entry_id,
2125        })
2126    }
2127}
2128
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    // Stable identifier, preserved across delete/recreate when possible
    // (see `Snapshot::reuse_entry_id`).
    pub id: usize,
    pub kind: EntryKind,
    // Path relative to the worktree root; the root itself has an empty path.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True when some `.gitignore` rule in scope matches this path.
    pub is_ignored: bool,
}
2139
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been scanned (see `Snapshot::populate_dir`).
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying the character bag of its path for fuzzy matching.
    File(CharBag),
}
2146
2147impl Entry {
2148    fn new(
2149        path: Arc<Path>,
2150        metadata: &fs::Metadata,
2151        next_entry_id: &AtomicUsize,
2152        root_char_bag: CharBag,
2153    ) -> Self {
2154        Self {
2155            id: next_entry_id.fetch_add(1, SeqCst),
2156            kind: if metadata.is_dir {
2157                EntryKind::PendingDir
2158            } else {
2159                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2160            },
2161            path,
2162            inode: metadata.inode,
2163            mtime: metadata.mtime,
2164            is_symlink: metadata.is_symlink,
2165            is_ignored: false,
2166        }
2167    }
2168
2169    pub fn is_dir(&self) -> bool {
2170        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2171    }
2172
2173    pub fn is_file(&self) -> bool {
2174        matches!(self.kind, EntryKind::File(_))
2175    }
2176}
2177
2178impl sum_tree::Item for Entry {
2179    type Summary = EntrySummary;
2180
2181    fn summary(&self) -> Self::Summary {
2182        let visible_count = if self.is_ignored { 0 } else { 1 };
2183        let file_count;
2184        let visible_file_count;
2185        if self.is_file() {
2186            file_count = 1;
2187            visible_file_count = visible_count;
2188        } else {
2189            file_count = 0;
2190            visible_file_count = 0;
2191        }
2192
2193        EntrySummary {
2194            max_path: self.path.clone(),
2195            count: 1,
2196            visible_count,
2197            file_count,
2198            visible_file_count,
2199        }
2200    }
2201}
2202
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed — and therefore ordered — by worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2210
/// Aggregated statistics over a range of `Entry` items in the by-path tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Largest (rightmost) path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries in the range.
    count: usize,
    // Entries that are not ignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are non-ignored files.
    visible_file_count: usize,
}
2219
impl Default for EntrySummary {
    // The identity summary: empty path, all counts zero.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2231
2232impl sum_tree::Summary for EntrySummary {
2233    type Context = ();
2234
2235    fn add_summary(&mut self, rhs: &Self, _: &()) {
2236        self.max_path = rhs.max_path.clone();
2237        self.visible_count += rhs.visible_count;
2238        self.file_count += rhs.file_count;
2239        self.visible_file_count += rhs.visible_file_count;
2240    }
2241}
2242
/// An item in the id-ordered tree, mapping an entry id to its current path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this entry was last created or modified; compared by
    // `build_update` to detect changes between snapshots.
    scan_id: usize,
}
2250
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // A single path entry's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2258
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    // Path entries are keyed — and therefore ordered — by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2266
/// Summary for the by-id tree: the largest entry id in a range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2271
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // Items are id-ordered, so the right-hand summary holds the maximum.
        self.max_id = summary.max_id;
    }
}
2279
// Allows seeking the by-id tree directly by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2285
/// Ordering key for the by-path tree: an entry's worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2288
impl Default for PathKey {
    // The empty path — the worktree root's key — sorts before all others.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2294
// Allows seeking the by-path tree directly by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2300
/// Scans a worktree's directory contents on background threads, keeping the
/// shared snapshot up to date and reporting progress over `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the foreground worktree; mutated under the lock.
    snapshot: Arc<Mutex<Snapshot>>,
    // Channel over which scan-state transitions are reported.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2307
2308impl BackgroundScanner {
    /// Creates a scanner over the given shared snapshot; no scanning happens
    /// until `run` is invoked.
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }
2322
    /// The worktree root's path, read from the shared snapshot.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }
2326
    /// A point-in-time clone of the shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }
2330
    /// Main loop of the scanner: performs the initial scan, then processes
    /// filesystem event batches as they arrive, bracketing each phase with
    /// `Scanning`/`Idle` notifications. Returns once the receiving side of
    /// `notify` is dropped (every failed send exits the loop).
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        // Report scan errors to the worktree rather than crashing; the
        // scanner still transitions to Idle afterwards.
        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }
2366
    /// Performs the initial recursive scan of the worktree root (when the
    /// root is a directory), fanning the work out over one worker task per
    /// CPU. Workers pull `ScanJob`s from a shared queue and enqueue jobs for
    /// the subdirectories they discover.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            // Hold the snapshot lock only long enough to copy what we need.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Drop our sender so the channel closes once the last in-flight
            // job (each of which holds a sender clone) has completed.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }
2412
    /// Scans a single directory: reads its children, computes their ignore
    /// status, records them in the snapshot via `populate_dir`, and enqueues
    /// a `ScanJob` for each subdirectory.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // Skip children that vanished between the directory listing and
            // the metadata call.
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed
                // to reflect the ignore file in the current directory. Because
                // `.gitignore` starts with a `.`, it tends to appear early in the
                // listing, so there should rarely be many such entries. Update the
                // ignore stacks of any already-queued child jobs as well; directory
                // entries in `new_entries` parallel the jobs in `new_jobs`, so the
                // two iterations stay in sync.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Once a directory is ignored, everything beneath it is.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }
2510
    /// Processes a batch of file-system events, bringing the worktree snapshot
    /// back in sync with the state on disk.
    ///
    /// Works on a private copy of the snapshot, then publishes it atomically.
    /// Returns `false` if the worktree root can no longer be canonicalized
    /// (e.g. it was deleted), signalling that event processing should stop.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path so that an event for an ancestor directory precedes
        // events for its descendants; `dedup_by` then drops any event whose
        // path lies beneath an earlier event's path, since rescanning the
        // ancestor will cover it anyway.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove every affected path from the snapshot. Entries
        // that still exist on disk are re-inserted in the second pass below.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // Second pass: re-stat each affected path and re-insert it, queueing
        // directories for a recursive rescan.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists on disk; it was already removed above.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Publish the updated snapshot before the recursive scans run; they
        // lock `self.snapshot` themselves to apply their results.
        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        // Dropping the root sender lets the workers terminate once every job
        // (including jobs enqueued by other jobs) has been processed.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }
2612
    /// Recomputes `is_ignored` flags for entries whose governing `.gitignore`
    /// files changed during the current scan, and prunes ignore state for
    /// `.gitignore` files that no longer exist.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // Only ignore files touched in this scan (and whose directory still
            // exists) need their subtrees re-evaluated.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            // Remove from both the local copy and the shared snapshot, since
            // the local copy is what the workers below read from.
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip paths nested under `parent_path`: updating the ancestor
            // recursively covers its whole subtree.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Drop the root sender so the workers exit once the queue drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2669
    /// Re-evaluates the ignore status of every direct child of `job.path`,
    /// enqueueing a follow-up job for each child directory, and writes any
    /// changed entries back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory has its own `.gitignore`, layer it on top of the
        // stack inherited from its ancestors.
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                // An ignored directory short-circuits evaluation for its whole
                // subtree; everything beneath it is ignored.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record an edit when the flag actually changed; both trees
            // must be updated together to stay consistent.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2710}
2711
2712async fn refresh_entry(
2713    fs: &dyn Fs,
2714    snapshot: &Mutex<Snapshot>,
2715    path: Arc<Path>,
2716    abs_path: &Path,
2717) -> Result<Entry> {
2718    let root_char_bag;
2719    let next_entry_id;
2720    {
2721        let snapshot = snapshot.lock();
2722        root_char_bag = snapshot.root_char_bag;
2723        next_entry_id = snapshot.next_entry_id.clone();
2724    }
2725    let entry = Entry::new(
2726        path,
2727        &fs.metadata(abs_path)
2728            .await?
2729            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2730        &next_entry_id,
2731        root_char_bag,
2732    );
2733    Ok(snapshot.lock().insert_entry(entry, fs))
2734}
2735
2736fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2737    let mut result = root_char_bag;
2738    result.extend(
2739        path.to_string_lossy()
2740            .chars()
2741            .map(|c| c.to_ascii_lowercase()),
2742    );
2743    result
2744}
2745
/// A unit of work for the background directory scanner: one directory to
/// enumerate, plus the channel used to schedule its subdirectories.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: PathBuf,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore state inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Sender half of the scan queue, so discovered subdirectories can be
    // enqueued as further jobs.
    scan_queue: Sender<ScanJob>,
}
2752
/// A unit of work for re-evaluating ignore statuses: one directory whose
/// children should be re-checked against the given ignore stack.
struct UpdateIgnoreStatusJob {
    // Directory (relative to the worktree root) whose children to re-check.
    path: Arc<Path>,
    // Gitignore state in effect for this directory's ancestors.
    ignore_stack: Arc<IgnoreStack>,
    // Sender half of the queue, so child directories can be enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2758
/// Test-only helpers available on worktree model handles.
pub trait WorktreeHandle {
    // Waits until all pending file-system events for the worktree have been
    // observed and processed. See the impl below for details.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2766
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create the sentinel file and wait for the worktree to see it...
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it and wait for the removal to be observed too.
            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2799
/// Summary dimension accumulated while seeking through the entry tree;
/// tracks how many entries of each kind precede the cursor's position.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Rightmost path covered so far.
    max_path: &'a Path,
    // All entries, including directories and ignored entries.
    count: usize,
    // Entries (files and directories) that are not ignored.
    visible_count: usize,
    // File entries, including ignored files.
    file_count: usize,
    // File entries that are not ignored.
    visible_file_count: usize,
}
2808
2809impl<'a> TraversalProgress<'a> {
2810    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2811        match (include_ignored, include_dirs) {
2812            (true, true) => self.count,
2813            (true, false) => self.file_count,
2814            (false, true) => self.visible_count,
2815            (false, false) => self.visible_file_count,
2816        }
2817    }
2818}
2819
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds an entry summary into this progress: the counts are additive,
    // while `max_path` is replaced by the summary's rightmost path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2829
impl<'a> Default for TraversalProgress<'a> {
    // The zero progress: empty path, no entries counted yet.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2841
/// A filtered cursor over the worktree's entries, in path order.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2847
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the given filtered offset (counted according to
    /// `include_dirs`/`include_ignored`). Returns whether the cursor moved.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, landing on the next
    /// entry that is not a descendant of it and that matches the filters.
    /// Returns `false` if no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek just past everything under `entry.path`.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep looping if the landed-on entry is filtered out; the
                // next iteration seeks past its subtree as well.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Returns the entry the cursor currently points at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Returns the filtered offset of the current cursor position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2893
2894impl<'a> Iterator for Traversal<'a> {
2895    type Item = &'a Entry;
2896
2897    fn next(&mut self) -> Option<Self::Item> {
2898        if let Some(item) = self.entry() {
2899            self.advance();
2900            Some(item)
2901        } else {
2902            None
2903        }
2904    }
2905}
2906
/// Seek targets used when positioning a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path itself.
    Path(&'a Path),
    // Seek just past the given path's entire subtree.
    PathSuccessor(&'a Path),
    // Seek to the entry at the given filtered offset.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2917
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    // Orders this target against the cursor's current progress, telling the
    // seek whether to keep advancing (Greater) or stop (Equal/Less).
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // While `max_path` is still inside the subtree, report Greater
                // so the seek continues; once outside, report Equal to stop.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
2940
/// Iterator over the direct children of a directory entry, built on top of a
/// `Traversal` that skips each child's own subtree.
struct ChildEntriesIter<'a> {
    // The directory whose children are yielded.
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2945
2946impl<'a> Iterator for ChildEntriesIter<'a> {
2947    type Item = &'a Entry;
2948
2949    fn next(&mut self) -> Option<Self::Item> {
2950        if let Some(item) = self.traversal.entry() {
2951            if item.path.starts_with(&self.parent_path) {
2952                self.traversal.advance_to_sibling();
2953                return Some(item);
2954            }
2955        }
2956        None
2957    }
2958}
2959
impl<'a> From<&'a Entry> for proto::Entry {
    // Converts a local entry into its wire representation. The char bag is
    // dropped (the remote side rebuilds it), and the path is sent as a
    // lossily-converted string.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
2973
2974impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2975    type Error = anyhow::Error;
2976
2977    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2978        if let Some(mtime) = entry.mtime {
2979            let kind = if entry.is_dir {
2980                EntryKind::Dir
2981            } else {
2982                let mut char_bag = root_char_bag.clone();
2983                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2984                EntryKind::File(char_bag)
2985            };
2986            let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2987            Ok(Entry {
2988                id: entry.id as usize,
2989                kind,
2990                path: path.clone(),
2991                inode: entry.inode,
2992                mtime: mtime.into(),
2993                is_symlink: entry.is_symlink,
2994                is_ignored: entry.is_ignored,
2995            })
2996        } else {
2997            Err(anyhow!(
2998                "missing mtime in remote worktree entry {:?}",
2999                entry.path
3000            ))
3001        }
3002    }
3003}
3004
3005#[cfg(test)]
3006mod tests {
3007    use super::*;
3008    use crate::fs::FakeFs;
3009    use anyhow::Result;
3010    use client::test::{FakeHttpClient, FakeServer};
3011    use fs::RealFs;
3012    use language::{Diagnostic, DiagnosticEntry};
3013    use lsp::Url;
3014    use rand::prelude::*;
3015    use serde_json::json;
3016    use std::{cell::RefCell, rc::Rc};
3017    use std::{
3018        env,
3019        fmt::Write,
3020        time::{SystemTime, UNIX_EPOCH},
3021    };
3022    use text::Point;
3023    use unindent::Unindent as _;
3024    use util::test::temp_tree;
3025
    /// Verifies that traversing a worktree with `entries(false)` skips
    /// gitignored entries (here, `a/b`) while yielding everything else.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
               ".gitignore": "a/b\n",
               "a": {
                   "b": "",
                   "c": "",
               }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            // `a/b` is ignored and therefore absent from the visible entries.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3071
    /// Verifies that saving an edited buffer writes its full contents to disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Insert a large amount of text so the save exercises a multi-chunk write.
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx)
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3104
    /// Verifies saving when the worktree root is a single file rather than a
    /// directory; the buffer is opened with an empty relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        // A single-file worktree exposes its file at the empty path.
        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx)
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3142
    /// Exercises rescanning after renames/deletions: entry ids and open-buffer
    /// paths must follow renamed files, deleted files must be marked deleted,
    /// and a remote replica must converge after applying the update message.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client.clone());
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap().0 }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        // Capture entry ids before mutating the tree, to assert rename
        // detection preserves them.
        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(&Default::default()),
            Client::new(http_client.clone()),
            user_store,
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Renamed entries keep their ids.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers track their files across renames...
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            // ...and only the removed file's buffer is marked deleted.
            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3296
    /// Verifies that gitignore rules are applied during the initial scan and
    /// again when new files appear during a rescan, and that `.git` itself is
    /// treated as ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Files created after the initial scan must also pick up the correct
        // ignore status from their parent directories.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3347
    /// Verifies that opening the same path multiple times — concurrently or
    /// sequentially — yields the same buffer instance rather than duplicates.
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        let user_id = 100;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client);
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap().0;
        let buffer_a_2 = buffer_a_2.await.unwrap().0;
        let buffer_b = buffer_b.await.unwrap().0;
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap()
            .0;

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3406
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        // Exercises the buffer dirty-state machine against a real filesystem
        // worktree: editing marks a buffer dirty, saving clears it, and
        // deleting the underlying file dirties it again — each transition
        // emitting the expected sequence of `language::Event`s.
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Drain pending fs events and wait for the initial scan so later
        // event assertions only see the changes made by this test.
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let (buffer1, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Collects every event the buffer emits, in order.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        // Note only the first post-save edit emits Dirtied; the second edit
        // emits Edited alone because the buffer is already dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let (buffer2, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let (buffer3, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        tree.flush_fs_events(&cx).await;
        // Dirty the buffer *before* deleting its file.
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3545
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        // Verifies how a buffer reacts to its backing file changing on disk:
        // a clean buffer silently reloads the new contents, while a dirty
        // buffer keeps its edits and is flagged as conflicted instead.
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // After the reload, the buffer is still clean: the disk contents are
        // now its baseline.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3647
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        // Verifies that `update_diagnostics` groups LSP diagnostics: a
        // primary diagnostic and the standalone HINT diagnostics that mirror
        // its `related_information` are assigned the same `group_id`, with
        // `is_primary` distinguishing the original from its supporting hints.
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let (buffer, _) = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // A synthetic `publishDiagnostics` payload in the shape rust-analyzer
        // produces: "error 1" (one hint) and "error 2" (two hints). Each hint
        // also appears as its own HINT-severity diagnostic whose
        // `related_information` points back at the original diagnostic.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        // Feed the payload through the worktree's diagnostic pipeline.
        worktree
            .update(&mut cx, |tree, cx| {
                tree.as_local_mut().unwrap().update_diagnostics(
                    Arc::from("a.rs".as_ref()),
                    message,
                    &Default::default(),
                    cx,
                )
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All entries come back ordered by buffer position; "error 1" and its
        // hint form group 0, "error 2" and its two hints form group 1.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Fetching a group by id returns only that group's entries.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3916
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Randomized test of the background scanner: mutate a real temp
        // directory while interleaving event delivery, then check that
        // (a) incremental scanning matches a from-scratch rescan and
        // (b) snapshot updates replay correctly onto older snapshots.
        // OPERATIONS / INITIAL_ENTRIES env vars tune the workload.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // insertion_probability of 1.0 means every initial mutation creates
        // a new file or directory.
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // Start from an empty snapshot containing only the root entry.
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave mutations with batched event delivery, capturing
        // intermediate snapshots along the way for the replay check below.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                // Deliver a random-length prefix of the accumulated events.
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush any undelivered events so the scanner sees the final state.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan of the final tree must agree with the incrementally
        // maintained snapshot.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Replay check: applying a built update to any earlier snapshot must
        // reproduce the current snapshot, with and without ignored entries.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot to simulate a
                // peer that was never sent them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4033
4034    fn randomly_mutate_tree(
4035        root_path: &Path,
4036        insertion_probability: f64,
4037        rng: &mut impl Rng,
4038    ) -> Result<Vec<fsevent::Event>> {
4039        let root_path = root_path.canonicalize().unwrap();
4040        let (dirs, files) = read_dir_recursive(root_path.clone());
4041
4042        let mut events = Vec::new();
4043        let mut record_event = |path: PathBuf| {
4044            events.push(fsevent::Event {
4045                event_id: SystemTime::now()
4046                    .duration_since(UNIX_EPOCH)
4047                    .unwrap()
4048                    .as_secs(),
4049                flags: fsevent::StreamFlags::empty(),
4050                path,
4051            });
4052        };
4053
4054        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4055            let path = dirs.choose(rng).unwrap();
4056            let new_path = path.join(gen_name(rng));
4057
4058            if rng.gen() {
4059                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4060                std::fs::create_dir(&new_path)?;
4061            } else {
4062                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4063                std::fs::write(&new_path, "")?;
4064            }
4065            record_event(new_path);
4066        } else if rng.gen_bool(0.05) {
4067            let ignore_dir_path = dirs.choose(rng).unwrap();
4068            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4069
4070            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4071            let files_to_ignore = {
4072                let len = rng.gen_range(0..=subfiles.len());
4073                subfiles.choose_multiple(rng, len)
4074            };
4075            let dirs_to_ignore = {
4076                let len = rng.gen_range(0..subdirs.len());
4077                subdirs.choose_multiple(rng, len)
4078            };
4079
4080            let mut ignore_contents = String::new();
4081            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4082                write!(
4083                    ignore_contents,
4084                    "{}\n",
4085                    path_to_ignore
4086                        .strip_prefix(&ignore_dir_path)?
4087                        .to_str()
4088                        .unwrap()
4089                )
4090                .unwrap();
4091            }
4092            log::info!(
4093                "Creating {:?} with contents:\n{}",
4094                ignore_path.strip_prefix(&root_path)?,
4095                ignore_contents
4096            );
4097            std::fs::write(&ignore_path, ignore_contents).unwrap();
4098            record_event(ignore_path);
4099        } else {
4100            let old_path = {
4101                let file_path = files.choose(rng);
4102                let dir_path = dirs[1..].choose(rng);
4103                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4104            };
4105
4106            let is_rename = rng.gen();
4107            if is_rename {
4108                let new_path_parent = dirs
4109                    .iter()
4110                    .filter(|d| !d.starts_with(old_path))
4111                    .choose(rng)
4112                    .unwrap();
4113
4114                let overwrite_existing_dir =
4115                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4116                let new_path = if overwrite_existing_dir {
4117                    std::fs::remove_dir_all(&new_path_parent).ok();
4118                    new_path_parent.to_path_buf()
4119                } else {
4120                    new_path_parent.join(gen_name(rng))
4121                };
4122
4123                log::info!(
4124                    "Renaming {:?} to {}{:?}",
4125                    old_path.strip_prefix(&root_path)?,
4126                    if overwrite_existing_dir {
4127                        "overwrite "
4128                    } else {
4129                        ""
4130                    },
4131                    new_path.strip_prefix(&root_path)?
4132                );
4133                std::fs::rename(&old_path, &new_path)?;
4134                record_event(old_path.clone());
4135                record_event(new_path);
4136            } else if old_path.is_dir() {
4137                let (dirs, files) = read_dir_recursive(old_path.clone());
4138
4139                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4140                std::fs::remove_dir_all(&old_path).unwrap();
4141                for file in files {
4142                    record_event(file);
4143                }
4144                for dir in dirs {
4145                    record_event(dir);
4146                }
4147            } else {
4148                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4149                std::fs::remove_file(old_path).unwrap();
4150                record_event(old_path.clone());
4151            }
4152        }
4153
4154        Ok(events)
4155    }
4156
4157    fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4158        let child_entries = std::fs::read_dir(&path).unwrap();
4159        let mut dirs = vec![path];
4160        let mut files = Vec::new();
4161        for child_entry in child_entries {
4162            let child_path = child_entry.unwrap().path();
4163            if child_path.is_dir() {
4164                let (child_dirs, child_files) = read_dir_recursive(child_path);
4165                dirs.extend(child_dirs);
4166                files.extend(child_files);
4167            } else {
4168                files.push(child_path);
4169            }
4170        }
4171        (dirs, files)
4172    }
4173
4174    fn gen_name(rng: &mut impl Rng) -> String {
4175        (0..6)
4176            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4177            .map(char::from)
4178            .collect()
4179    }
4180
    impl Snapshot {
        /// Test-only consistency checks run after each randomized mutation.
        /// Panics (via `assert!`/`assert_eq!`) on the first violated invariant.
        fn check_invariants(&self) {
            // The `files` iterators must agree with a raw scan of
            // `entries_by_path`: every file entry appears in `files`, and
            // every non-ignored file entry appears in `visible_files`, in the
            // same order. NOTE(review): assumes the first argument of
            // `self.files` toggles inclusion of ignored entries — consistent
            // with the assertions below, but `files` is defined elsewhere.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            // Both iterators must also be exhausted — no phantom extras.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree top-down via `child_entries` and compare against
            // the flat `entries_by_path` order. Despite the name, this is a
            // depth-first preorder traversal: `ix` is captured before the
            // child loop, so each batch of children ends up popped in cursor
            // order, and a child's entire subtree is visited before its next
            // sibling. That is exactly why it must equal `dfs_paths` below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every tracked ignore must still correspond to an existing
            // directory entry that still contains a `.gitignore` file.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples
        /// sorted by path, so two snapshots (e.g. an incrementally updated
        /// one and a freshly rescanned one) can be compared directly.
        /// Ignored entries are skipped unless `include_ignored` is true.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4232}