worktree.rs

use super::{
    fs::{self, Fs},
    ignore::IgnoreStack,
    DiagnosticSummary,
};
use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context, Result};
use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
use clock::ReplicaId;
use collections::BTreeMap;
use collections::{hash_map, HashMap};
use futures::{Stream, StreamExt};
use fuzzy::CharBag;
use gpui::{
    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
    Task, UpgradeModelHandle, WeakModelHandle,
};
use language::{
    Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
    Operation, PointUtf16, Rope,
};
use lazy_static::lazy_static;
use lsp::LanguageServer;
use parking_lot::Mutex;
use postage::{
    prelude::{Sink as _, Stream as _},
    watch,
};
use serde::Deserialize;
use smol::channel::{self, Sender};
use std::{
    any::Any,
    cmp::{self, Ordering},
    convert::{TryFrom, TryInto},
    ffi::{OsStr, OsString},
    fmt,
    future::Future,
    mem,
    ops::{Deref, Range},
    path::{Path, PathBuf},
    sync::{
        atomic::{AtomicUsize, Ordering::SeqCst},
        Arc,
    },
    time::{Duration, SystemTime},
};
use sum_tree::Bias;
use sum_tree::{Edit, SeekTarget, SumTree};
use util::{post_inc, ResultExt, TryFutureExt};

lazy_static! {
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}

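/// Status of a local worktree's background filesystem scan: not currently running,
/// in progress, or failed with an error.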
#[derive(Clone, Debug)]
enum ScanState {
    Idle,
    Scanning,
    Err(Arc<anyhow::Error>),
}

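/// A directory tree open in the editor. A worktree is either `Local`, backed by the
/// filesystem and kept up to date by a background scanner, or `Remote`, a replica of
/// a collaborator's worktree that is synchronized over RPC.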
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}

pub enum Event {
    Closed,
}

impl Entity for Worktree {
    type Event = Event;

    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        if let Self::Local(worktree) = self {
            let shutdown_futures = worktree
                .language_servers
                .drain()
                .filter_map(|(_, server)| server.shutdown())
                .collect::<Vec<_>>();
            Some(
                async move {
                    futures::future::join_all(shutdown_futures).await;
                }
                .boxed(),
            )
        } else {
            None
        }
    }
}

impl Worktree {
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }

    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: remote_id as usize,
                    scan_id: 0,
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    diagnostic_summaries: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                })
            })
        });

        Ok(worktree)
    }

    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }

    pub fn languages(&self) -> &Arc<LanguageRegistry> {
        match self {
            Worktree::Local(worktree) => &worktree.languages,
            Worktree::Remote(worktree) => &worktree.languages,
        }
    }

    pub fn user_store(&self) -> &ModelHandle<UserStore> {
        match self {
            Worktree::Local(worktree) => &worktree.user_store,
            Worktree::Remote(worktree) => &worktree.user_store,
        }
    }

    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }

    pub fn handle_close_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        self.as_local_mut()
            .unwrap()
            .close_remote_buffer(envelope, cx)
    }

    pub fn diagnostic_summaries<'a>(
        &'a self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.clone(), summary.clone()))
    }

    pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
        match self {
            Worktree::Local(worktree) => &mut worktree.loading_buffers,
            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
        }
    }

    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }

    #[cfg(feature = "test-support")]
    pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
        let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
            Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
            Worktree::Remote(worktree) => {
                Box::new(worktree.open_buffers.values().filter_map(|buf| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some(buf)
                    } else {
                        None
                    }
                }))
            }
        };

        let path = path.as_ref();
        open_buffers
            .find(|buffer| {
                if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
                    file.path().as_ref() == path
                } else {
                    false
                }
            })
            .is_some()
    }

    pub fn handle_update_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateBuffer>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let buffer_id = payload.buffer_id as usize;
        let ops = payload
            .operations
            .into_iter()
            .map(|op| language::proto::deserialize_operation(op))
            .collect::<Result<Vec<_>, _>>()?;

        match self {
            Worktree::Local(worktree) => {
                let buffer = worktree
                    .open_buffers
                    .get(&buffer_id)
                    .and_then(|buf| buf.upgrade(cx))
                    .ok_or_else(|| {
                        anyhow!("invalid buffer {} in update buffer message", buffer_id)
                    })?;
                buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
            }
            Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
                Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
                Some(RemoteBuffer::Loaded(buffer)) => {
                    if let Some(buffer) = buffer.upgrade(cx) {
                        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
                    } else {
                        worktree
                            .open_buffers
                            .insert(buffer_id, RemoteBuffer::Operations(ops));
                    }
                }
                None => {
                    worktree
                        .open_buffers
                        .insert(buffer_id, RemoteBuffer::Operations(ops));
                }
            },
        }

        Ok(())
    }

    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }

    pub fn handle_buffer_saved(
        &mut self,
        envelope: TypedEnvelope<proto::BufferSaved>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let worktree = self.as_remote_mut().unwrap();
        if let Some(buffer) = worktree
            .open_buffers
            .get(&(payload.buffer_id as usize))
            .and_then(|buf| buf.upgrade(cx))
        {
            buffer.update(cx, |buffer, cx| {
                let version = payload.version.try_into()?;
                let mtime = payload
                    .mtime
                    .ok_or_else(|| anyhow!("missing mtime"))?
                    .into();
                buffer.did_save(version, mtime, None, cx);
                Result::<_, anyhow::Error>::Ok(())
            })?;
        }
        Ok(())
    }

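    /// Copy the latest snapshot from the background scanner (local) or the update
    /// stream (remote) into the foreground worktree. While a scan is still running,
    /// schedule another poll; once it settles, refresh the files of open buffers.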
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }

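    /// Reconcile each open buffer's `File` with the current snapshot, and drop
    /// entries for buffers that have been released.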
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = buffer.file() {
                        let new_file = if let Some(entry) = old_file
                            .entry_id()
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }

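    /// Handle a `textDocument/publishDiagnostics` notification from a language
    /// server: group related diagnostics, mark one primary entry per group, apply
    /// them to the matching open buffer, and record a per-path summary.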
    fn update_diagnostics(
        &mut self,
        mut params: lsp::PublishDiagnosticsParams,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
        let abs_path = params
            .uri
            .to_file_path()
            .map_err(|_| anyhow!("URI is not a file"))?;
        let worktree_path = Arc::from(
            abs_path
                .strip_prefix(&this.abs_path)
                .context("path is not within worktree")?,
        );

        let mut group_ids_by_diagnostic_range = HashMap::default();
        let mut diagnostics_by_group_id = HashMap::default();
        let mut next_group_id = 0;
        for diagnostic in &mut params.diagnostics {
            let source = diagnostic.source.as_ref();
            let code = diagnostic.code.as_ref();
            let group_id = diagnostic_ranges(&diagnostic, &abs_path)
                .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
                .copied()
                .unwrap_or_else(|| {
                    let group_id = post_inc(&mut next_group_id);
                    for range in diagnostic_ranges(&diagnostic, &abs_path) {
                        group_ids_by_diagnostic_range.insert((source, code, range), group_id);
                    }
                    group_id
                });

            diagnostics_by_group_id
                .entry(group_id)
                .or_insert(Vec::new())
                .push(DiagnosticEntry {
                    range: diagnostic.range.start.to_point_utf16()
                        ..diagnostic.range.end.to_point_utf16(),
                    diagnostic: Diagnostic {
                        source: diagnostic.source.clone(),
                        code: diagnostic.code.clone().map(|code| match code {
                            lsp::NumberOrString::Number(code) => code.to_string(),
                            lsp::NumberOrString::String(code) => code,
                        }),
                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
                        message: mem::take(&mut diagnostic.message),
                        group_id,
                        is_primary: false,
                    },
                });
        }

        let diagnostics = diagnostics_by_group_id
            .into_values()
            .flat_map(|mut diagnostics| {
                let primary = diagnostics
                    .iter_mut()
                    .min_by_key(|entry| entry.diagnostic.severity)
                    .unwrap();
                primary.diagnostic.is_primary = true;
                diagnostics
            })
            .collect::<Vec<_>>();

        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(params.version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        let this = self.as_local_mut().unwrap();
        this.diagnostic_summaries
            .insert(worktree_path.clone(), DiagnosticSummary::new(&diagnostics));
        this.diagnostics.insert(worktree_path.clone(), diagnostics);
        Ok(())
    }

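    /// Forward a buffer operation over RPC when this worktree is shared or remote.
    /// If the request fails, the operation is pushed onto `queued_operations`.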
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree.share.as_ref().map(|share| {
                (
                    share.project_id,
                    worktree.id() as u64,
                    worktree.client.clone(),
                )
            }),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.remote_id,
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
}

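/// A point-in-time view of a worktree's contents: its entries indexed both by path
/// and by id, along with the gitignore state used while scanning.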
#[derive(Clone)]
pub struct Snapshot {
    id: usize,
    scan_id: usize,
    abs_path: Arc<Path>,
    root_name: String,
    root_char_bag: CharBag,
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}

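/// A worktree backed by the local filesystem. Holds the foreground and background
/// snapshots, open and shared buffers, diagnostics, running language servers, and
/// the share state used while collaborating.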
pub struct LocalWorktree {
    snapshot: Snapshot,
    config: WorktreeConfig,
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    poll_task: Option<Task<()>>,
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    queued_operations: Vec<(u64, Operation)>,
    languages: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    language_servers: HashMap<String, Arc<LanguageServer>>,
}

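/// Present while a local worktree is shared with collaborators; holds the remote
/// project id and the channel on which snapshots are queued for sending as diffs.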
struct ShareState {
    project_id: u64,
    snapshots_tx: Sender<Snapshot>,
}

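/// A replica of a collaborator's worktree, kept up to date by applying
/// `UpdateWorktree` messages received over RPC.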
pub struct RemoteWorktree {
    project_id: u64,
    remote_id: u64,
    snapshot: Snapshot,
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, RemoteBuffer>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    queued_operations: Vec<(u64, Operation)>,
}

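/// Buffers that are currently being loaded, keyed by path. Each value is a watch
/// channel that resolves once the load completes, so concurrent `open_buffer` calls
/// for the same path share a single load.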
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;

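/// Configuration read from a `.zed.toml` file at the worktree root.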
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    collaborators: Vec<String>,
}

impl LocalWorktree {
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // Populate the snapshot's "root name", which is used for fuzzy matching, and
        // determine whether the root entry is a file or a directory.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: cx.model_id(),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                languages,
                client,
                user_store,
                fs,
                language_servers: Default::default(),
            };

            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }

    pub fn authorized_logins(&self) -> Vec<String> {
        self.config.collaborators.clone()
    }

    pub fn languages(&self) -> &LanguageRegistry {
        &self.languages
    }

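    /// Return the running language server for the given language, starting one (and
    /// wiring its diagnostics notifications back into the worktree) if necessary.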
    pub fn ensure_language_server(
        &mut self,
        language: &Language,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(diagnostics) = diagnostics_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        handle.update(&mut cx, |this, cx| {
                            this.update_diagnostics(diagnostics, cx).log_err();
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }

    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let worktree_id = self.id();
        let mut result = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == worktree_id && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        result
    }

    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.remove(&path);
                let language = this.languages.select_language(file.full_path()).cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.ensure_language_server(language, cx));
                (diagnostics, language, server)
            });

            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                }
                buffer
            });

            this.update(&mut cx, |this, _| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }

    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }

    pub fn close_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
            shared_buffers.remove(&envelope.payload.buffer_id);
            cx.notify();
        }

        Ok(())
    }

    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Worktree>,
    ) {
        self.shared_buffers.remove(&peer_id);
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }

    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Scanning) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }

    fn is_scanning(&self) -> bool {
        if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
            true
        } else {
            false
        }
    }

    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    pub fn abs_path(&self) -> &Path {
        self.snapshot.abs_path.as_ref()
    }

    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }

    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.snapshot.abs_path.join(path)
        } else {
            self.snapshot.abs_path.to_path_buf()
        }
    }

    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }

    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }

    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }

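    /// Share this worktree under the given remote project id: send the initial
    /// snapshot, then keep streaming snapshot diffs as the background scanner
    /// produces new ones.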
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
        });

        cx.background()
            .spawn({
                let rpc = rpc.clone();
                let snapshot = snapshot.clone();
                async move {
                    let mut prev_snapshot = snapshot;
                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                        let message =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                        match rpc.send(message).await {
                            Ok(()) => prev_snapshot = snapshot,
                            Err(err) => log::error!("error sending snapshot diff {}", err),
                        }
                    }
                }
            })
            .detach();

        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto()),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
}

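/// Load a `.gitignore` file at `abs_path` and parse it into a `Gitignore` matcher
/// rooted at the file's parent directory.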
fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
    let contents = smol::block_on(fs.load(&abs_path))?;
    let parent = abs_path.parent().unwrap_or(Path::new("/"));
    let mut builder = GitignoreBuilder::new(parent);
    for line in contents.lines() {
        builder.add_line(Some(abs_path.into()), line)?;
    }
    Ok(builder.build()?)
}

impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}

impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}

impl RemoteWorktree {
    pub fn remote_id(&self) -> u64 {
        self.remote_id
    }

    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        existing_buffer
    }

    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.remote_id;
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id as u64,
                    path: path_string,
                })
                .await?;

            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}

1473enum RemoteBuffer {
1474    Operations(Vec<Operation>),
1475    Loaded(WeakModelHandle<Buffer>),
1476}
1477
1478impl RemoteBuffer {
1479    fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1480        match self {
1481            Self::Operations(_) => None,
1482            Self::Loaded(buffer) => buffer.upgrade(cx),
1483        }
1484    }
1485}
1486
1487impl Snapshot {
1488    pub fn id(&self) -> usize {
1489        self.id
1490    }
1491
1492    pub fn to_proto(&self) -> proto::Worktree {
1493        let root_name = self.root_name.clone();
1494        proto::Worktree {
1495            id: self.id as u64,
1496            root_name,
1497            entries: self
1498                .entries_by_path
1499                .cursor::<()>()
1500                .filter(|e| !e.is_ignored)
1501                .map(Into::into)
1502                .collect(),
1503        }
1504    }
1505
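        /// Builds an `UpdateWorktree` message describing how `self` differs from `other`:
        /// entries present only in `self`, or whose `scan_id` changed, are reported as
        /// updated, while entries present only in `other` are reported as removed.
        /// Ignored entries are skipped unless `include_ignored` is set.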
1506    pub fn build_update(
1507        &self,
1508        other: &Self,
1509        project_id: u64,
1510        worktree_id: u64,
1511        include_ignored: bool,
1512    ) -> proto::UpdateWorktree {
1513        let mut updated_entries = Vec::new();
1514        let mut removed_entries = Vec::new();
1515        let mut self_entries = self
1516            .entries_by_id
1517            .cursor::<()>()
1518            .filter(|e| include_ignored || !e.is_ignored)
1519            .peekable();
1520        let mut other_entries = other
1521            .entries_by_id
1522            .cursor::<()>()
1523            .filter(|e| include_ignored || !e.is_ignored)
1524            .peekable();
1525        loop {
1526            match (self_entries.peek(), other_entries.peek()) {
1527                (Some(self_entry), Some(other_entry)) => {
1528                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1529                        Ordering::Less => {
1530                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1531                            updated_entries.push(entry);
1532                            self_entries.next();
1533                        }
1534                        Ordering::Equal => {
1535                            if self_entry.scan_id != other_entry.scan_id {
1536                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1537                                updated_entries.push(entry);
1538                            }
1539
1540                            self_entries.next();
1541                            other_entries.next();
1542                        }
1543                        Ordering::Greater => {
1544                            removed_entries.push(other_entry.id as u64);
1545                            other_entries.next();
1546                        }
1547                    }
1548                }
1549                (Some(self_entry), None) => {
1550                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1551                    updated_entries.push(entry);
1552                    self_entries.next();
1553                }
1554                (None, Some(other_entry)) => {
1555                    removed_entries.push(other_entry.id as u64);
1556                    other_entries.next();
1557                }
1558                (None, None) => break,
1559            }
1560        }
1561
1562        proto::UpdateWorktree {
1563            project_id,
1564            worktree_id,
1565            root_name: self.root_name().to_string(),
1566            updated_entries,
1567            removed_entries,
1568        }
1569    }
1570
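        /// Applies an `UpdateWorktree` message received from a remote peer: removed
        /// entries are deleted from both entry trees, updated entries are (re)inserted,
        /// and the snapshot's `scan_id` is bumped.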
1571    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1572        self.scan_id += 1;
1573        let scan_id = self.scan_id;
1574
1575        let mut entries_by_path_edits = Vec::new();
1576        let mut entries_by_id_edits = Vec::new();
1577        for entry_id in update.removed_entries {
1578            let entry_id = entry_id as usize;
1579            let entry = self
1580                .entry_for_id(entry_id)
1581                .ok_or_else(|| anyhow!("unknown entry"))?;
1582            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1583            entries_by_id_edits.push(Edit::Remove(entry.id));
1584        }
1585
1586        for entry in update.updated_entries {
1587            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1588            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1589                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1590            }
1591            entries_by_id_edits.push(Edit::Insert(PathEntry {
1592                id: entry.id,
1593                path: entry.path.clone(),
1594                is_ignored: entry.is_ignored,
1595                scan_id,
1596            }));
1597            entries_by_path_edits.push(Edit::Insert(entry));
1598        }
1599
1600        self.entries_by_path.edit(entries_by_path_edits, &());
1601        self.entries_by_id.edit(entries_by_id_edits, &());
1602
1603        Ok(())
1604    }
1605
1606    pub fn file_count(&self) -> usize {
1607        self.entries_by_path.summary().file_count
1608    }
1609
1610    pub fn visible_file_count(&self) -> usize {
1611        self.entries_by_path.summary().visible_file_count
1612    }
1613
1614    fn traverse_from_offset(
1615        &self,
1616        include_dirs: bool,
1617        include_ignored: bool,
1618        start_offset: usize,
1619    ) -> Traversal {
1620        let mut cursor = self.entries_by_path.cursor();
1621        cursor.seek(
1622            &TraversalTarget::Count {
1623                count: start_offset,
1624                include_dirs,
1625                include_ignored,
1626            },
1627            Bias::Right,
1628            &(),
1629        );
1630        Traversal {
1631            cursor,
1632            include_dirs,
1633            include_ignored,
1634        }
1635    }
1636
1637    fn traverse_from_path(
1638        &self,
1639        include_dirs: bool,
1640        include_ignored: bool,
1641        path: &Path,
1642    ) -> Traversal {
1643        let mut cursor = self.entries_by_path.cursor();
1644        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1645        Traversal {
1646            cursor,
1647            include_dirs,
1648            include_ignored,
1649        }
1650    }
1651
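        /// Iterates over the file entries of this snapshot in path order, optionally
        /// including ignored files, starting at the given offset into that sequence.
        /// A rough usage sketch (the `snapshot` binding here is hypothetical):
        ///
        /// ```ignore
        /// for entry in snapshot.files(false, 0) {
        ///     println!("{:?}", entry.path);
        /// }
        /// ```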
1652    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1653        self.traverse_from_offset(false, include_ignored, start)
1654    }
1655
1656    pub fn entries(&self, include_ignored: bool) -> Traversal {
1657        self.traverse_from_offset(true, include_ignored, 0)
1658    }
1659
1660    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1661        let empty_path = Path::new("");
1662        self.entries_by_path
1663            .cursor::<()>()
1664            .filter(move |entry| entry.path.as_ref() != empty_path)
1665            .map(|entry| &entry.path)
1666    }
1667
1668    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1669        let mut cursor = self.entries_by_path.cursor();
1670        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1671        let traversal = Traversal {
1672            cursor,
1673            include_dirs: true,
1674            include_ignored: true,
1675        };
1676        ChildEntriesIter {
1677            traversal,
1678            parent_path,
1679        }
1680    }
1681
1682    pub fn root_entry(&self) -> Option<&Entry> {
1683        self.entry_for_path("")
1684    }
1685
1686    pub fn root_name(&self) -> &str {
1687        &self.root_name
1688    }
1689
1690    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1691        let path = path.as_ref();
1692        self.traverse_from_path(true, true, path)
1693            .entry()
1694            .and_then(|entry| {
1695                if entry.path.as_ref() == path {
1696                    Some(entry)
1697                } else {
1698                    None
1699                }
1700            })
1701    }
1702
1703    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
1704        let entry = self.entries_by_id.get(&id, &())?;
1705        self.entry_for_path(&entry.path)
1706    }
1707
1708    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1709        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1710    }
1711
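        /// Inserts or replaces a single entry in both entry trees. If the entry is a
        /// `.gitignore` file, the ignore rules for its parent directory are re-parsed
        /// and cached in `self.ignores`.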
1712    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1713        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1714            let abs_path = self.abs_path.join(&entry.path);
1715            match build_gitignore(&abs_path, fs) {
1716                Ok(ignore) => {
1717                    let ignore_dir_path = entry.path.parent().unwrap();
1718                    self.ignores
1719                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
1720                }
1721                Err(error) => {
1722                    log::error!(
1723                        "error loading .gitignore file {:?} - {:?}",
1724                        &entry.path,
1725                        error
1726                    );
1727                }
1728            }
1729        }
1730
1731        self.reuse_entry_id(&mut entry);
1732        self.entries_by_path.insert_or_replace(entry.clone(), &());
1733        self.entries_by_id.insert_or_replace(
1734            PathEntry {
1735                id: entry.id,
1736                path: entry.path.clone(),
1737                is_ignored: entry.is_ignored,
1738                scan_id: self.scan_id,
1739            },
1740            &(),
1741        );
1742        entry
1743    }
1744
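        /// Fills in the children of a directory that was previously inserted as
        /// `EntryKind::PendingDir`, marking it as a fully scanned `Dir` and recording
        /// any `.gitignore` found inside it.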
1745    fn populate_dir(
1746        &mut self,
1747        parent_path: Arc<Path>,
1748        entries: impl IntoIterator<Item = Entry>,
1749        ignore: Option<Arc<Gitignore>>,
1750    ) {
1751        let mut parent_entry = self
1752            .entries_by_path
1753            .get(&PathKey(parent_path.clone()), &())
1754            .unwrap()
1755            .clone();
1756        if let Some(ignore) = ignore {
1757            self.ignores.insert(parent_path, (ignore, self.scan_id));
1758        }
1759        if matches!(parent_entry.kind, EntryKind::PendingDir) {
1760            parent_entry.kind = EntryKind::Dir;
1761        } else {
1762            unreachable!();
1763        }
1764
1765        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1766        let mut entries_by_id_edits = Vec::new();
1767
1768        for mut entry in entries {
1769            self.reuse_entry_id(&mut entry);
1770            entries_by_id_edits.push(Edit::Insert(PathEntry {
1771                id: entry.id,
1772                path: entry.path.clone(),
1773                is_ignored: entry.is_ignored,
1774                scan_id: self.scan_id,
1775            }));
1776            entries_by_path_edits.push(Edit::Insert(entry));
1777        }
1778
1779        self.entries_by_path.edit(entries_by_path_edits, &());
1780        self.entries_by_id.edit(entries_by_id_edits, &());
1781    }
1782
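        /// If an entry with the same inode was removed earlier in the current scan,
        /// reuse its id so the change is treated as a rename; otherwise keep the id of
        /// any existing entry at the same path.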
1783    fn reuse_entry_id(&mut self, entry: &mut Entry) {
1784        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1785            entry.id = removed_entry_id;
1786        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1787            entry.id = existing_entry.id;
1788        }
1789    }
1790
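        /// Removes the entry at `path` and all of its descendants, remembering the
        /// removed ids by inode so a later re-insertion can reuse them. If the removed
        /// path was a `.gitignore`, the parent directory's ignore entry is stamped with
        /// the current scan id so its statuses get recomputed.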
1791    fn remove_path(&mut self, path: &Path) {
1792        let mut new_entries;
1793        let removed_entries;
1794        {
1795            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1796            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1797            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1798            new_entries.push_tree(cursor.suffix(&()), &());
1799        }
1800        self.entries_by_path = new_entries;
1801
1802        let mut entries_by_id_edits = Vec::new();
1803        for entry in removed_entries.cursor::<()>() {
1804            let removed_entry_id = self
1805                .removed_entry_ids
1806                .entry(entry.inode)
1807                .or_insert(entry.id);
1808            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1809            entries_by_id_edits.push(Edit::Remove(entry.id));
1810        }
1811        self.entries_by_id.edit(entries_by_id_edits, &());
1812
1813        if path.file_name() == Some(&GITIGNORE) {
1814            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
1815                *scan_id = self.scan_id;
1816            }
1817        }
1818    }
1819
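        /// Builds the stack of `.gitignore` rules that apply to `path`, walking its
        /// ancestors from the root downward and short-circuiting to `IgnoreStack::all()`
        /// as soon as an ancestor (or the path itself) is ignored.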
1820    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1821        let mut new_ignores = Vec::new();
1822        for ancestor in path.ancestors().skip(1) {
1823            if let Some((ignore, _)) = self.ignores.get(ancestor) {
1824                new_ignores.push((ancestor, Some(ignore.clone())));
1825            } else {
1826                new_ignores.push((ancestor, None));
1827            }
1828        }
1829
1830        let mut ignore_stack = IgnoreStack::none();
1831        for (parent_path, ignore) in new_ignores.into_iter().rev() {
1832            if ignore_stack.is_path_ignored(&parent_path, true) {
1833                ignore_stack = IgnoreStack::all();
1834                break;
1835            } else if let Some(ignore) = ignore {
1836                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
1837            }
1838        }
1839
1840        if ignore_stack.is_path_ignored(path, is_dir) {
1841            ignore_stack = IgnoreStack::all();
1842        }
1843
1844        ignore_stack
1845    }
1846}
1847
1848impl fmt::Debug for Snapshot {
1849    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1850        for entry in self.entries_by_path.cursor::<()>() {
1851            for _ in entry.path.ancestors().skip(1) {
1852                write!(f, " ")?;
1853            }
1854            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
1855        }
1856        Ok(())
1857    }
1858}
1859
1860#[derive(Clone, PartialEq)]
1861pub struct File {
1862    entry_id: Option<usize>,
1863    worktree: ModelHandle<Worktree>,
1864    worktree_path: Arc<Path>,
1865    pub path: Arc<Path>,
1866    pub mtime: SystemTime,
1867    is_local: bool,
1868}
1869
1870impl language::File for File {
1871    fn worktree_id(&self) -> usize {
1872        self.worktree.id()
1873    }
1874
1875    fn entry_id(&self) -> Option<usize> {
1876        self.entry_id
1877    }
1878
1879    fn mtime(&self) -> SystemTime {
1880        self.mtime
1881    }
1882
1883    fn path(&self) -> &Arc<Path> {
1884        &self.path
1885    }
1886
1887    fn abs_path(&self) -> Option<PathBuf> {
1888        if self.is_local {
1889            Some(self.worktree_path.join(&self.path))
1890        } else {
1891            None
1892        }
1893    }
1894
1895    fn full_path(&self) -> PathBuf {
1896        let mut full_path = PathBuf::new();
1897        if let Some(worktree_name) = self.worktree_path.file_name() {
1898            full_path.push(worktree_name);
1899        }
1900        full_path.push(&self.path);
1901        full_path
1902    }
1903
1904    /// Returns the last component of this handle's absolute path. If this handle refers to the root
1905    /// of its worktree, then this method will return the name of the worktree itself.
1906    fn file_name<'a>(&'a self) -> Option<OsString> {
1907        self.path
1908            .file_name()
1909            .or_else(|| self.worktree_path.file_name())
1910            .map(Into::into)
1911    }
1912
1913    fn is_deleted(&self) -> bool {
1914        self.entry_id.is_none()
1915    }
1916
1917    fn save(
1918        &self,
1919        buffer_id: u64,
1920        text: Rope,
1921        version: clock::Global,
1922        cx: &mut MutableAppContext,
1923    ) -> Task<Result<(clock::Global, SystemTime)>> {
1924        let worktree_id = self.worktree.read(cx).id() as u64;
1925        self.worktree.update(cx, |worktree, cx| match worktree {
1926            Worktree::Local(worktree) => {
1927                let rpc = worktree.client.clone();
1928                let project_id = worktree.share.as_ref().map(|share| share.project_id);
1929                let save = worktree.save(self.path.clone(), text, cx);
1930                cx.background().spawn(async move {
1931                    let entry = save.await?;
1932                    if let Some(project_id) = project_id {
1933                        rpc.send(proto::BufferSaved {
1934                            project_id,
1935                            worktree_id,
1936                            buffer_id,
1937                            version: (&version).into(),
1938                            mtime: Some(entry.mtime.into()),
1939                        })
1940                        .await?;
1941                    }
1942                    Ok((version, entry.mtime))
1943                })
1944            }
1945            Worktree::Remote(worktree) => {
1946                let rpc = worktree.client.clone();
1947                let project_id = worktree.project_id;
1948                cx.foreground().spawn(async move {
1949                    let response = rpc
1950                        .request(proto::SaveBuffer {
1951                            project_id,
1952                            worktree_id,
1953                            buffer_id,
1954                        })
1955                        .await?;
1956                    let version = response.version.try_into()?;
1957                    let mtime = response
1958                        .mtime
1959                        .ok_or_else(|| anyhow!("missing mtime"))?
1960                        .into();
1961                    Ok((version, mtime))
1962                })
1963            }
1964        })
1965    }
1966
1967    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
1968        let worktree = self.worktree.read(cx).as_local()?;
1969        let abs_path = worktree.absolutize(&self.path);
1970        let fs = worktree.fs.clone();
1971        Some(
1972            cx.background()
1973                .spawn(async move { fs.load(&abs_path).await }),
1974        )
1975    }
1976
1977    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
1978        self.worktree.update(cx, |worktree, cx| {
1979            worktree.send_buffer_update(buffer_id, operation, cx);
1980        });
1981    }
1982
1983    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
1984        self.worktree.update(cx, |worktree, cx| {
1985            if let Worktree::Remote(worktree) = worktree {
1986                let project_id = worktree.project_id;
1987                let worktree_id = worktree.remote_id;
1988                let rpc = worktree.client.clone();
1989                cx.background()
1990                    .spawn(async move {
1991                        if let Err(error) = rpc
1992                            .send(proto::CloseBuffer {
1993                                project_id,
1994                                worktree_id,
1995                                buffer_id,
1996                            })
1997                            .await
1998                        {
1999                            log::error!("error closing remote buffer: {}", error);
2000                        }
2001                    })
2002                    .detach();
2003            }
2004        });
2005    }
2006
2007    fn boxed_clone(&self) -> Box<dyn language::File> {
2008        Box::new(self.clone())
2009    }
2010
2011    fn as_any(&self) -> &dyn Any {
2012        self
2013    }
2014}
2015
2016#[derive(Clone, Debug)]
2017pub struct Entry {
2018    pub id: usize,
2019    pub kind: EntryKind,
2020    pub path: Arc<Path>,
2021    pub inode: u64,
2022    pub mtime: SystemTime,
2023    pub is_symlink: bool,
2024    pub is_ignored: bool,
2025}
2026
2027#[derive(Clone, Debug)]
2028pub enum EntryKind {
2029    PendingDir,
2030    Dir,
2031    File(CharBag),
2032}
2033
2034impl Entry {
2035    fn new(
2036        path: Arc<Path>,
2037        metadata: &fs::Metadata,
2038        next_entry_id: &AtomicUsize,
2039        root_char_bag: CharBag,
2040    ) -> Self {
2041        Self {
2042            id: next_entry_id.fetch_add(1, SeqCst),
2043            kind: if metadata.is_dir {
2044                EntryKind::PendingDir
2045            } else {
2046                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2047            },
2048            path,
2049            inode: metadata.inode,
2050            mtime: metadata.mtime,
2051            is_symlink: metadata.is_symlink,
2052            is_ignored: false,
2053        }
2054    }
2055
2056    pub fn is_dir(&self) -> bool {
2057        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2058    }
2059
2060    pub fn is_file(&self) -> bool {
2061        matches!(self.kind, EntryKind::File(_))
2062    }
2063}
2064
2065impl sum_tree::Item for Entry {
2066    type Summary = EntrySummary;
2067
2068    fn summary(&self) -> Self::Summary {
2069        let visible_count = if self.is_ignored { 0 } else { 1 };
2070        let file_count;
2071        let visible_file_count;
2072        if self.is_file() {
2073            file_count = 1;
2074            visible_file_count = visible_count;
2075        } else {
2076            file_count = 0;
2077            visible_file_count = 0;
2078        }
2079
2080        EntrySummary {
2081            max_path: self.path.clone(),
2082            count: 1,
2083            visible_count,
2084            file_count,
2085            visible_file_count,
2086        }
2087    }
2088}
2089
2090impl sum_tree::KeyedItem for Entry {
2091    type Key = PathKey;
2092
2093    fn key(&self) -> Self::Key {
2094        PathKey(self.path.clone())
2095    }
2096}
2097
2098#[derive(Clone, Debug)]
2099pub struct EntrySummary {
2100    max_path: Arc<Path>,
2101    count: usize,
2102    visible_count: usize,
2103    file_count: usize,
2104    visible_file_count: usize,
2105}
2106
2107impl Default for EntrySummary {
2108    fn default() -> Self {
2109        Self {
2110            max_path: Arc::from(Path::new("")),
2111            count: 0,
2112            visible_count: 0,
2113            file_count: 0,
2114            visible_file_count: 0,
2115        }
2116    }
2117}
2118
2119impl sum_tree::Summary for EntrySummary {
2120    type Context = ();
2121
2122    fn add_summary(&mut self, rhs: &Self, _: &()) {
2123        self.max_path = rhs.max_path.clone();
            self.count += rhs.count;
2124        self.visible_count += rhs.visible_count;
2125        self.file_count += rhs.file_count;
2126        self.visible_file_count += rhs.visible_file_count;
2127    }
2128}
2129
2130#[derive(Clone, Debug)]
2131struct PathEntry {
2132    id: usize,
2133    path: Arc<Path>,
2134    is_ignored: bool,
2135    scan_id: usize,
2136}
2137
2138impl sum_tree::Item for PathEntry {
2139    type Summary = PathEntrySummary;
2140
2141    fn summary(&self) -> Self::Summary {
2142        PathEntrySummary { max_id: self.id }
2143    }
2144}
2145
2146impl sum_tree::KeyedItem for PathEntry {
2147    type Key = usize;
2148
2149    fn key(&self) -> Self::Key {
2150        self.id
2151    }
2152}
2153
2154#[derive(Clone, Debug, Default)]
2155struct PathEntrySummary {
2156    max_id: usize,
2157}
2158
2159impl sum_tree::Summary for PathEntrySummary {
2160    type Context = ();
2161
2162    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2163        self.max_id = summary.max_id;
2164    }
2165}
2166
2167impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
2168    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2169        *self = summary.max_id;
2170    }
2171}
2172
2173#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2174pub struct PathKey(Arc<Path>);
2175
2176impl Default for PathKey {
2177    fn default() -> Self {
2178        Self(Path::new("").into())
2179    }
2180}
2181
2182impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2183    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2184        self.0 = summary.max_path.clone();
2185    }
2186}
2187
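    /// Scans the worktree on background threads, keeping the shared `snapshot` up to
    /// date and reporting progress through the `notify` channel.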
2188struct BackgroundScanner {
2189    fs: Arc<dyn Fs>,
2190    snapshot: Arc<Mutex<Snapshot>>,
2191    notify: Sender<ScanState>,
2192    executor: Arc<executor::Background>,
2193}
2194
2195impl BackgroundScanner {
2196    fn new(
2197        snapshot: Arc<Mutex<Snapshot>>,
2198        notify: Sender<ScanState>,
2199        fs: Arc<dyn Fs>,
2200        executor: Arc<executor::Background>,
2201    ) -> Self {
2202        Self {
2203            fs,
2204            snapshot,
2205            notify,
2206            executor,
2207        }
2208    }
2209
2210    fn abs_path(&self) -> Arc<Path> {
2211        self.snapshot.lock().abs_path.clone()
2212    }
2213
2214    fn snapshot(&self) -> Snapshot {
2215        self.snapshot.lock().clone()
2216    }
2217
2218    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2219        if self.notify.send(ScanState::Scanning).await.is_err() {
2220            return;
2221        }
2222
2223        if let Err(err) = self.scan_dirs().await {
2224            if self
2225                .notify
2226                .send(ScanState::Err(Arc::new(err)))
2227                .await
2228                .is_err()
2229            {
2230                return;
2231            }
2232        }
2233
2234        if self.notify.send(ScanState::Idle).await.is_err() {
2235            return;
2236        }
2237
2238        futures::pin_mut!(events_rx);
2239        while let Some(events) = events_rx.next().await {
2240            if self.notify.send(ScanState::Scanning).await.is_err() {
2241                break;
2242            }
2243
2244            if !self.process_events(events).await {
2245                break;
2246            }
2247
2248            if self.notify.send(ScanState::Idle).await.is_err() {
2249                break;
2250            }
2251        }
2252    }
2253
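        /// Performs the initial recursive scan of the worktree root: each discovered
        /// directory is pushed onto an unbounded work queue and drained by one task per
        /// CPU, so subdirectories are scanned in parallel.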
2254    async fn scan_dirs(&mut self) -> Result<()> {
2255        let root_char_bag;
2256        let next_entry_id;
2257        let is_dir;
2258        {
2259            let snapshot = self.snapshot.lock();
2260            root_char_bag = snapshot.root_char_bag;
2261            next_entry_id = snapshot.next_entry_id.clone();
2262            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
2263        };
2264
2265        if is_dir {
2266            let path: Arc<Path> = Arc::from(Path::new(""));
2267            let abs_path = self.abs_path();
2268            let (tx, rx) = channel::unbounded();
2269            tx.send(ScanJob {
2270                abs_path: abs_path.to_path_buf(),
2271                path,
2272                ignore_stack: IgnoreStack::none(),
2273                scan_queue: tx.clone(),
2274            })
2275            .await
2276            .unwrap();
2277            drop(tx);
2278
2279            self.executor
2280                .scoped(|scope| {
2281                    for _ in 0..self.executor.num_cpus() {
2282                        scope.spawn(async {
2283                            while let Ok(job) = rx.recv().await {
2284                                if let Err(err) = self
2285                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2286                                    .await
2287                                {
2288                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
2289                                }
2290                            }
2291                        });
2292                    }
2293                })
2294                .await;
2295        }
2296
2297        Ok(())
2298    }
2299
2300    async fn scan_dir(
2301        &self,
2302        root_char_bag: CharBag,
2303        next_entry_id: Arc<AtomicUsize>,
2304        job: &ScanJob,
2305    ) -> Result<()> {
2306        let mut new_entries: Vec<Entry> = Vec::new();
2307        let mut new_jobs: Vec<ScanJob> = Vec::new();
2308        let mut ignore_stack = job.ignore_stack.clone();
2309        let mut new_ignore = None;
2310
2311        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2312        while let Some(child_abs_path) = child_paths.next().await {
2313            let child_abs_path = match child_abs_path {
2314                Ok(child_abs_path) => child_abs_path,
2315                Err(error) => {
2316                    log::error!("error processing entry {:?}", error);
2317                    continue;
2318                }
2319            };
2320            let child_name = child_abs_path.file_name().unwrap();
2321            let child_path: Arc<Path> = job.path.join(child_name).into();
2322            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
2323                Some(metadata) => metadata,
2324                None => continue,
2325            };
2326
2327            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2328            if child_name == *GITIGNORE {
2329                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
2330                    Ok(ignore) => {
2331                        let ignore = Arc::new(ignore);
2332                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2333                        new_ignore = Some(ignore);
2334                    }
2335                    Err(error) => {
2336                        log::error!(
2337                            "error loading .gitignore file {:?} - {:?}",
2338                            child_name,
2339                            error
2340                        );
2341                    }
2342                }
2343
2344                // Update ignore status of any child entries we've already processed to reflect the
2345                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
2346                // it sorts near the start of the directory listing, so there should rarely be many
2347                // entries to revisit. Update the ignore stack associated with any new jobs as well.
2348                let mut new_jobs = new_jobs.iter_mut();
2349                for entry in &mut new_entries {
2350                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2351                    if entry.is_dir() {
2352                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2353                            IgnoreStack::all()
2354                        } else {
2355                            ignore_stack.clone()
2356                        };
2357                    }
2358                }
2359            }
2360
2361            let mut child_entry = Entry::new(
2362                child_path.clone(),
2363                &child_metadata,
2364                &next_entry_id,
2365                root_char_bag,
2366            );
2367
2368            if child_metadata.is_dir {
2369                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
2370                child_entry.is_ignored = is_ignored;
2371                new_entries.push(child_entry);
2372                new_jobs.push(ScanJob {
2373                    abs_path: child_abs_path,
2374                    path: child_path,
2375                    ignore_stack: if is_ignored {
2376                        IgnoreStack::all()
2377                    } else {
2378                        ignore_stack.clone()
2379                    },
2380                    scan_queue: job.scan_queue.clone(),
2381                });
2382            } else {
2383                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
2384                new_entries.push(child_entry);
2385            };
2386        }
2387
2388        self.snapshot
2389            .lock()
2390            .populate_dir(job.path.clone(), new_entries, new_ignore);
2391        for new_job in new_jobs {
2392            job.scan_queue.send(new_job).await.unwrap();
2393        }
2394
2395        Ok(())
2396    }
2397
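        /// Processes a batch of file-system events: affected paths are removed from a
        /// working copy of the snapshot, re-stat'ed and re-inserted, any directories that
        /// (re)appeared are rescanned in parallel, and ignore statuses are refreshed.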
2398    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2399        let mut snapshot = self.snapshot();
2400        snapshot.scan_id += 1;
2401
2402        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
2403            abs_path
2404        } else {
2405            return false;
2406        };
2407        let root_char_bag = snapshot.root_char_bag;
2408        let next_entry_id = snapshot.next_entry_id.clone();
2409
2410        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2411        events.dedup_by(|a, b| a.path.starts_with(&b.path));
2412
2413        for event in &events {
2414            match event.path.strip_prefix(&root_abs_path) {
2415                Ok(path) => snapshot.remove_path(&path),
2416                Err(_) => {
2417                    log::error!(
2418                        "unexpected event {:?} for root path {:?}",
2419                        event.path,
2420                        root_abs_path
2421                    );
2422                    continue;
2423                }
2424            }
2425        }
2426
2427        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2428        for event in events {
2429            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
2430                Ok(path) => Arc::from(path.to_path_buf()),
2431                Err(_) => {
2432                    log::error!(
2433                        "unexpected event {:?} for root path {:?}",
2434                        event.path,
2435                        root_abs_path
2436                    );
2437                    continue;
2438                }
2439            };
2440
2441            match self.fs.metadata(&event.path).await {
2442                Ok(Some(metadata)) => {
2443                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
2444                    let mut fs_entry = Entry::new(
2445                        path.clone(),
2446                        &metadata,
2447                        snapshot.next_entry_id.as_ref(),
2448                        snapshot.root_char_bag,
2449                    );
2450                    fs_entry.is_ignored = ignore_stack.is_all();
2451                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
2452                    if metadata.is_dir {
2453                        scan_queue_tx
2454                            .send(ScanJob {
2455                                abs_path: event.path,
2456                                path,
2457                                ignore_stack,
2458                                scan_queue: scan_queue_tx.clone(),
2459                            })
2460                            .await
2461                            .unwrap();
2462                    }
2463                }
2464                Ok(None) => {}
2465                Err(err) => {
2466                    // TODO - create a special 'error' entry in the entries tree to mark this
2467                    log::error!("error reading file on event {:?}", err);
2468                }
2469            }
2470        }
2471
2472        *self.snapshot.lock() = snapshot;
2473
2474        // Scan any directories that were created as part of this event batch.
2475        drop(scan_queue_tx);
2476        self.executor
2477            .scoped(|scope| {
2478                for _ in 0..self.executor.num_cpus() {
2479                    scope.spawn(async {
2480                        while let Ok(job) = scan_queue_rx.recv().await {
2481                            if let Err(err) = self
2482                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2483                                .await
2484                            {
2485                                log::error!("error scanning {:?}: {}", job.abs_path, err);
2486                            }
2487                        }
2488                    });
2489                }
2490            })
2491            .await;
2492
2493        // Attempt to detect renames only over a single batch of file-system events.
2494        self.snapshot.lock().removed_entry_ids.clear();
2495
2496        self.update_ignore_statuses().await;
2497        true
2498    }
2499
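        /// Re-applies ignore rules after the scan touched one or more `.gitignore`
        /// files: stale ignore entries are dropped, and the subtrees beneath each
        /// modified `.gitignore` are re-walked in parallel to update `is_ignored` flags.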
2500    async fn update_ignore_statuses(&self) {
2501        let mut snapshot = self.snapshot();
2502
2503        let mut ignores_to_update = Vec::new();
2504        let mut ignores_to_delete = Vec::new();
2505        for (parent_path, (_, scan_id)) in &snapshot.ignores {
2506            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2507                ignores_to_update.push(parent_path.clone());
2508            }
2509
2510            let ignore_path = parent_path.join(&*GITIGNORE);
2511            if snapshot.entry_for_path(ignore_path).is_none() {
2512                ignores_to_delete.push(parent_path.clone());
2513            }
2514        }
2515
2516        for parent_path in ignores_to_delete {
2517            snapshot.ignores.remove(&parent_path);
2518            self.snapshot.lock().ignores.remove(&parent_path);
2519        }
2520
2521        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2522        ignores_to_update.sort_unstable();
2523        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2524        while let Some(parent_path) = ignores_to_update.next() {
2525            while ignores_to_update
2526                .peek()
2527                .map_or(false, |p| p.starts_with(&parent_path))
2528            {
2529                ignores_to_update.next().unwrap();
2530            }
2531
2532            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2533            ignore_queue_tx
2534                .send(UpdateIgnoreStatusJob {
2535                    path: parent_path,
2536                    ignore_stack,
2537                    ignore_queue: ignore_queue_tx.clone(),
2538                })
2539                .await
2540                .unwrap();
2541        }
2542        drop(ignore_queue_tx);
2543
2544        self.executor
2545            .scoped(|scope| {
2546                for _ in 0..self.executor.num_cpus() {
2547                    scope.spawn(async {
2548                        while let Ok(job) = ignore_queue_rx.recv().await {
2549                            self.update_ignore_status(job, &snapshot).await;
2550                        }
2551                    });
2552                }
2553            })
2554            .await;
2555    }
2556
2557    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
2558        let mut ignore_stack = job.ignore_stack;
2559        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2560            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2561        }
2562
2563        let mut entries_by_id_edits = Vec::new();
2564        let mut entries_by_path_edits = Vec::new();
2565        for mut entry in snapshot.child_entries(&job.path).cloned() {
2566            let was_ignored = entry.is_ignored;
2567            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2568            if entry.is_dir() {
2569                let child_ignore_stack = if entry.is_ignored {
2570                    IgnoreStack::all()
2571                } else {
2572                    ignore_stack.clone()
2573                };
2574                job.ignore_queue
2575                    .send(UpdateIgnoreStatusJob {
2576                        path: entry.path.clone(),
2577                        ignore_stack: child_ignore_stack,
2578                        ignore_queue: job.ignore_queue.clone(),
2579                    })
2580                    .await
2581                    .unwrap();
2582            }
2583
2584            if entry.is_ignored != was_ignored {
2585                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2586                path_entry.scan_id = snapshot.scan_id;
2587                path_entry.is_ignored = entry.is_ignored;
2588                entries_by_id_edits.push(Edit::Insert(path_entry));
2589                entries_by_path_edits.push(Edit::Insert(entry));
2590            }
2591        }
2592
2593        let mut snapshot = self.snapshot.lock();
2594        snapshot.entries_by_path.edit(entries_by_path_edits, &());
2595        snapshot.entries_by_id.edit(entries_by_id_edits, &());
2596    }
2597}
2598
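    /// Re-reads the metadata for `abs_path` and inserts a fresh `Entry` for `path` into
    /// the snapshot, returning the newly inserted entry.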
2599async fn refresh_entry(
2600    fs: &dyn Fs,
2601    snapshot: &Mutex<Snapshot>,
2602    path: Arc<Path>,
2603    abs_path: &Path,
2604) -> Result<Entry> {
2605    let root_char_bag;
2606    let next_entry_id;
2607    {
2608        let snapshot = snapshot.lock();
2609        root_char_bag = snapshot.root_char_bag;
2610        next_entry_id = snapshot.next_entry_id.clone();
2611    }
2612    let entry = Entry::new(
2613        path,
2614        &fs.metadata(abs_path)
2615            .await?
2616            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2617        &next_entry_id,
2618        root_char_bag,
2619    );
2620    Ok(snapshot.lock().insert_entry(entry, fs))
2621}
2622
2623fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2624    let mut result = root_char_bag;
2625    result.extend(
2626        path.to_string_lossy()
2627            .chars()
2628            .map(|c| c.to_ascii_lowercase()),
2629    );
2630    result
2631}
2632
2633struct ScanJob {
2634    abs_path: PathBuf,
2635    path: Arc<Path>,
2636    ignore_stack: Arc<IgnoreStack>,
2637    scan_queue: Sender<ScanJob>,
2638}
2639
2640struct UpdateIgnoreStatusJob {
2641    path: Arc<Path>,
2642    ignore_stack: Arc<IgnoreStack>,
2643    ignore_queue: Sender<UpdateIgnoreStatusJob>,
2644}
2645
2646pub trait WorktreeHandle {
2647    #[cfg(test)]
2648    fn flush_fs_events<'a>(
2649        &self,
2650        cx: &'a gpui::TestAppContext,
2651    ) -> futures::future::LocalBoxFuture<'a, ()>;
2652}
2653
2654impl WorktreeHandle for ModelHandle<Worktree> {
2655    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2656    // occurred before the worktree was constructed. These events can cause the worktree to perform
2657    // extra directory scans, and emit extra scan-state notifications.
2658    //
2659    // This function mutates the worktree's directory and waits for those mutations to be picked up,
2660    // to ensure that all redundant FS events have already been processed.
2661    #[cfg(test)]
2662    fn flush_fs_events<'a>(
2663        &self,
2664        cx: &'a gpui::TestAppContext,
2665    ) -> futures::future::LocalBoxFuture<'a, ()> {
2666        use smol::future::FutureExt;
2667
2668        let filename = "fs-event-sentinel";
2669        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
2670        let tree = self.clone();
2671        async move {
2672            std::fs::write(root_path.join(filename), "").unwrap();
2673            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
2674                .await;
2675
2676            std::fs::remove_file(root_path.join(filename)).unwrap();
2677            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
2678                .await;
2679
2680            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2681                .await;
2682        }
2683        .boxed_local()
2684    }
2685}
2686
2687#[derive(Clone, Debug)]
2688struct TraversalProgress<'a> {
2689    max_path: &'a Path,
2690    count: usize,
2691    visible_count: usize,
2692    file_count: usize,
2693    visible_file_count: usize,
2694}
2695
2696impl<'a> TraversalProgress<'a> {
2697    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2698        match (include_ignored, include_dirs) {
2699            (true, true) => self.count,
2700            (true, false) => self.file_count,
2701            (false, true) => self.visible_count,
2702            (false, false) => self.visible_file_count,
2703        }
2704    }
2705}
2706
2707impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2708    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2709        self.max_path = summary.max_path.as_ref();
2710        self.count += summary.count;
2711        self.visible_count += summary.visible_count;
2712        self.file_count += summary.file_count;
2713        self.visible_file_count += summary.visible_file_count;
2714    }
2715}
2716
2717impl<'a> Default for TraversalProgress<'a> {
2718    fn default() -> Self {
2719        Self {
2720            max_path: Path::new(""),
2721            count: 0,
2722            visible_count: 0,
2723            file_count: 0,
2724            visible_file_count: 0,
2725        }
2726    }
2727}
2728
2729pub struct Traversal<'a> {
2730    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2731    include_ignored: bool,
2732    include_dirs: bool,
2733}
2734
2735impl<'a> Traversal<'a> {
2736    pub fn advance(&mut self) -> bool {
2737        self.advance_to_offset(self.offset() + 1)
2738    }
2739
2740    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2741        self.cursor.seek_forward(
2742            &TraversalTarget::Count {
2743                count: offset,
2744                include_dirs: self.include_dirs,
2745                include_ignored: self.include_ignored,
2746            },
2747            Bias::Right,
2748            &(),
2749        )
2750    }
2751
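        /// Advances past all descendants of the current entry, stopping at the next
        /// entry outside of it that matches this traversal's directory and ignored-file
        /// filters; returns false if no such entry exists.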
2752    pub fn advance_to_sibling(&mut self) -> bool {
2753        while let Some(entry) = self.cursor.item() {
2754            self.cursor.seek_forward(
2755                &TraversalTarget::PathSuccessor(&entry.path),
2756                Bias::Left,
2757                &(),
2758            );
2759            if let Some(entry) = self.cursor.item() {
2760                if (self.include_dirs || !entry.is_dir())
2761                    && (self.include_ignored || !entry.is_ignored)
2762                {
2763                    return true;
2764                }
2765            }
2766        }
2767        false
2768    }
2769
2770    pub fn entry(&self) -> Option<&'a Entry> {
2771        self.cursor.item()
2772    }
2773
2774    pub fn offset(&self) -> usize {
2775        self.cursor
2776            .start()
2777            .count(self.include_dirs, self.include_ignored)
2778    }
2779}
2780
2781impl<'a> Iterator for Traversal<'a> {
2782    type Item = &'a Entry;
2783
2784    fn next(&mut self) -> Option<Self::Item> {
2785        if let Some(item) = self.entry() {
2786            self.advance();
2787            Some(item)
2788        } else {
2789            None
2790        }
2791    }
2792}
2793
2794#[derive(Debug)]
2795enum TraversalTarget<'a> {
2796    Path(&'a Path),
2797    PathSuccessor(&'a Path),
2798    Count {
2799        count: usize,
2800        include_ignored: bool,
2801        include_dirs: bool,
2802    },
2803}
2804
2805impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2806    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2807        match self {
2808            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2809            TraversalTarget::PathSuccessor(path) => {
2810                if !cursor_location.max_path.starts_with(path) {
2811                    Ordering::Equal
2812                } else {
2813                    Ordering::Greater
2814                }
2815            }
2816            TraversalTarget::Count {
2817                count,
2818                include_dirs,
2819                include_ignored,
2820            } => Ord::cmp(
2821                count,
2822                &cursor_location.count(*include_dirs, *include_ignored),
2823            ),
2824        }
2825    }
2826}
2827
2828struct ChildEntriesIter<'a> {
2829    parent_path: &'a Path,
2830    traversal: Traversal<'a>,
2831}
2832
2833impl<'a> Iterator for ChildEntriesIter<'a> {
2834    type Item = &'a Entry;
2835
2836    fn next(&mut self) -> Option<Self::Item> {
2837        if let Some(item) = self.traversal.entry() {
2838            if item.path.starts_with(&self.parent_path) {
2839                self.traversal.advance_to_sibling();
2840                return Some(item);
2841            }
2842        }
2843        None
2844    }
2845}
2846
2847impl<'a> From<&'a Entry> for proto::Entry {
2848    fn from(entry: &'a Entry) -> Self {
2849        Self {
2850            id: entry.id as u64,
2851            is_dir: entry.is_dir(),
2852            path: entry.path.to_string_lossy().to_string(),
2853            inode: entry.inode,
2854            mtime: Some(entry.mtime.into()),
2855            is_symlink: entry.is_symlink,
2856            is_ignored: entry.is_ignored,
2857        }
2858    }
2859}
2860
2861impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2862    type Error = anyhow::Error;
2863
2864    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2865        if let Some(mtime) = entry.mtime {
2866            let kind = if entry.is_dir {
2867                EntryKind::Dir
2868            } else {
2869                let mut char_bag = root_char_bag.clone();
2870                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2871                EntryKind::File(char_bag)
2872            };
2873            let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2874            Ok(Entry {
2875                id: entry.id as usize,
2876                kind,
2877                path: path.clone(),
2878                inode: entry.inode,
2879                mtime: mtime.into(),
2880                is_symlink: entry.is_symlink,
2881                is_ignored: entry.is_ignored,
2882            })
2883        } else {
2884            Err(anyhow!(
2885                "missing mtime in remote worktree entry {:?}",
2886                entry.path
2887            ))
2888        }
2889    }
2890}
2891
2892trait ToPointUtf16 {
2893    fn to_point_utf16(self) -> PointUtf16;
2894}
2895
2896impl ToPointUtf16 for lsp::Position {
2897    fn to_point_utf16(self) -> PointUtf16 {
2898        PointUtf16::new(self.line, self.character)
2899    }
2900}
2901
2902fn diagnostic_ranges<'a>(
2903    diagnostic: &'a lsp::Diagnostic,
2904    abs_path: &'a Path,
2905) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
2906    diagnostic
2907        .related_information
2908        .iter()
2909        .flatten()
2910        .filter_map(move |info| {
2911            if info.location.uri.to_file_path().ok()? == abs_path {
2912                let info_start = PointUtf16::new(
2913                    info.location.range.start.line,
2914                    info.location.range.start.character,
2915                );
2916                let info_end = PointUtf16::new(
2917                    info.location.range.end.line,
2918                    info.location.range.end.character,
2919                );
2920                Some(info_start..info_end)
2921            } else {
2922                None
2923            }
2924        })
2925        .chain(Some(
2926            diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
2927        ))
2928}
2929
2930#[cfg(test)]
2931mod tests {
2932    use super::*;
2933    use crate::fs::FakeFs;
2934    use anyhow::Result;
2935    use client::test::{FakeHttpClient, FakeServer};
2936    use fs::RealFs;
2937    use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
2938    use language::{Diagnostic, LanguageConfig};
2939    use lsp::Url;
2940    use rand::prelude::*;
2941    use serde_json::json;
2942    use std::{cell::RefCell, rc::Rc};
2943    use std::{
2944        env,
2945        fmt::Write,
2946        time::{SystemTime, UNIX_EPOCH},
2947    };
2948    use text::Point;
2949    use unindent::Unindent as _;
2950    use util::test::temp_tree;
2951
2952    #[gpui::test]
2953    async fn test_traversal(mut cx: gpui::TestAppContext) {
2954        let fs = FakeFs::new();
2955        fs.insert_tree(
2956            "/root",
2957            json!({
2958               ".gitignore": "a/b\n",
2959               "a": {
2960                   "b": "",
2961                   "c": "",
2962               }
2963            }),
2964        )
2965        .await;
2966
2967        let client = Client::new();
2968        let http_client = FakeHttpClient::with_404_response();
2969        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
2970
2971        let tree = Worktree::open_local(
2972            client,
2973            user_store,
2974            Arc::from(Path::new("/root")),
2975            Arc::new(fs),
2976            Default::default(),
2977            &mut cx.to_async(),
2978        )
2979        .await
2980        .unwrap();
2981        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2982            .await;
2983
2984        tree.read_with(&cx, |tree, _| {
2985            assert_eq!(
2986                tree.entries(false)
2987                    .map(|entry| entry.path.as_ref())
2988                    .collect::<Vec<_>>(),
2989                vec![
2990                    Path::new(""),
2991                    Path::new(".gitignore"),
2992                    Path::new("a"),
2993                    Path::new("a/c"),
2994                ]
2995            );
2996        })
2997    }
2998
2999    #[gpui::test]
3000    async fn test_save_file(mut cx: gpui::TestAppContext) {
3001        let dir = temp_tree(json!({
3002            "file1": "the old contents",
3003        }));
3004
3005        let client = Client::new();
3006        let http_client = FakeHttpClient::with_404_response();
3007        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3008
3009        let tree = Worktree::open_local(
3010            client,
3011            user_store,
3012            dir.path(),
3013            Arc::new(RealFs),
3014            Default::default(),
3015            &mut cx.to_async(),
3016        )
3017        .await
3018        .unwrap();
3019        let buffer = tree
3020            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3021            .await
3022            .unwrap();
3023        let save = buffer.update(&mut cx, |buffer, cx| {
3024            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3025            buffer.save(cx).unwrap()
3026        });
3027        save.await.unwrap();
3028
3029        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
3030        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3031    }
3032
3033    #[gpui::test]
3034    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
3035        let dir = temp_tree(json!({
3036            "file1": "the old contents",
3037        }));
3038        let file_path = dir.path().join("file1");
3039
3040        let client = Client::new();
3041        let http_client = FakeHttpClient::with_404_response();
3042        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3043
3044        let tree = Worktree::open_local(
3045            client,
3046            user_store,
3047            file_path.clone(),
3048            Arc::new(RealFs),
3049            Default::default(),
3050            &mut cx.to_async(),
3051        )
3052        .await
3053        .unwrap();
3054        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3055            .await;
3056        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));
3057
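            // The worktree consists of a single file, so the buffer is opened with an empty relative path.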
3058        let buffer = tree
3059            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
3060            .await
3061            .unwrap();
3062        let save = buffer.update(&mut cx, |buffer, cx| {
3063            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3064            buffer.save(cx).unwrap()
3065        });
3066        save.await.unwrap();
3067
3068        let new_text = std::fs::read_to_string(file_path).unwrap();
3069        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3070    }
3071
3072    #[gpui::test]
3073    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3074        let dir = temp_tree(json!({
3075            "a": {
3076                "file1": "",
3077                "file2": "",
3078                "file3": "",
3079            },
3080            "b": {
3081                "c": {
3082                    "file4": "",
3083                    "file5": "",
3084                }
3085            }
3086        }));
3087
3088        let user_id = 5;
3089        let mut client = Client::new();
3090        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3091        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3092        let tree = Worktree::open_local(
3093            client,
3094            user_store.clone(),
3095            dir.path(),
3096            Arc::new(RealFs),
3097            Default::default(),
3098            &mut cx.to_async(),
3099        )
3100        .await
3101        .unwrap();
3102
3103        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3104            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3105            async move { buffer.await.unwrap() }
3106        };
3107        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3108            tree.read_with(cx, |tree, _| {
3109                tree.entry_for_path(path)
3110                    .unwrap_or_else(|| panic!("no entry for path {}", path))
3111                    .id
3112            })
3113        };
3114
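            // Open buffers for several files and record their entry ids before mutating the tree on disk.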
3115        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3116        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3117        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3118        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3119
3120        let file2_id = id_for_path("a/file2", &cx);
3121        let file3_id = id_for_path("a/file3", &cx);
3122        let file4_id = id_for_path("b/c/file4", &cx);
3123
3124        // Wait for the initial scan.
3125        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3126            .await;
3127
3128        // Create a remote copy of this worktree.
3129        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3130        let remote = Worktree::remote(
3131            1,
3132            1,
3133            initial_snapshot.to_proto(),
3134            Client::new(),
3135            user_store,
3136            Default::default(),
3137            &mut cx.to_async(),
3138        )
3139        .await
3140        .unwrap();
3141
3142        cx.read(|cx| {
3143            assert!(!buffer2.read(cx).is_dirty());
3144            assert!(!buffer3.read(cx).is_dirty());
3145            assert!(!buffer4.read(cx).is_dirty());
3146            assert!(!buffer5.read(cx).is_dirty());
3147        });
3148
3149        // Rename and delete files and directories.
3150        tree.flush_fs_events(&cx).await;
3151        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3152        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3153        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3154        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3155        tree.flush_fs_events(&cx).await;
3156
3157        let expected_paths = vec![
3158            "a",
3159            "a/file1",
3160            "a/file2.new",
3161            "b",
3162            "d",
3163            "d/file3",
3164            "d/file4",
3165        ];
3166
3167        cx.read(|app| {
3168            assert_eq!(
3169                tree.read(app)
3170                    .paths()
3171                    .map(|p| p.to_str().unwrap())
3172                    .collect::<Vec<_>>(),
3173                expected_paths
3174            );
3175
3176            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3177            assert_eq!(id_for_path("d/file3", &cx), file3_id);
3178            assert_eq!(id_for_path("d/file4", &cx), file4_id);
3179
3180            assert_eq!(
3181                buffer2.read(app).file().unwrap().path().as_ref(),
3182                Path::new("a/file2.new")
3183            );
3184            assert_eq!(
3185                buffer3.read(app).file().unwrap().path().as_ref(),
3186                Path::new("d/file3")
3187            );
3188            assert_eq!(
3189                buffer4.read(app).file().unwrap().path().as_ref(),
3190                Path::new("d/file4")
3191            );
3192            assert_eq!(
3193                buffer5.read(app).file().unwrap().path().as_ref(),
3194                Path::new("b/c/file5")
3195            );
3196
3197            assert!(!buffer2.read(app).file().unwrap().is_deleted());
3198            assert!(!buffer3.read(app).file().unwrap().is_deleted());
3199            assert!(!buffer4.read(app).file().unwrap().is_deleted());
3200            assert!(buffer5.read(app).file().unwrap().is_deleted());
3201        });
3202
3203        // Update the remote worktree. Check that it becomes consistent with the
3204        // local worktree.
3205        remote.update(&mut cx, |remote, cx| {
3206            let update_message =
3207                tree.read(cx)
3208                    .snapshot()
3209                    .build_update(&initial_snapshot, 1, 1, true);
3210            remote
3211                .as_remote_mut()
3212                .unwrap()
3213                .snapshot
3214                .apply_update(update_message)
3215                .unwrap();
3216
3217            assert_eq!(
3218                remote
3219                    .paths()
3220                    .map(|p| p.to_str().unwrap())
3221                    .collect::<Vec<_>>(),
3222                expected_paths
3223            );
3224        });
3225    }
3226
3227    #[gpui::test]
3228    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3229        let dir = temp_tree(json!({
3230            ".git": {},
3231            ".gitignore": "ignored-dir\n",
3232            "tracked-dir": {
3233                "tracked-file1": "tracked contents",
3234            },
3235            "ignored-dir": {
3236                "ignored-file1": "ignored contents",
3237            }
3238        }));
3239
3240        let client = Client::new();
3241        let http_client = FakeHttpClient::with_404_response();
3242        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3243
3244        let tree = Worktree::open_local(
3245            client,
3246            user_store,
3247            dir.path(),
3248            Arc::new(RealFs),
3249            Default::default(),
3250            &mut cx.to_async(),
3251        )
3252        .await
3253        .unwrap();
3254        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3255            .await;
3256        tree.flush_fs_events(&cx).await;
3257        cx.read(|cx| {
3258            let tree = tree.read(cx);
3259            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3260            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3261            assert!(!tracked.is_ignored);
3262            assert!(ignored.is_ignored);
3263        });
3264
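            // Add a new file to both the tracked directory and the ignored directory, then wait for the rescan.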
3265        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3266        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3267        tree.flush_fs_events(&cx).await;
3268        cx.read(|cx| {
3269            let tree = tree.read(cx);
3270            let dot_git = tree.entry_for_path(".git").unwrap();
3271            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3272            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3273            assert!(!tracked.is_ignored);
3274            assert!(ignored.is_ignored);
3275            assert!(dot_git.is_ignored);
3276        });
3277    }
3278
3279    #[gpui::test]
3280    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3281        let user_id = 100;
3282        let mut client = Client::new();
3283        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3284        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3285
3286        let fs = Arc::new(FakeFs::new());
3287        fs.insert_tree(
3288            "/the-dir",
3289            json!({
3290                "a.txt": "a-contents",
3291                "b.txt": "b-contents",
3292            }),
3293        )
3294        .await;
3295
3296        let worktree = Worktree::open_local(
3297            client.clone(),
3298            user_store,
3299            "/the-dir".as_ref(),
3300            fs,
3301            Default::default(),
3302            &mut cx.to_async(),
3303        )
3304        .await
3305        .unwrap();
3306
3307        // Spawn multiple tasks to open paths, repeating some paths.
3308        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3309            (
3310                worktree.open_buffer("a.txt", cx),
3311                worktree.open_buffer("b.txt", cx),
3312                worktree.open_buffer("a.txt", cx),
3313            )
3314        });
3315
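            // Wait for all of the open tasks to complete.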
3316        let buffer_a_1 = buffer_a_1.await.unwrap();
3317        let buffer_a_2 = buffer_a_2.await.unwrap();
3318        let buffer_b = buffer_b.await.unwrap();
3319        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3320        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3321
3322        // There is only one buffer per path.
3323        let buffer_a_id = buffer_a_1.id();
3324        assert_eq!(buffer_a_2.id(), buffer_a_id);
3325
3326        // Open the same path again while it is still open.
3327        drop(buffer_a_1);
3328        let buffer_a_3 = worktree
3329            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3330            .await
3331            .unwrap();
3332
3333        // There's still only one buffer per path.
3334        assert_eq!(buffer_a_3.id(), buffer_a_id);
3335    }
3336
3337    #[gpui::test]
3338    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
3339        use std::fs;
3340
3341        let dir = temp_tree(json!({
3342            "file1": "abc",
3343            "file2": "def",
3344            "file3": "ghi",
3345        }));
3346        let client = Client::new();
3347        let http_client = FakeHttpClient::with_404_response();
3348        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3349
3350        let tree = Worktree::open_local(
3351            client,
3352            user_store,
3353            dir.path(),
3354            Arc::new(RealFs),
3355            Default::default(),
3356            &mut cx.to_async(),
3357        )
3358        .await
3359        .unwrap();
3360        tree.flush_fs_events(&cx).await;
3361        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3362            .await;
3363
3364        let buffer1 = tree
3365            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3366            .await
3367            .unwrap();
3368        let events = Rc::new(RefCell::new(Vec::new()));
3369
3370        // initially, the buffer isn't dirty.
3371        buffer1.update(&mut cx, |buffer, cx| {
3372            cx.subscribe(&buffer1, {
3373                let events = events.clone();
3374                move |_, _, event, _| events.borrow_mut().push(event.clone())
3375            })
3376            .detach();
3377
3378            assert!(!buffer.is_dirty());
3379            assert!(events.borrow().is_empty());
3380
3381            buffer.edit(vec![1..2], "", cx);
3382        });
3383
3384        // after the first edit, the buffer is dirty, and emits a dirtied event.
3385        buffer1.update(&mut cx, |buffer, cx| {
3386            assert_eq!(buffer.text(), "ac");
3387            assert!(buffer.is_dirty());
3388            assert_eq!(
3389                *events.borrow(),
3390                &[language::Event::Edited, language::Event::Dirtied]
3391            );
3392            events.borrow_mut().clear();
3393            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
3394        });
3395
3396        // after saving, the buffer is not dirty, and emits a saved event.
3397        buffer1.update(&mut cx, |buffer, cx| {
3398            assert!(!buffer.is_dirty());
3399            assert_eq!(*events.borrow(), &[language::Event::Saved]);
3400            events.borrow_mut().clear();
3401
3402            buffer.edit(vec![1..1], "B", cx);
3403            buffer.edit(vec![2..2], "D", cx);
3404        });
3405
3406        // after editing again, the buffer is dirty, and emits another dirtied event.
3407        buffer1.update(&mut cx, |buffer, cx| {
3408            assert_eq!(buffer.text(), "aBDc");
3409            assert!(buffer.is_dirty());
3410            assert_eq!(
3411                *events.borrow(),
3412                &[
3413                    language::Event::Edited,
3414                    language::Event::Dirtied,
3415                    language::Event::Edited,
3416                ],
3417            );
3418            events.borrow_mut().clear();
3419
3420            // TODO - currently, after restoring the buffer to its
3421            // previously-saved state, the buffer is still considered dirty.
3422            buffer.edit([1..3], "", cx);
3423            assert_eq!(buffer.text(), "ac");
3424            assert!(buffer.is_dirty());
3425        });
3426
3427        assert_eq!(*events.borrow(), &[language::Event::Edited]);
3428
3429        // When a file is deleted, the buffer is considered dirty.
3430        let events = Rc::new(RefCell::new(Vec::new()));
3431        let buffer2 = tree
3432            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
3433            .await
3434            .unwrap();
3435        buffer2.update(&mut cx, |_, cx| {
3436            cx.subscribe(&buffer2, {
3437                let events = events.clone();
3438                move |_, _, event, _| events.borrow_mut().push(event.clone())
3439            })
3440            .detach();
3441        });
3442
3443        fs::remove_file(dir.path().join("file2")).unwrap();
3444        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
3445        assert_eq!(
3446            *events.borrow(),
3447            &[language::Event::Dirtied, language::Event::FileHandleChanged]
3448        );
3449
3450        // When a file that is already dirty is deleted, we don't emit a Dirtied event.
3451        let events = Rc::new(RefCell::new(Vec::new()));
3452        let buffer3 = tree
3453            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
3454            .await
3455            .unwrap();
3456        buffer3.update(&mut cx, |_, cx| {
3457            cx.subscribe(&buffer3, {
3458                let events = events.clone();
3459                move |_, _, event, _| events.borrow_mut().push(event.clone())
3460            })
3461            .detach();
3462        });
3463
3464        tree.flush_fs_events(&cx).await;
3465        buffer3.update(&mut cx, |buffer, cx| {
3466            buffer.edit(Some(0..0), "x", cx);
3467        });
3468        events.borrow_mut().clear();
3469        fs::remove_file(dir.path().join("file3")).unwrap();
3470        buffer3
3471            .condition(&cx, |_, _| !events.borrow().is_empty())
3472            .await;
3473        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
3474        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
3475    }
3476
3477    #[gpui::test]
3478    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
3479        use std::fs;
3480
3481        let initial_contents = "aaa\nbbbbb\nc\n";
3482        let dir = temp_tree(json!({ "the-file": initial_contents }));
3483        let client = Client::new();
3484        let http_client = FakeHttpClient::with_404_response();
3485        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3486
3487        let tree = Worktree::open_local(
3488            client,
3489            user_store,
3490            dir.path(),
3491            Arc::new(RealFs),
3492            Default::default(),
3493            &mut cx.to_async(),
3494        )
3495        .await
3496        .unwrap();
3497        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3498            .await;
3499
3500        let abs_path = dir.path().join("the-file");
3501        let buffer = tree
3502            .update(&mut cx, |tree, cx| {
3503                tree.open_buffer(Path::new("the-file"), cx)
3504            })
3505            .await
3506            .unwrap();
3507
3508        // TODO
3509        // Add a cursor on each row.
3510        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
3511        //     assert!(!buffer.is_dirty());
3512        //     buffer.add_selection_set(
3513        //         &(0..3)
3514        //             .map(|row| Selection {
3515        //                 id: row as usize,
3516        //                 start: Point::new(row, 1),
3517        //                 end: Point::new(row, 1),
3518        //                 reversed: false,
3519        //                 goal: SelectionGoal::None,
3520        //             })
3521        //             .collect::<Vec<_>>(),
3522        //         cx,
3523        //     )
3524        // });
3525
3526        // Change the file on disk, adding two new lines of text, and removing
3527        // one line.
3528        buffer.read_with(&cx, |buffer, _| {
3529            assert!(!buffer.is_dirty());
3530            assert!(!buffer.has_conflict());
3531        });
3532        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
3533        fs::write(&abs_path, new_contents).unwrap();
3534
3535        // Because the buffer was not modified, it is reloaded from disk. Its
3536        // contents are edited according to the diff between the old and new
3537        // file contents.
3538        buffer
3539            .condition(&cx, |buffer, _| buffer.text() == new_contents)
3540            .await;
3541
3542        buffer.update(&mut cx, |buffer, _| {
3543            assert_eq!(buffer.text(), new_contents);
3544            assert!(!buffer.is_dirty());
3545            assert!(!buffer.has_conflict());
3546
3547            // TODO
3548            // let cursor_positions = buffer
3549            //     .selection_set(selection_set_id)
3550            //     .unwrap()
3551            //     .selections::<Point>(&*buffer)
3552            //     .map(|selection| {
3553            //         assert_eq!(selection.start, selection.end);
3554            //         selection.start
3555            //     })
3556            //     .collect::<Vec<_>>();
3557            // assert_eq!(
3558            //     cursor_positions,
3559            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
3560            // );
3561        });
3562
3563        // Modify the buffer
3564        buffer.update(&mut cx, |buffer, cx| {
3565            buffer.edit(vec![0..0], " ", cx);
3566            assert!(buffer.is_dirty());
3567            assert!(!buffer.has_conflict());
3568        });
3569
3570        // Change the file on disk again, adding blank lines to the beginning.
3571        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();
3572
3573        // Because the buffer is modified, it doesn't reload from disk, but is
3574        // marked as having a conflict.
3575        buffer
3576            .condition(&cx, |buffer, _| buffer.has_conflict())
3577            .await;
3578    }
3579
3580    #[gpui::test]
3581    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
3582        let (language_server_config, mut fake_server) =
3583            LanguageServerConfig::fake(cx.background()).await;
3584        let mut languages = LanguageRegistry::new();
3585        languages.add(Arc::new(Language::new(
3586            LanguageConfig {
3587                name: "Rust".to_string(),
3588                path_suffixes: vec!["rs".to_string()],
3589                language_server: Some(language_server_config),
3590                ..Default::default()
3591            },
3592            Some(tree_sitter_rust::language()),
3593        )));
3594
3595        let dir = temp_tree(json!({
3596            "a.rs": "fn a() { A }",
3597            "b.rs": "const y: i32 = 1",
3598        }));
3599
3600        let client = Client::new();
3601        let http_client = FakeHttpClient::with_404_response();
3602        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3603
3604        let tree = Worktree::open_local(
3605            client,
3606            user_store,
3607            dir.path(),
3608            Arc::new(RealFs),
3609            Arc::new(languages),
3610            &mut cx.to_async(),
3611        )
3612        .await
3613        .unwrap();
3614        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3615            .await;
3616
3617        // Cause the worktree to start the fake language server.
3618        let _buffer = tree
3619            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
3620            .await
3621            .unwrap();
3622
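            // Publish a diagnostic for a.rs before any buffer for that file has been opened.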
3623        fake_server
3624            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
3625                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
3626                version: None,
3627                diagnostics: vec![lsp::Diagnostic {
3628                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
3629                    severity: Some(lsp::DiagnosticSeverity::ERROR),
3630                    message: "undefined variable 'A'".to_string(),
3631                    ..Default::default()
3632                }],
3633            })
3634            .await;
3635
3636        let buffer = tree
3637            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3638            .await
3639            .unwrap();
3640
3641        buffer.read_with(&cx, |buffer, _| {
3642            let diagnostics = buffer
3643                .snapshot()
3644                .diagnostics_in_range::<_, Point>(0..buffer.len())
3645                .collect::<Vec<_>>();
3646            assert_eq!(
3647                diagnostics,
3648                &[DiagnosticEntry {
3649                    range: Point::new(0, 9)..Point::new(0, 10),
3650                    diagnostic: Diagnostic {
3651                        severity: lsp::DiagnosticSeverity::ERROR,
3652                        message: "undefined variable 'A'".to_string(),
3653                        group_id: 0,
3654                        is_primary: true,
3655                        ..Default::default()
3656                    }
3657                }]
3658            )
3659        });
3660    }
3661
3662    #[gpui::test]
3663    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
3664        let fs = Arc::new(FakeFs::new());
3665        let client = Client::new();
3666        let http_client = FakeHttpClient::with_404_response();
3667        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3668
3669        fs.insert_tree(
3670            "/the-dir",
3671            json!({
3672                "a.rs": "
3673                    fn foo(mut v: Vec<usize>) {
3674                        for x in &v {
3675                            v.push(1);
3676                        }
3677                    }
3678                "
3679                .unindent(),
3680            }),
3681        )
3682        .await;
3683
3684        let worktree = Worktree::open_local(
3685            client.clone(),
3686            user_store,
3687            "/the-dir".as_ref(),
3688            fs,
3689            Default::default(),
3690            &mut cx.to_async(),
3691        )
3692        .await
3693        .unwrap();
3694
3695        let buffer = worktree
3696            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3697            .await
3698            .unwrap();
3699
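            // Publish diagnostics whose hints refer back to their primary errors via related information, so they should be grouped together.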
3700        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
3701        let message = lsp::PublishDiagnosticsParams {
3702            uri: buffer_uri.clone(),
3703            diagnostics: vec![
3704                lsp::Diagnostic {
3705                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3706                    severity: Some(DiagnosticSeverity::WARNING),
3707                    message: "error 1".to_string(),
3708                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3709                        location: lsp::Location {
3710                            uri: buffer_uri.clone(),
3711                            range: lsp::Range::new(
3712                                lsp::Position::new(1, 8),
3713                                lsp::Position::new(1, 9),
3714                            ),
3715                        },
3716                        message: "error 1 hint 1".to_string(),
3717                    }]),
3718                    ..Default::default()
3719                },
3720                lsp::Diagnostic {
3721                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3722                    severity: Some(DiagnosticSeverity::HINT),
3723                    message: "error 1 hint 1".to_string(),
3724                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3725                        location: lsp::Location {
3726                            uri: buffer_uri.clone(),
3727                            range: lsp::Range::new(
3728                                lsp::Position::new(1, 8),
3729                                lsp::Position::new(1, 9),
3730                            ),
3731                        },
3732                        message: "original diagnostic".to_string(),
3733                    }]),
3734                    ..Default::default()
3735                },
3736                lsp::Diagnostic {
3737                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
3738                    severity: Some(DiagnosticSeverity::ERROR),
3739                    message: "error 2".to_string(),
3740                    related_information: Some(vec![
3741                        lsp::DiagnosticRelatedInformation {
3742                            location: lsp::Location {
3743                                uri: buffer_uri.clone(),
3744                                range: lsp::Range::new(
3745                                    lsp::Position::new(1, 13),
3746                                    lsp::Position::new(1, 15),
3747                                ),
3748                            },
3749                            message: "error 2 hint 1".to_string(),
3750                        },
3751                        lsp::DiagnosticRelatedInformation {
3752                            location: lsp::Location {
3753                                uri: buffer_uri.clone(),
3754                                range: lsp::Range::new(
3755                                    lsp::Position::new(1, 13),
3756                                    lsp::Position::new(1, 15),
3757                                ),
3758                            },
3759                            message: "error 2 hint 2".to_string(),
3760                        },
3761                    ]),
3762                    ..Default::default()
3763                },
3764                lsp::Diagnostic {
3765                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
3766                    severity: Some(DiagnosticSeverity::HINT),
3767                    message: "error 2 hint 1".to_string(),
3768                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3769                        location: lsp::Location {
3770                            uri: buffer_uri.clone(),
3771                            range: lsp::Range::new(
3772                                lsp::Position::new(2, 8),
3773                                lsp::Position::new(2, 17),
3774                            ),
3775                        },
3776                        message: "original diagnostic".to_string(),
3777                    }]),
3778                    ..Default::default()
3779                },
3780                lsp::Diagnostic {
3781                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
3782                    severity: Some(DiagnosticSeverity::HINT),
3783                    message: "error 2 hint 2".to_string(),
3784                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3785                        location: lsp::Location {
3786                            uri: buffer_uri.clone(),
3787                            range: lsp::Range::new(
3788                                lsp::Position::new(2, 8),
3789                                lsp::Position::new(2, 17),
3790                            ),
3791                        },
3792                        message: "original diagnostic".to_string(),
3793                    }]),
3794                    ..Default::default()
3795                },
3796            ],
3797            version: None,
3798        };
3799
3800        worktree
3801            .update(&mut cx, |tree, cx| tree.update_diagnostics(message, cx))
3802            .unwrap();
3803        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());
3804
3805        assert_eq!(
3806            buffer
3807                .diagnostics_in_range::<_, Point>(0..buffer.len())
3808                .collect::<Vec<_>>(),
3809            &[
3810                DiagnosticEntry {
3811                    range: Point::new(1, 8)..Point::new(1, 9),
3812                    diagnostic: Diagnostic {
3813                        severity: DiagnosticSeverity::WARNING,
3814                        message: "error 1".to_string(),
3815                        group_id: 0,
3816                        is_primary: true,
3817                        ..Default::default()
3818                    }
3819                },
3820                DiagnosticEntry {
3821                    range: Point::new(1, 8)..Point::new(1, 9),
3822                    diagnostic: Diagnostic {
3823                        severity: DiagnosticSeverity::HINT,
3824                        message: "error 1 hint 1".to_string(),
3825                        group_id: 0,
3826                        is_primary: false,
3827                        ..Default::default()
3828                    }
3829                },
3830                DiagnosticEntry {
3831                    range: Point::new(1, 13)..Point::new(1, 15),
3832                    diagnostic: Diagnostic {
3833                        severity: DiagnosticSeverity::HINT,
3834                        message: "error 2 hint 1".to_string(),
3835                        group_id: 1,
3836                        is_primary: false,
3837                        ..Default::default()
3838                    }
3839                },
3840                DiagnosticEntry {
3841                    range: Point::new(1, 13)..Point::new(1, 15),
3842                    diagnostic: Diagnostic {
3843                        severity: DiagnosticSeverity::HINT,
3844                        message: "error 2 hint 2".to_string(),
3845                        group_id: 1,
3846                        is_primary: false,
3847                        ..Default::default()
3848                    }
3849                },
3850                DiagnosticEntry {
3851                    range: Point::new(2, 8)..Point::new(2, 17),
3852                    diagnostic: Diagnostic {
3853                        severity: DiagnosticSeverity::ERROR,
3854                        message: "error 2".to_string(),
3855                        group_id: 1,
3856                        is_primary: true,
3857                        ..Default::default()
3858                    }
3859                }
3860            ]
3861        );
3862
3863        assert_eq!(
3864            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
3865            &[
3866                DiagnosticEntry {
3867                    range: Point::new(1, 8)..Point::new(1, 9),
3868                    diagnostic: Diagnostic {
3869                        severity: DiagnosticSeverity::WARNING,
3870                        message: "error 1".to_string(),
3871                        group_id: 0,
3872                        is_primary: true,
3873                        ..Default::default()
3874                    }
3875                },
3876                DiagnosticEntry {
3877                    range: Point::new(1, 8)..Point::new(1, 9),
3878                    diagnostic: Diagnostic {
3879                        severity: DiagnosticSeverity::HINT,
3880                        message: "error 1 hint 1".to_string(),
3881                        group_id: 0,
3882                        is_primary: false,
3883                        ..Default::default()
3884                    }
3885                },
3886            ]
3887        );
3888        assert_eq!(
3889            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
3890            &[
3891                DiagnosticEntry {
3892                    range: Point::new(1, 13)..Point::new(1, 15),
3893                    diagnostic: Diagnostic {
3894                        severity: DiagnosticSeverity::HINT,
3895                        message: "error 2 hint 1".to_string(),
3896                        group_id: 1,
3897                        is_primary: false,
3898                        ..Default::default()
3899                    }
3900                },
3901                DiagnosticEntry {
3902                    range: Point::new(1, 13)..Point::new(1, 15),
3903                    diagnostic: Diagnostic {
3904                        severity: DiagnosticSeverity::HINT,
3905                        message: "error 2 hint 2".to_string(),
3906                        group_id: 1,
3907                        is_primary: false,
3908                        ..Default::default()
3909                    }
3910                },
3911                DiagnosticEntry {
3912                    range: Point::new(2, 8)..Point::new(2, 17),
3913                    diagnostic: Diagnostic {
3914                        severity: DiagnosticSeverity::ERROR,
3915                        message: "error 2".to_string(),
3916                        group_id: 1,
3917                        is_primary: true,
3918                        ..Default::default()
3919                    }
3920                }
3921            ]
3922        );
3923    }
3924
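        // Randomized scanner test. The OPERATIONS and INITIAL_ENTRIES environment variables control how many mutations are applied and how large the initial tree is.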
3925    #[gpui::test(iterations = 100)]
3926    fn test_random(mut rng: StdRng) {
3927        let operations = env::var("OPERATIONS")
3928            .map(|o| o.parse().unwrap())
3929            .unwrap_or(40);
3930        let initial_entries = env::var("INITIAL_ENTRIES")
3931            .map(|o| o.parse().unwrap())
3932            .unwrap_or(20);
3933
3934        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3935        for _ in 0..initial_entries {
3936            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3937        }
3938        log::info!("Generated initial tree");
3939
3940        let (notify_tx, _notify_rx) = smol::channel::unbounded();
3941        let fs = Arc::new(RealFs);
3942        let next_entry_id = Arc::new(AtomicUsize::new(0));
3943        let mut initial_snapshot = Snapshot {
3944            id: 0,
3945            scan_id: 0,
3946            abs_path: root_dir.path().into(),
3947            entries_by_path: Default::default(),
3948            entries_by_id: Default::default(),
3949            removed_entry_ids: Default::default(),
3950            ignores: Default::default(),
3951            root_name: Default::default(),
3952            root_char_bag: Default::default(),
3953            next_entry_id: next_entry_id.clone(),
3954        };
3955        initial_snapshot.insert_entry(
3956            Entry::new(
3957                Path::new("").into(),
3958                &smol::block_on(fs.metadata(root_dir.path()))
3959                    .unwrap()
3960                    .unwrap(),
3961                &next_entry_id,
3962                Default::default(),
3963            ),
3964            fs.as_ref(),
3965        );
3966        let mut scanner = BackgroundScanner::new(
3967            Arc::new(Mutex::new(initial_snapshot.clone())),
3968            notify_tx,
3969            fs.clone(),
3970            Arc::new(gpui::executor::Background::new()),
3971        );
3972        smol::block_on(scanner.scan_dirs()).unwrap();
3973        scanner.snapshot().check_invariants();
3974
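            // Interleave random filesystem mutations with batched event delivery, checking the scanner's invariants after each batch.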
3975        let mut events = Vec::new();
3976        let mut snapshots = Vec::new();
3977        let mut mutations_len = operations;
3978        while mutations_len > 1 {
3979            if !events.is_empty() && rng.gen_bool(0.4) {
3980                let len = rng.gen_range(0..=events.len());
3981                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
3982                log::info!("Delivering events: {:#?}", to_deliver);
3983                smol::block_on(scanner.process_events(to_deliver));
3984                scanner.snapshot().check_invariants();
3985            } else {
3986                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
3987                mutations_len -= 1;
3988            }
3989
3990            if rng.gen_bool(0.2) {
3991                snapshots.push(scanner.snapshot());
3992            }
3993        }
3994        log::info!("Quiescing: {:#?}", events);
3995        smol::block_on(scanner.process_events(events));
3996        scanner.snapshot().check_invariants();
3997
3998        let (notify_tx, _notify_rx) = smol::channel::unbounded();
3999        let mut new_scanner = BackgroundScanner::new(
4000            Arc::new(Mutex::new(initial_snapshot)),
4001            notify_tx,
4002            scanner.fs.clone(),
4003            scanner.executor.clone(),
4004        );
4005        smol::block_on(new_scanner.scan_dirs()).unwrap();
4006        assert_eq!(
4007            scanner.snapshot().to_vec(true),
4008            new_scanner.snapshot().to_vec(true)
4009        );
4010
4011        for mut prev_snapshot in snapshots {
4012            let include_ignored = rng.gen::<bool>();
4013            if !include_ignored {
4014                let mut entries_by_path_edits = Vec::new();
4015                let mut entries_by_id_edits = Vec::new();
4016                for entry in prev_snapshot
4017                    .entries_by_id
4018                    .cursor::<()>()
4019                    .filter(|e| e.is_ignored)
4020                {
4021                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4022                    entries_by_id_edits.push(Edit::Remove(entry.id));
4023                }
4024
4025                prev_snapshot
4026                    .entries_by_path
4027                    .edit(entries_by_path_edits, &());
4028                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4029            }
4030
4031            let update = scanner
4032                .snapshot()
4033                .build_update(&prev_snapshot, 0, 0, include_ignored);
4034            prev_snapshot.apply_update(update).unwrap();
4035            assert_eq!(
4036                prev_snapshot.to_vec(true),
4037                scanner.snapshot().to_vec(include_ignored)
4038            );
4039        }
4040    }
4041
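        // Randomly create, rename, or delete entries under `root_path`, returning the filesystem events a watcher would observe.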
4042    fn randomly_mutate_tree(
4043        root_path: &Path,
4044        insertion_probability: f64,
4045        rng: &mut impl Rng,
4046    ) -> Result<Vec<fsevent::Event>> {
4047        let root_path = root_path.canonicalize().unwrap();
4048        let (dirs, files) = read_dir_recursive(root_path.clone());
4049
4050        let mut events = Vec::new();
4051        let mut record_event = |path: PathBuf| {
4052            events.push(fsevent::Event {
4053                event_id: SystemTime::now()
4054                    .duration_since(UNIX_EPOCH)
4055                    .unwrap()
4056                    .as_secs(),
4057                flags: fsevent::StreamFlags::empty(),
4058                path,
4059            });
4060        };
4061
4062        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4063            let path = dirs.choose(rng).unwrap();
4064            let new_path = path.join(gen_name(rng));
4065
4066            if rng.gen() {
4067                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4068                std::fs::create_dir(&new_path)?;
4069            } else {
4070                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4071                std::fs::write(&new_path, "")?;
4072            }
4073            record_event(new_path);
4074        } else if rng.gen_bool(0.05) {
4075            let ignore_dir_path = dirs.choose(rng).unwrap();
4076            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4077
4078            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4079            let files_to_ignore = {
4080                let len = rng.gen_range(0..=subfiles.len());
4081                subfiles.choose_multiple(rng, len)
4082            };
4083            let dirs_to_ignore = {
4084                let len = rng.gen_range(0..subdirs.len());
4085                subdirs.choose_multiple(rng, len)
4086            };
4087
4088            let mut ignore_contents = String::new();
4089            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4090                write!(
4091                    ignore_contents,
4092                    "{}\n",
4093                    path_to_ignore
4094                        .strip_prefix(&ignore_dir_path)?
4095                        .to_str()
4096                        .unwrap()
4097                )
4098                .unwrap();
4099            }
4100            log::info!(
4101                "Creating {:?} with contents:\n{}",
4102                ignore_path.strip_prefix(&root_path)?,
4103                ignore_contents
4104            );
4105            std::fs::write(&ignore_path, ignore_contents).unwrap();
4106            record_event(ignore_path);
4107        } else {
4108            let old_path = {
4109                let file_path = files.choose(rng);
4110                let dir_path = dirs[1..].choose(rng);
4111                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4112            };
4113
4114            let is_rename = rng.gen();
4115            if is_rename {
4116                let new_path_parent = dirs
4117                    .iter()
4118                    .filter(|d| !d.starts_with(old_path))
4119                    .choose(rng)
4120                    .unwrap();
4121
4122                let overwrite_existing_dir =
4123                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4124                let new_path = if overwrite_existing_dir {
4125                    std::fs::remove_dir_all(&new_path_parent).ok();
4126                    new_path_parent.to_path_buf()
4127                } else {
4128                    new_path_parent.join(gen_name(rng))
4129                };
4130
4131                log::info!(
4132                    "Renaming {:?} to {}{:?}",
4133                    old_path.strip_prefix(&root_path)?,
4134                    if overwrite_existing_dir {
4135                        "overwrite "
4136                    } else {
4137                        ""
4138                    },
4139                    new_path.strip_prefix(&root_path)?
4140                );
4141                std::fs::rename(&old_path, &new_path)?;
4142                record_event(old_path.clone());
4143                record_event(new_path);
4144            } else if old_path.is_dir() {
4145                let (dirs, files) = read_dir_recursive(old_path.clone());
4146
4147                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4148                std::fs::remove_dir_all(&old_path).unwrap();
4149                for file in files {
4150                    record_event(file);
4151                }
4152                for dir in dirs {
4153                    record_event(dir);
4154                }
4155            } else {
4156                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4157                std::fs::remove_file(old_path).unwrap();
4158                record_event(old_path.clone());
4159            }
4160        }
4161
4162        Ok(events)
4163    }
4164
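        // Recursively collect all directories (including `path` itself) and all files beneath `path`.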
4165    fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4166        let child_entries = std::fs::read_dir(&path).unwrap();
4167        let mut dirs = vec![path];
4168        let mut files = Vec::new();
4169        for child_entry in child_entries {
4170            let child_path = child_entry.unwrap().path();
4171            if child_path.is_dir() {
4172                let (child_dirs, child_files) = read_dir_recursive(child_path);
4173                dirs.extend(child_dirs);
4174                files.extend(child_files);
4175            } else {
4176                files.push(child_path);
4177            }
4178        }
4179        (dirs, files)
4180    }
4181
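        // Generate a random six-character alphanumeric name.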
4182    fn gen_name(rng: &mut impl Rng) -> String {
4183        (0..6)
4184            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4185            .map(char::from)
4186            .collect()
4187    }
4188
4189    impl Snapshot {
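            // Verify that the file iterators, the child-entry traversal, and the ignore map are all consistent with `entries_by_path`.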
4190        fn check_invariants(&self) {
4191            let mut files = self.files(true, 0);
4192            let mut visible_files = self.files(false, 0);
4193            for entry in self.entries_by_path.cursor::<()>() {
4194                if entry.is_file() {
4195                    assert_eq!(files.next().unwrap().inode, entry.inode);
4196                    if !entry.is_ignored {
4197                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4198                    }
4199                }
4200            }
4201            assert!(files.next().is_none());
4202            assert!(visible_files.next().is_none());
4203
4204            let mut bfs_paths = Vec::new();
4205            let mut stack = vec![Path::new("")];
4206            while let Some(path) = stack.pop() {
4207                bfs_paths.push(path);
4208                let ix = stack.len();
4209                for child_entry in self.child_entries(path) {
4210                    stack.insert(ix, &child_entry.path);
4211                }
4212            }
4213
4214            let dfs_paths = self
4215                .entries_by_path
4216                .cursor::<()>()
4217                .map(|e| e.path.as_ref())
4218                .collect::<Vec<_>>();
4219            assert_eq!(bfs_paths, dfs_paths);
4220
4221            for (ignore_parent_path, _) in &self.ignores {
4222                assert!(self.entry_for_path(ignore_parent_path).is_some());
4223                assert!(self
4224                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4225                    .is_some());
4226            }
4227        }
4228
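            // Collect (path, inode, is_ignored) tuples for comparing one snapshot with another.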
4229        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4230            let mut paths = Vec::new();
4231            for entry in self.entries_by_path.cursor::<()>() {
4232                if include_ignored || !entry.is_ignored {
4233                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4234                }
4235            }
4236            paths.sort_by(|a, b| a.0.cmp(&b.0));
4237            paths
4238        }
4239    }
4240}