worktree.rs

   1use super::{
   2    fs::{self, Fs},
   3    ignore::IgnoreStack,
   4    DiagnosticSummary,
   5};
   6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   7use anyhow::{anyhow, Context, Result};
   8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
   9use clock::ReplicaId;
  10use collections::{hash_map, HashMap, HashSet};
  11use futures::{Stream, StreamExt};
  12use fuzzy::CharBag;
  13use gpui::{
  14    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
  15    Task, UpgradeModelHandle, WeakModelHandle,
  16};
  17use language::{
  18    Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
  19    Operation, PointUtf16, Rope,
  20};
  21use lazy_static::lazy_static;
  22use lsp::LanguageServer;
  23use parking_lot::Mutex;
  24use postage::{
  25    prelude::{Sink as _, Stream as _},
  26    watch,
  27};
  28use serde::Deserialize;
  29use smol::channel::{self, Sender};
  30use std::{
  31    any::Any,
  32    cmp::{self, Ordering},
  33    convert::{TryFrom, TryInto},
  34    ffi::{OsStr, OsString},
  35    fmt,
  36    future::Future,
  37    mem,
  38    ops::{Deref, Range},
  39    path::{Path, PathBuf},
  40    sync::{
  41        atomic::{AtomicUsize, Ordering::SeqCst},
  42        Arc,
  43    },
  44    time::{Duration, SystemTime},
  45};
  46use sum_tree::{Bias, TreeMap};
  47use sum_tree::{Edit, SeekTarget, SumTree};
  48use util::{post_inc, ResultExt, TryFutureExt};
  49
  50lazy_static! {
  51    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
  52}
  53
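    // State reported by the background scanner: idle, actively scanning, or stopped by an error.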
  54#[derive(Clone, Debug)]
  55enum ScanState {
  56    Idle,
  57    Scanning,
  58    Err(Arc<anyhow::Error>),
  59}
  60
  61#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
  62pub struct WorktreeId(usize);
  63
  64pub enum Worktree {
  65    Local(LocalWorktree),
  66    Remote(RemoteWorktree),
  67}
  68
  69#[derive(Clone, Debug, Eq, PartialEq)]
  70pub enum Event {
  71    DiskBasedDiagnosticsUpdated,
  72    DiagnosticsUpdated(Arc<Path>),
  73}
  74
  75impl Entity for Worktree {
  76    type Event = Event;
  77
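        // Before the app quits, ask every language server owned by a local worktree to shut
        // down, and wait for those shutdown requests to complete.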
  78    fn app_will_quit(
  79        &mut self,
  80        _: &mut MutableAppContext,
  81    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
  82        use futures::FutureExt;
  83
  84        if let Self::Local(worktree) = self {
  85            let shutdown_futures = worktree
  86                .language_servers
  87                .drain()
  88                .filter_map(|(_, server)| server.shutdown())
  89                .collect::<Vec<_>>();
  90            Some(
  91                async move {
  92                    futures::future::join_all(shutdown_futures).await;
  93                }
  94                .boxed(),
  95            )
  96        } else {
  97            None
  98        }
  99    }
 100}
 101
 102impl Worktree {
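    // Opens a worktree for a path on the local file system and spawns a background task
    // that watches for file system events and keeps the worktree's snapshot up to date.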
 103    pub async fn open_local(
 104        client: Arc<Client>,
 105        user_store: ModelHandle<UserStore>,
 106        path: impl Into<Arc<Path>>,
 107        fs: Arc<dyn Fs>,
 108        languages: Arc<LanguageRegistry>,
 109        cx: &mut AsyncAppContext,
 110    ) -> Result<ModelHandle<Self>> {
 111        let (tree, scan_states_tx) =
 112            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
 113        tree.update(cx, |tree, cx| {
 114            let tree = tree.as_local_mut().unwrap();
 115            let abs_path = tree.snapshot.abs_path.clone();
 116            let background_snapshot = tree.background_snapshot.clone();
 117            let background = cx.background().clone();
 118            tree._background_scanner_task = Some(cx.background().spawn(async move {
 119                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 120                let scanner =
 121                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
 122                scanner.run(events).await;
 123            }));
 124        });
 125        Ok(tree)
 126    }
 127
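    // Constructs a worktree that mirrors a remote collaborator's worktree. The initial
    // entries are deserialized on a background thread; later `UpdateWorktree` messages are
    // received through `updates_tx` and applied to a shared snapshot.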
 128    pub async fn remote(
 129        project_remote_id: u64,
 130        replica_id: ReplicaId,
 131        worktree: proto::Worktree,
 132        client: Arc<Client>,
 133        user_store: ModelHandle<UserStore>,
 134        languages: Arc<LanguageRegistry>,
 135        cx: &mut AsyncAppContext,
 136    ) -> Result<ModelHandle<Self>> {
 137        let remote_id = worktree.id;
 138        let root_char_bag: CharBag = worktree
 139            .root_name
 140            .chars()
 141            .map(|c| c.to_ascii_lowercase())
 142            .collect();
 143        let root_name = worktree.root_name.clone();
 144        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
 145            .background()
 146            .spawn(async move {
 147                let mut entries_by_path_edits = Vec::new();
 148                let mut entries_by_id_edits = Vec::new();
 149                for entry in worktree.entries {
 150                    match Entry::try_from((&root_char_bag, entry)) {
 151                        Ok(entry) => {
 152                            entries_by_id_edits.push(Edit::Insert(PathEntry {
 153                                id: entry.id,
 154                                path: entry.path.clone(),
 155                                is_ignored: entry.is_ignored,
 156                                scan_id: 0,
 157                            }));
 158                            entries_by_path_edits.push(Edit::Insert(entry));
 159                        }
 160                        Err(err) => log::warn!("error converting remote worktree entry: {:?}", err),
 161                    }
 162                }
 163
 164                let mut entries_by_path = SumTree::new();
 165                let mut entries_by_id = SumTree::new();
 166                entries_by_path.edit(entries_by_path_edits, &());
 167                entries_by_id.edit(entries_by_id_edits, &());
 168
 169                let diagnostic_summaries = TreeMap::from_ordered_entries(
 170                    worktree.diagnostic_summaries.into_iter().map(|summary| {
 171                        (
 172                            PathKey(PathBuf::from(summary.path).into()),
 173                            DiagnosticSummary {
 174                                error_count: summary.error_count as usize,
 175                                warning_count: summary.warning_count as usize,
 176                                info_count: summary.info_count as usize,
 177                                hint_count: summary.hint_count as usize,
 178                            },
 179                        )
 180                    }),
 181                );
 182
 183                (entries_by_path, entries_by_id, diagnostic_summaries)
 184            })
 185            .await;
 186
 187        let worktree = cx.update(|cx| {
 188            cx.add_model(|cx: &mut ModelContext<Worktree>| {
 189                let snapshot = Snapshot {
 190                    id: WorktreeId(remote_id as usize),
 191                    scan_id: 0,
 192                    abs_path: Path::new("").into(),
 193                    root_name,
 194                    root_char_bag,
 195                    ignores: Default::default(),
 196                    entries_by_path,
 197                    entries_by_id,
 198                    removed_entry_ids: Default::default(),
 199                    next_entry_id: Default::default(),
 200                    diagnostic_summaries,
 201                };
 202
 203                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
 204                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());
 205
 206                cx.background()
 207                    .spawn(async move {
 208                        while let Some(update) = updates_rx.recv().await {
 209                            let mut snapshot = snapshot_tx.borrow().clone();
 210                            if let Err(error) = snapshot.apply_update(update) {
 211                                log::error!("error applying worktree update: {}", error);
 212                            }
 213                            *snapshot_tx.borrow_mut() = snapshot;
 214                        }
 215                    })
 216                    .detach();
 217
 218                {
 219                    let mut snapshot_rx = snapshot_rx.clone();
 220                    cx.spawn_weak(|this, mut cx| async move {
 221                        while let Some(_) = snapshot_rx.recv().await {
 222                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
 223                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
 224                            } else {
 225                                break;
 226                            }
 227                        }
 228                    })
 229                    .detach();
 230                }
 231
 232                Worktree::Remote(RemoteWorktree {
 233                    project_id: project_remote_id,
 234                    replica_id,
 235                    snapshot,
 236                    snapshot_rx,
 237                    updates_tx,
 238                    client: client.clone(),
 239                    loading_buffers: Default::default(),
 240                    open_buffers: Default::default(),
 241                    queued_operations: Default::default(),
 242                    languages,
 243                    user_store,
 244                })
 245            })
 246        });
 247
 248        Ok(worktree)
 249    }
 250
 251    pub fn as_local(&self) -> Option<&LocalWorktree> {
 252        if let Worktree::Local(worktree) = self {
 253            Some(worktree)
 254        } else {
 255            None
 256        }
 257    }
 258
 259    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 260        if let Worktree::Remote(worktree) = self {
 261            Some(worktree)
 262        } else {
 263            None
 264        }
 265    }
 266
 267    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 268        if let Worktree::Local(worktree) = self {
 269            Some(worktree)
 270        } else {
 271            None
 272        }
 273    }
 274
 275    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 276        if let Worktree::Remote(worktree) = self {
 277            Some(worktree)
 278        } else {
 279            None
 280        }
 281    }
 282
 283    pub fn snapshot(&self) -> Snapshot {
 284        match self {
 285            Worktree::Local(worktree) => worktree.snapshot(),
 286            Worktree::Remote(worktree) => worktree.snapshot(),
 287        }
 288    }
 289
 290    pub fn replica_id(&self) -> ReplicaId {
 291        match self {
 292            Worktree::Local(_) => 0,
 293            Worktree::Remote(worktree) => worktree.replica_id,
 294        }
 295    }
 296
 297    pub fn remove_collaborator(
 298        &mut self,
 299        peer_id: PeerId,
 300        replica_id: ReplicaId,
 301        cx: &mut ModelContext<Self>,
 302    ) {
 303        match self {
 304            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
 305            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
 306        }
 307    }
 308
 309    pub fn languages(&self) -> &Arc<LanguageRegistry> {
 310        match self {
 311            Worktree::Local(worktree) => &worktree.language_registry,
 312            Worktree::Remote(worktree) => &worktree.languages,
 313        }
 314    }
 315
 316    pub fn user_store(&self) -> &ModelHandle<UserStore> {
 317        match self {
 318            Worktree::Local(worktree) => &worktree.user_store,
 319            Worktree::Remote(worktree) => &worktree.user_store,
 320        }
 321    }
 322
 323    pub fn handle_open_buffer(
 324        &mut self,
 325        envelope: TypedEnvelope<proto::OpenBuffer>,
 326        rpc: Arc<Client>,
 327        cx: &mut ModelContext<Self>,
 328    ) -> anyhow::Result<()> {
 329        let receipt = envelope.receipt();
 330
 331        let response = self
 332            .as_local_mut()
 333            .unwrap()
 334            .open_remote_buffer(envelope, cx);
 335
 336        cx.background()
 337            .spawn(
 338                async move {
 339                    rpc.respond(receipt, response.await?).await?;
 340                    Ok(())
 341                }
 342                .log_err(),
 343            )
 344            .detach();
 345
 346        Ok(())
 347    }
 348
 349    pub fn handle_close_buffer(
 350        &mut self,
 351        envelope: TypedEnvelope<proto::CloseBuffer>,
 352        _: Arc<Client>,
 353        cx: &mut ModelContext<Self>,
 354    ) -> anyhow::Result<()> {
 355        self.as_local_mut()
 356            .unwrap()
 357            .close_remote_buffer(envelope, cx)
 358    }
 359
 360    pub fn diagnostic_summaries<'a>(
 361        &'a self,
 362    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
 363        match self {
 364            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 365            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 366        }
 367        .iter()
 368        .map(|(path, summary)| (path.0.clone(), summary.clone()))
 369    }
 370
 371    pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
 372        match self {
 373            Worktree::Local(worktree) => &mut worktree.loading_buffers,
 374            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
 375        }
 376    }
 377
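    // Opens the buffer at the given path, reusing an already-open buffer or an in-flight
    // load of the same path so that concurrent callers receive the same handle.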
 378    pub fn open_buffer(
 379        &mut self,
 380        path: impl AsRef<Path>,
 381        cx: &mut ModelContext<Self>,
 382    ) -> Task<Result<ModelHandle<Buffer>>> {
 383        let path = path.as_ref();
 384
 385        // If there is already a buffer for the given path, then return it.
 386        let existing_buffer = match self {
 387            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
 388            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
 389        };
 390        if let Some(existing_buffer) = existing_buffer {
 391            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
 392        }
 393
 394        let path: Arc<Path> = Arc::from(path);
 395        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
 396            // If the given path is already being loaded, then wait for that existing
 397            // task to complete and return the same buffer.
 398            hash_map::Entry::Occupied(e) => e.get().clone(),
 399
 400            // Otherwise, record the fact that this path is now being loaded.
 401            hash_map::Entry::Vacant(entry) => {
 402                let (mut tx, rx) = postage::watch::channel();
 403                entry.insert(rx.clone());
 404
 405                let load_buffer = match self {
 406                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
 407                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
 408                };
 409                cx.spawn(move |this, mut cx| async move {
 410                    let result = load_buffer.await;
 411
 412                    // After the buffer loads, record the fact that it is no longer
 413                    // loading.
 414                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
 415                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
 416                })
 417                .detach();
 418                rx
 419            }
 420        };
 421
 422        cx.spawn(|_, _| async move {
 423            loop {
 424                if let Some(result) = loading_watch.borrow().as_ref() {
 425                    return result.clone().map_err(|e| anyhow!("{}", e));
 426                }
 427                loading_watch.recv().await;
 428            }
 429        })
 430    }
 431
 432    #[cfg(feature = "test-support")]
 433    pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
 434        let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
 435            Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
 436            Worktree::Remote(worktree) => {
 437                Box::new(worktree.open_buffers.values().filter_map(|buf| {
 438                    if let RemoteBuffer::Loaded(buf) = buf {
 439                        Some(buf)
 440                    } else {
 441                        None
 442                    }
 443                }))
 444            }
 445        };
 446
 447        let path = path.as_ref();
 448        open_buffers
 449            .find(|buffer| {
 450                if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
 451                    file.path().as_ref() == path
 452                } else {
 453                    false
 454                }
 455            })
 456            .is_some()
 457    }
 458
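    // Applies buffer operations received from a collaborator. For remote worktrees,
    // operations that arrive before the buffer is available are stashed in
    // `RemoteBuffer::Operations` instead of being dropped.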
 459    pub fn handle_update_buffer(
 460        &mut self,
 461        envelope: TypedEnvelope<proto::UpdateBuffer>,
 462        cx: &mut ModelContext<Self>,
 463    ) -> Result<()> {
 464        let payload = envelope.payload.clone();
 465        let buffer_id = payload.buffer_id as usize;
 466        let ops = payload
 467            .operations
 468            .into_iter()
 469            .map(|op| language::proto::deserialize_operation(op))
 470            .collect::<Result<Vec<_>, _>>()?;
 471
 472        match self {
 473            Worktree::Local(worktree) => {
 474                let buffer = worktree
 475                    .open_buffers
 476                    .get(&buffer_id)
 477                    .and_then(|buf| buf.upgrade(cx))
 478                    .ok_or_else(|| {
 479                        anyhow!("invalid buffer {} in update buffer message", buffer_id)
 480                    })?;
 481                buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
 482            }
 483            Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
 484                Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
 485                Some(RemoteBuffer::Loaded(buffer)) => {
 486                    if let Some(buffer) = buffer.upgrade(cx) {
 487                        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
 488                    } else {
 489                        worktree
 490                            .open_buffers
 491                            .insert(buffer_id, RemoteBuffer::Operations(ops));
 492                    }
 493                }
 494                None => {
 495                    worktree
 496                        .open_buffers
 497                        .insert(buffer_id, RemoteBuffer::Operations(ops));
 498                }
 499            },
 500        }
 501
 502        Ok(())
 503    }
 504
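    // Handles a collaborator's request to save a shared buffer: saves the buffer locally
    // and responds with the resulting version and mtime.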
 505    pub fn handle_save_buffer(
 506        &mut self,
 507        envelope: TypedEnvelope<proto::SaveBuffer>,
 508        rpc: Arc<Client>,
 509        cx: &mut ModelContext<Self>,
 510    ) -> Result<()> {
 511        let sender_id = envelope.original_sender_id()?;
 512        let this = self.as_local().unwrap();
 513        let project_id = this
 514            .share
 515            .as_ref()
 516            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
 517            .project_id;
 518
 519        let buffer = this
 520            .shared_buffers
 521            .get(&sender_id)
 522            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
 523            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
 524
 525        let receipt = envelope.receipt();
 526        let worktree_id = envelope.payload.worktree_id;
 527        let buffer_id = envelope.payload.buffer_id;
 528        let save = cx.spawn(|_, mut cx| async move {
 529            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
 530        });
 531
 532        cx.background()
 533            .spawn(
 534                async move {
 535                    let (version, mtime) = save.await?;
 536
 537                    rpc.respond(
 538                        receipt,
 539                        proto::BufferSaved {
 540                            project_id,
 541                            worktree_id,
 542                            buffer_id,
 543                            version: (&version).into(),
 544                            mtime: Some(mtime.into()),
 545                        },
 546                    )
 547                    .await?;
 548
 549                    Ok(())
 550                }
 551                .log_err(),
 552            )
 553            .detach();
 554
 555        Ok(())
 556    }
 557
 558    pub fn handle_buffer_saved(
 559        &mut self,
 560        envelope: TypedEnvelope<proto::BufferSaved>,
 561        cx: &mut ModelContext<Self>,
 562    ) -> Result<()> {
 563        let payload = envelope.payload.clone();
 564        let worktree = self.as_remote_mut().unwrap();
 565        if let Some(buffer) = worktree
 566            .open_buffers
 567            .get(&(payload.buffer_id as usize))
 568            .and_then(|buf| buf.upgrade(cx))
 569        {
 570            buffer.update(cx, |buffer, cx| {
 571                let version = payload.version.try_into()?;
 572                let mtime = payload
 573                    .mtime
 574                    .ok_or_else(|| anyhow!("missing mtime"))?
 575                    .into();
 576                buffer.did_save(version, mtime, None, cx);
 577                Result::<_, anyhow::Error>::Ok(())
 578            })?;
 579        }
 580        Ok(())
 581    }
 582
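    // Brings the worktree's public snapshot up to date with the background scanner's
    // snapshot (or, for remote worktrees, the latest received snapshot), re-polling on a
    // short delay while a scan is still in progress and reconciling open buffers with the
    // new snapshot.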
 583    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
 584        match self {
 585            Self::Local(worktree) => {
 586                let is_fake_fs = worktree.fs.is_fake();
 587                worktree
 588                    .snapshot
 589                    .assign(worktree.background_snapshot.lock().clone());
 590                if worktree.is_scanning() {
 591                    if worktree.poll_task.is_none() {
 592                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
 593                            if is_fake_fs {
 594                                smol::future::yield_now().await;
 595                            } else {
 596                                smol::Timer::after(Duration::from_millis(100)).await;
 597                            }
 598                            this.update(&mut cx, |this, cx| {
 599                                this.as_local_mut().unwrap().poll_task = None;
 600                                this.poll_snapshot(cx);
 601                            })
 602                        }));
 603                    }
 604                } else {
 605                    worktree.poll_task.take();
 606                    self.update_open_buffers(cx);
 607                }
 608            }
 609            Self::Remote(worktree) => {
 610                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
 611                self.update_open_buffers(cx);
 612            }
 613        };
 614
 615        cx.notify();
 616    }
 617
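    // Reconciles each open buffer's `File` with the current snapshot, resolving its entry
    // by id first and then by path, and drops handles to buffers that have been released.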
 618    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
 619        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
 620            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
 621            Self::Remote(worktree) => {
 622                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
 623                    if let RemoteBuffer::Loaded(buf) = buf {
 624                        Some((id, buf))
 625                    } else {
 626                        None
 627                    }
 628                }))
 629            }
 630        };
 631
 632        let local = self.as_local().is_some();
 633        let worktree_path = self.abs_path.clone();
 634        let worktree_handle = cx.handle();
 635        let mut buffers_to_delete = Vec::new();
 636        for (buffer_id, buffer) in open_buffers {
 637            if let Some(buffer) = buffer.upgrade(cx) {
 638                buffer.update(cx, |buffer, cx| {
 639                    if let Some(old_file) = File::from_dyn(buffer.file()) {
 640                        let new_file = if let Some(entry) = old_file
 641                            .entry_id
 642                            .and_then(|entry_id| self.entry_for_id(entry_id))
 643                        {
 644                            File {
 645                                is_local: local,
 646                                worktree_path: worktree_path.clone(),
 647                                entry_id: Some(entry.id),
 648                                mtime: entry.mtime,
 649                                path: entry.path.clone(),
 650                                worktree: worktree_handle.clone(),
 651                            }
 652                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
 653                            File {
 654                                is_local: local,
 655                                worktree_path: worktree_path.clone(),
 656                                entry_id: Some(entry.id),
 657                                mtime: entry.mtime,
 658                                path: entry.path.clone(),
 659                                worktree: worktree_handle.clone(),
 660                            }
 661                        } else {
 662                            File {
 663                                is_local: local,
 664                                worktree_path: worktree_path.clone(),
 665                                entry_id: None,
 666                                path: old_file.path().clone(),
 667                                mtime: old_file.mtime(),
 668                                worktree: worktree_handle.clone(),
 669                            }
 670                        };
 671
 672                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
 673                            task.detach();
 674                        }
 675                    }
 676                });
 677            } else {
 678                buffers_to_delete.push(*buffer_id);
 679            }
 680        }
 681
 682        for buffer_id in buffers_to_delete {
 683            match self {
 684                Self::Local(worktree) => {
 685                    worktree.open_buffers.remove(&buffer_id);
 686                }
 687                Self::Remote(worktree) => {
 688                    worktree.open_buffers.remove(&buffer_id);
 689                }
 690            }
 691        }
 692    }
 693
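    // Converts an LSP `PublishDiagnostics` notification into grouped diagnostic entries for
    // the affected path: each primary diagnostic starts a group, its related information is
    // added as supporting entries, and severities reported separately for supporting
    // diagnostics are copied onto them in a second pass.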
 694    pub fn update_diagnostics(
 695        &mut self,
 696        params: lsp::PublishDiagnosticsParams,
 697        disk_based_sources: &HashSet<String>,
 698        cx: &mut ModelContext<Worktree>,
 699    ) -> Result<()> {
 700        let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
 701        let abs_path = params
 702            .uri
 703            .to_file_path()
 704            .map_err(|_| anyhow!("URI is not a file"))?;
 705        let worktree_path = Arc::from(
 706            abs_path
 707                .strip_prefix(&this.abs_path)
 708                .context("path is not within worktree")?,
 709        );
 710
 711        let mut next_group_id = 0;
 712        let mut diagnostics = Vec::default();
 713        let mut primary_diagnostic_group_ids = HashMap::default();
 714        let mut sources_by_group_id = HashMap::default();
 715        let mut supporting_diagnostic_severities = HashMap::default();
 716        for diagnostic in &params.diagnostics {
 717            let source = diagnostic.source.as_ref();
 718            let code = diagnostic.code.as_ref().map(|code| match code {
 719                lsp::NumberOrString::Number(code) => code.to_string(),
 720                lsp::NumberOrString::String(code) => code.clone(),
 721            });
 722            let range = range_from_lsp(diagnostic.range);
 723            let is_supporting = diagnostic
 724                .related_information
 725                .as_ref()
 726                .map_or(false, |infos| {
 727                    infos.iter().any(|info| {
 728                        primary_diagnostic_group_ids.contains_key(&(
 729                            source,
 730                            code.clone(),
 731                            range_from_lsp(info.location.range),
 732                        ))
 733                    })
 734                });
 735
 736            if is_supporting {
 737                if let Some(severity) = diagnostic.severity {
 738                    supporting_diagnostic_severities
 739                        .insert((source, code.clone(), range), severity);
 740                }
 741            } else {
 742                let group_id = post_inc(&mut next_group_id);
 743                let is_disk_based =
 744                    source.map_or(false, |source| disk_based_sources.contains(source));
 745
 746                sources_by_group_id.insert(group_id, source);
 747                primary_diagnostic_group_ids
 748                    .insert((source, code.clone(), range.clone()), group_id);
 749
 750                diagnostics.push(DiagnosticEntry {
 751                    range,
 752                    diagnostic: Diagnostic {
 753                        code: code.clone(),
 754                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
 755                        message: diagnostic.message.clone(),
 756                        group_id,
 757                        is_primary: true,
 758                        is_valid: true,
 759                        is_disk_based,
 760                    },
 761                });
 762                if let Some(infos) = &diagnostic.related_information {
 763                    for info in infos {
 764                        if info.location.uri == params.uri {
 765                            let range = range_from_lsp(info.location.range);
 766                            diagnostics.push(DiagnosticEntry {
 767                                range,
 768                                diagnostic: Diagnostic {
 769                                    code: code.clone(),
 770                                    severity: DiagnosticSeverity::INFORMATION,
 771                                    message: info.message.clone(),
 772                                    group_id,
 773                                    is_primary: false,
 774                                    is_valid: true,
 775                                    is_disk_based,
 776                                },
 777                            });
 778                        }
 779                    }
 780                }
 781            }
 782        }
 783
 784        for entry in &mut diagnostics {
 785            let diagnostic = &mut entry.diagnostic;
 786            if !diagnostic.is_primary {
 787                let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
 788                if let Some(&severity) = supporting_diagnostic_severities.get(&(
 789                    source,
 790                    diagnostic.code.clone(),
 791                    entry.range.clone(),
 792                )) {
 793                    diagnostic.severity = severity;
 794                }
 795            }
 796        }
 797
 798        self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
 799        Ok(())
 800    }
 801
 802    pub fn update_diagnostic_entries(
 803        &mut self,
 804        worktree_path: Arc<Path>,
 805        version: Option<i32>,
 806        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
 807        cx: &mut ModelContext<Self>,
 808    ) -> Result<()> {
 809        let this = self.as_local_mut().unwrap();
 810        for buffer in this.open_buffers.values() {
 811            if let Some(buffer) = buffer.upgrade(cx) {
 812                if buffer
 813                    .read(cx)
 814                    .file()
 815                    .map_or(false, |file| *file.path() == worktree_path)
 816                {
 817                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
 818                        (
 819                            buffer.remote_id(),
 820                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
 821                        )
 822                    });
 823                    self.send_buffer_update(remote_id, operation?, cx);
 824                    break;
 825                }
 826            }
 827        }
 828
 829        let this = self.as_local_mut().unwrap();
 830        let summary = DiagnosticSummary::new(&diagnostics);
 831        this.snapshot
 832            .diagnostic_summaries
 833            .insert(PathKey(worktree_path.clone()), summary.clone());
 834        this.diagnostics.insert(worktree_path.clone(), diagnostics);
 835
 836        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));
 837
 838        if let Some(share) = this.share.as_ref() {
 839            cx.foreground()
 840                .spawn({
 841                    let client = this.client.clone();
 842                    let project_id = share.project_id;
 843                    let worktree_id = this.id().to_proto();
 844                    let path = worktree_path.to_string_lossy().to_string();
 845                    async move {
 846                        client
 847                            .send(proto::UpdateDiagnosticSummary {
 848                                project_id,
 849                                worktree_id,
 850                                summary: Some(proto::DiagnosticSummary {
 851                                    path,
 852                                    error_count: summary.error_count as u32,
 853                                    warning_count: summary.warning_count as u32,
 854                                    info_count: summary.info_count as u32,
 855                                    hint_count: summary.hint_count as u32,
 856                                }),
 857                            })
 858                            .await
 859                            .log_err()
 860                    }
 861                })
 862                .detach();
 863        }
 864
 865        Ok(())
 866    }
 867
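    // Sends a buffer operation to the server over RPC; if the request fails, the operation
    // is pushed onto `queued_operations` rather than being dropped.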
 868    fn send_buffer_update(
 869        &mut self,
 870        buffer_id: u64,
 871        operation: Operation,
 872        cx: &mut ModelContext<Self>,
 873    ) {
 874        if let Some((project_id, worktree_id, rpc)) = match self {
 875            Worktree::Local(worktree) => worktree
 876                .share
 877                .as_ref()
 878                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
 879            Worktree::Remote(worktree) => Some((
 880                worktree.project_id,
 881                worktree.snapshot.id(),
 882                worktree.client.clone(),
 883            )),
 884        } {
 885            cx.spawn(|worktree, mut cx| async move {
 886                if let Err(error) = rpc
 887                    .request(proto::UpdateBuffer {
 888                        project_id,
 889                        worktree_id: worktree_id.0 as u64,
 890                        buffer_id,
 891                        operations: vec![language::proto::serialize_operation(&operation)],
 892                    })
 893                    .await
 894                {
 895                    worktree.update(&mut cx, |worktree, _| {
 896                        log::error!("error sending buffer operation: {}", error);
 897                        match worktree {
 898                            Worktree::Local(t) => &mut t.queued_operations,
 899                            Worktree::Remote(t) => &mut t.queued_operations,
 900                        }
 901                        .push((buffer_id, operation));
 902                    });
 903                }
 904            })
 905            .detach();
 906        }
 907    }
 908}
 909
 910impl WorktreeId {
 911    pub fn from_usize(handle_id: usize) -> Self {
 912        Self(handle_id)
 913    }
 914
 915    pub(crate) fn from_proto(id: u64) -> Self {
 916        Self(id as usize)
 917    }
 918
 919    pub fn to_proto(&self) -> u64 {
 920        self.0 as u64
 921    }
 922
 923    pub fn to_usize(&self) -> usize {
 924        self.0
 925    }
 926}
 927
 928impl fmt::Display for WorktreeId {
 929    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 930        self.0.fmt(f)
 931    }
 932}
 933
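    // A point-in-time view of the worktree: its entries (indexed both by path and by id),
    // gitignore state, and per-path diagnostic summaries.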
 934#[derive(Clone)]
 935pub struct Snapshot {
 936    id: WorktreeId,
 937    scan_id: usize,
 938    abs_path: Arc<Path>,
 939    root_name: String,
 940    root_char_bag: CharBag,
 941    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
 942    entries_by_path: SumTree<Entry>,
 943    entries_by_id: SumTree<PathEntry>,
 944    removed_entry_ids: HashMap<u64, usize>,
 945    next_entry_id: Arc<AtomicUsize>,
 946    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
 947}
 948
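    // A worktree backed by the local file system. It owns the background scanner's snapshot,
    // open and shared buffer handles, language servers, and any state needed to share the
    // worktree with collaborators.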
 949pub struct LocalWorktree {
 950    snapshot: Snapshot,
 951    config: WorktreeConfig,
 952    background_snapshot: Arc<Mutex<Snapshot>>,
 953    last_scan_state_rx: watch::Receiver<ScanState>,
 954    _background_scanner_task: Option<Task<()>>,
 955    poll_task: Option<Task<()>>,
 956    share: Option<ShareState>,
 957    loading_buffers: LoadingBuffers,
 958    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
 959    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
 960    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
 961    queued_operations: Vec<(u64, Operation)>,
 962    language_registry: Arc<LanguageRegistry>,
 963    client: Arc<Client>,
 964    user_store: ModelHandle<UserStore>,
 965    fs: Arc<dyn Fs>,
 966    languages: Vec<Arc<Language>>,
 967    language_servers: HashMap<String, Arc<LanguageServer>>,
 968}
 969
 970struct ShareState {
 971    project_id: u64,
 972    snapshots_tx: Sender<Snapshot>,
 973}
 974
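    // A worktree mirrored from another collaborator's machine. Its snapshot is kept current
    // by `UpdateWorktree` messages received over RPC.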
 975pub struct RemoteWorktree {
 976    project_id: u64,
 977    snapshot: Snapshot,
 978    snapshot_rx: watch::Receiver<Snapshot>,
 979    client: Arc<Client>,
 980    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
 981    replica_id: ReplicaId,
 982    loading_buffers: LoadingBuffers,
 983    open_buffers: HashMap<usize, RemoteBuffer>,
 984    languages: Arc<LanguageRegistry>,
 985    user_store: ModelHandle<UserStore>,
 986    queued_operations: Vec<(u64, Operation)>,
 987}
 988
 989type LoadingBuffers = HashMap<
 990    Arc<Path>,
 991    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
 992>;
 993
 994#[derive(Default, Deserialize)]
 995struct WorktreeConfig {
 996    collaborators: Vec<String>,
 997}
 998
 999impl LocalWorktree {
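    // Creates the worktree model for a local path: reads the root's metadata and the
    // optional `.zed.toml` config, builds the initial snapshot, and forwards scan-state
    // changes from the background scanner into snapshot polling (and, when shared, into
    // outgoing snapshot updates).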
1000    async fn new(
1001        client: Arc<Client>,
1002        user_store: ModelHandle<UserStore>,
1003        path: impl Into<Arc<Path>>,
1004        fs: Arc<dyn Fs>,
1005        languages: Arc<LanguageRegistry>,
1006        cx: &mut AsyncAppContext,
1007    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
1008        let abs_path = path.into();
1009        let path: Arc<Path> = Arc::from(Path::new(""));
1010        let next_entry_id = AtomicUsize::new(0);
1011
1012        // Populate the snapshot's "root name": the file name of the worktree's root path,
1013        // which will be used for fuzzy matching.
1014        let root_name = abs_path
1015            .file_name()
1016            .map_or(String::new(), |f| f.to_string_lossy().to_string());
1017        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
1018        let metadata = fs.metadata(&abs_path).await?;
1019
1020        let mut config = WorktreeConfig::default();
1021        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
1022            if let Ok(parsed) = toml::from_str(&zed_toml) {
1023                config = parsed;
1024            }
1025        }
1026
1027        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
1028        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
1029        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
1030            let mut snapshot = Snapshot {
1031                id: WorktreeId::from_usize(cx.model_id()),
1032                scan_id: 0,
1033                abs_path,
1034                root_name: root_name.clone(),
1035                root_char_bag,
1036                ignores: Default::default(),
1037                entries_by_path: Default::default(),
1038                entries_by_id: Default::default(),
1039                removed_entry_ids: Default::default(),
1040                next_entry_id: Arc::new(next_entry_id),
1041                diagnostic_summaries: Default::default(),
1042            };
1043            if let Some(metadata) = metadata {
1044                snapshot.insert_entry(
1045                    Entry::new(
1046                        path.into(),
1047                        &metadata,
1048                        &snapshot.next_entry_id,
1049                        snapshot.root_char_bag,
1050                    ),
1051                    fs.as_ref(),
1052                );
1053            }
1054
1055            let tree = Self {
1056                snapshot: snapshot.clone(),
1057                config,
1058                background_snapshot: Arc::new(Mutex::new(snapshot)),
1059                last_scan_state_rx,
1060                _background_scanner_task: None,
1061                share: None,
1062                poll_task: None,
1063                loading_buffers: Default::default(),
1064                open_buffers: Default::default(),
1065                shared_buffers: Default::default(),
1066                diagnostics: Default::default(),
1067                queued_operations: Default::default(),
1068                language_registry: languages,
1069                client,
1070                user_store,
1071                fs,
1072                languages: Default::default(),
1073                language_servers: Default::default(),
1074            };
1075
1076            cx.spawn_weak(|this, mut cx| async move {
1077                while let Ok(scan_state) = scan_states_rx.recv().await {
1078                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1079                        let to_send = handle.update(&mut cx, |this, cx| {
1080                            last_scan_state_tx.blocking_send(scan_state).ok();
1081                            this.poll_snapshot(cx);
1082                            let tree = this.as_local_mut().unwrap();
1083                            if !tree.is_scanning() {
1084                                if let Some(share) = tree.share.as_ref() {
1085                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
1086                                }
1087                            }
1088                            None
1089                        });
1090
1091                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
1092                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
1093                                log::error!("error submitting snapshot to send: {}", err);
1094                            }
1095                        }
1096                    } else {
1097                        break;
1098                    }
1099                }
1100            })
1101            .detach();
1102
1103            Worktree::Local(tree)
1104        });
1105
1106        Ok((tree, scan_states_tx))
1107    }
1108
1109    pub fn authorized_logins(&self) -> Vec<String> {
1110        self.config.collaborators.clone()
1111    }
1112
1113    pub fn language_registry(&self) -> &LanguageRegistry {
1114        &self.language_registry
1115    }
1116
1117    pub fn languages(&self) -> &[Arc<Language>] {
1118        &self.languages
1119    }
1120
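    // Ensures a language server is running for the given language, wiring its diagnostics
    // and progress notifications back into this worktree. Returns the existing server if
    // one has already been started for that language.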
1121    pub fn register_language(
1122        &mut self,
1123        language: &Arc<Language>,
1124        cx: &mut ModelContext<Worktree>,
1125    ) -> Option<Arc<LanguageServer>> {
1126        if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
1127            self.languages.push(language.clone());
1128        }
1129
1130        if let Some(server) = self.language_servers.get(language.name()) {
1131            return Some(server.clone());
1132        }
1133
1134        if let Some(language_server) = language
1135            .start_server(self.abs_path(), cx)
1136            .log_err()
1137            .flatten()
1138        {
1139            let disk_based_sources = language
1140                .disk_based_diagnostic_sources()
1141                .cloned()
1142                .unwrap_or_default();
1143            let disk_based_diagnostics_progress_token =
1144                language.disk_based_diagnostics_progress_token().cloned();
1145            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
1146            let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
1147                smol::channel::unbounded();
1148            language_server
1149                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
1150                    smol::block_on(diagnostics_tx.send(params)).ok();
1151                })
1152                .detach();
1153            cx.spawn_weak(|this, mut cx| {
1154                let has_disk_based_diagnostic_progress_token =
1155                    disk_based_diagnostics_progress_token.is_some();
1156                let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
1157                async move {
1158                    while let Ok(diagnostics) = diagnostics_rx.recv().await {
1159                        if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1160                            handle.update(&mut cx, |this, cx| {
1161                                this.update_diagnostics(diagnostics, &disk_based_sources, cx)
1162                                    .log_err();
1163                                if !has_disk_based_diagnostic_progress_token {
1164                                    smol::block_on(disk_based_diagnostics_done_tx.send(())).ok();
1165                                }
1166                            })
1167                        } else {
1168                            break;
1169                        }
1170                    }
1171                }
1172            })
1173            .detach();
1174
1175            let mut pending_disk_based_diagnostics: i32 = 0;
1176            language_server
1177                .on_notification::<lsp::notification::Progress, _>(move |params| {
1178                    let token = match params.token {
1179                        lsp::NumberOrString::Number(_) => None,
1180                        lsp::NumberOrString::String(token) => Some(token),
1181                    };
1182
1183                    if token == disk_based_diagnostics_progress_token {
1184                        match params.value {
1185                            lsp::ProgressParamsValue::WorkDone(progress) => match progress {
1186                                lsp::WorkDoneProgress::Begin(_) => {
1187                                    pending_disk_based_diagnostics += 1;
1188                                }
1189                                lsp::WorkDoneProgress::End(_) => {
1190                                    pending_disk_based_diagnostics -= 1;
1191                                    if pending_disk_based_diagnostics == 0 {
1192                                        smol::block_on(disk_based_diagnostics_done_tx.send(()))
1193                                            .ok();
1194                                    }
1195                                }
1196                                _ => {}
1197                            },
1198                        }
1199                    }
1200                })
1201                .detach();
1202            let rpc = self.client.clone();
1203            cx.spawn_weak(|this, mut cx| async move {
1204                while let Ok(()) = disk_based_diagnostics_done_rx.recv().await {
1205                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1206                        let message = handle.update(&mut cx, |this, cx| {
1207                            cx.emit(Event::DiskBasedDiagnosticsUpdated);
1208                            let this = this.as_local().unwrap();
1209                            this.share
1210                                .as_ref()
1211                                .map(|share| proto::DiskBasedDiagnosticsUpdated {
1212                                    project_id: share.project_id,
1213                                    worktree_id: this.id().to_proto(),
1214                                })
1215                        });
1216
1217                        if let Some(message) = message {
1218                            rpc.send(message).await.log_err();
1219                        }
1220                    } else {
1221                        break;
1222                    }
1223                }
1224            })
1225            .detach();
1226
1227            self.language_servers
1228                .insert(language.name().to_string(), language_server.clone());
1229            Some(language_server.clone())
1230        } else {
1231            None
1232        }
1233    }
1234
1235    fn get_open_buffer(
1236        &mut self,
1237        path: &Path,
1238        cx: &mut ModelContext<Worktree>,
1239    ) -> Option<ModelHandle<Buffer>> {
1240        let handle = cx.handle();
1241        let mut result = None;
1242        self.open_buffers.retain(|_buffer_id, buffer| {
1243            if let Some(buffer) = buffer.upgrade(cx) {
1244                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1245                    if file.worktree == handle && file.path().as_ref() == path {
1246                        result = Some(buffer);
1247                    }
1248                }
1249                true
1250            } else {
1251                false
1252            }
1253        });
1254        result
1255    }
1256
1257    fn open_buffer(
1258        &mut self,
1259        path: &Path,
1260        cx: &mut ModelContext<Worktree>,
1261    ) -> Task<Result<ModelHandle<Buffer>>> {
1262        let path = Arc::from(path);
1263        cx.spawn(move |this, mut cx| async move {
1264            let (file, contents) = this
1265                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
1266                .await?;
1267
1268            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
1269                let this = this.as_local_mut().unwrap();
1270                let diagnostics = this.diagnostics.remove(&path);
1271                let language = this
1272                    .language_registry
1273                    .select_language(file.full_path())
1274                    .cloned();
1275                let server = language
1276                    .as_ref()
1277                    .and_then(|language| this.register_language(language, cx));
1278                (diagnostics, language, server)
1279            });
1280
1281            let mut buffer_operations = Vec::new();
1282            let buffer = cx.add_model(|cx| {
1283                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
1284                buffer.set_language(language, language_server, cx);
1285                if let Some(diagnostics) = diagnostics {
1286                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
1287                    buffer_operations.push(op);
1288                }
1289                buffer
1290            });
1291
1292            this.update(&mut cx, |this, cx| {
1293                for op in buffer_operations {
1294                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
1295                }
1296                let this = this.as_local_mut().unwrap();
1297                this.open_buffers.insert(buffer.id(), buffer.downgrade());
1298            });
1299
1300            Ok(buffer)
1301        })
1302    }
1303
1304    pub fn open_remote_buffer(
1305        &mut self,
1306        envelope: TypedEnvelope<proto::OpenBuffer>,
1307        cx: &mut ModelContext<Worktree>,
1308    ) -> Task<Result<proto::OpenBufferResponse>> {
1309        cx.spawn(|this, mut cx| async move {
1310            let peer_id = envelope.original_sender_id();
1311            let path = Path::new(&envelope.payload.path);
1312            let buffer = this
1313                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
1314                .await?;
1315            this.update(&mut cx, |this, cx| {
1316                this.as_local_mut()
1317                    .unwrap()
1318                    .shared_buffers
1319                    .entry(peer_id?)
1320                    .or_default()
1321                    .insert(buffer.id() as u64, buffer.clone());
1322
1323                Ok(proto::OpenBufferResponse {
1324                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
1325                })
1326            })
1327        })
1328    }
1329
1330    pub fn close_remote_buffer(
1331        &mut self,
1332        envelope: TypedEnvelope<proto::CloseBuffer>,
1333        cx: &mut ModelContext<Worktree>,
1334    ) -> Result<()> {
1335        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1336            shared_buffers.remove(&envelope.payload.buffer_id);
1337            cx.notify();
1338        }
1339
1340        Ok(())
1341    }
1342
1343    pub fn remove_collaborator(
1344        &mut self,
1345        peer_id: PeerId,
1346        replica_id: ReplicaId,
1347        cx: &mut ModelContext<Worktree>,
1348    ) {
1349        self.shared_buffers.remove(&peer_id);
1350        for (_, buffer) in &self.open_buffers {
1351            if let Some(buffer) = buffer.upgrade(cx) {
1352                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1353            }
1354        }
1355        cx.notify();
1356    }
1357
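    // Returns a future that resolves once the background scanner has left the `Scanning`
    // state.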
1358    pub fn scan_complete(&self) -> impl Future<Output = ()> {
1359        let mut scan_state_rx = self.last_scan_state_rx.clone();
1360        async move {
1361            let mut scan_state = Some(scan_state_rx.borrow().clone());
1362            while let Some(ScanState::Scanning) = scan_state {
1363                scan_state = scan_state_rx.recv().await;
1364            }
1365        }
1366    }
1367
1368    fn is_scanning(&self) -> bool {
1369        if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1370            true
1371        } else {
1372            false
1373        }
1374    }
1375
1376    pub fn snapshot(&self) -> Snapshot {
1377        self.snapshot.clone()
1378    }
1379
1380    pub fn abs_path(&self) -> &Arc<Path> {
1381        &self.snapshot.abs_path
1382    }
1383
1384    pub fn contains_abs_path(&self, path: &Path) -> bool {
1385        path.starts_with(&self.snapshot.abs_path)
1386    }
1387
1388    fn absolutize(&self, path: &Path) -> PathBuf {
1389        if path.file_name().is_some() {
1390            self.snapshot.abs_path.join(path)
1391        } else {
1392            self.snapshot.abs_path.to_path_buf()
1393        }
1394    }
1395
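    // Reads the file at `path` from disk, refreshing its entry in the background snapshot
    // so the returned `File` carries up-to-date metadata.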
1396    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
1397        let handle = cx.handle();
1398        let path = Arc::from(path);
1399        let worktree_path = self.abs_path.clone();
1400        let abs_path = self.absolutize(&path);
1401        let background_snapshot = self.background_snapshot.clone();
1402        let fs = self.fs.clone();
1403        cx.spawn(|this, mut cx| async move {
1404            let text = fs.load(&abs_path).await?;
1405            // Eagerly populate the snapshot with an updated entry for the loaded file
1406            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
1407            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
1408            Ok((
1409                File {
1410                    entry_id: Some(entry.id),
1411                    worktree: handle,
1412                    worktree_path,
1413                    path: entry.path,
1414                    mtime: entry.mtime,
1415                    is_local: true,
1416                },
1417                text,
1418            ))
1419        })
1420    }
1421
1422    pub fn save_buffer_as(
1423        &self,
1424        buffer: ModelHandle<Buffer>,
1425        path: impl Into<Arc<Path>>,
1426        text: Rope,
1427        cx: &mut ModelContext<Worktree>,
1428    ) -> Task<Result<File>> {
1429        let save = self.save(path, text, cx);
1430        cx.spawn(|this, mut cx| async move {
1431            let entry = save.await?;
1432            this.update(&mut cx, |this, cx| {
1433                let this = this.as_local_mut().unwrap();
1434                this.open_buffers.insert(buffer.id(), buffer.downgrade());
1435                Ok(File {
1436                    entry_id: Some(entry.id),
1437                    worktree: cx.handle(),
1438                    worktree_path: this.abs_path.clone(),
1439                    path: entry.path,
1440                    mtime: entry.mtime,
1441                    is_local: true,
1442                })
1443            })
1444        })
1445    }
1446
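        // Writes `text` to `path` on a background task and then refreshes the
        // snapshot's entry for the saved file.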
1447    fn save(
1448        &self,
1449        path: impl Into<Arc<Path>>,
1450        text: Rope,
1451        cx: &mut ModelContext<Worktree>,
1452    ) -> Task<Result<Entry>> {
1453        let path = path.into();
1454        let abs_path = self.absolutize(&path);
1455        let background_snapshot = self.background_snapshot.clone();
1456        let fs = self.fs.clone();
1457        let save = cx.background().spawn(async move {
1458            fs.save(&abs_path, &text).await?;
1459            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
1460        });
1461
1462        cx.spawn(|this, mut cx| async move {
1463            let entry = save.await?;
1464            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
1465            Ok(entry)
1466        })
1467    }
1468
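        // Shares this worktree under `project_id`: sends the full snapshot to the
        // server and spawns a background task that streams an incremental
        // `UpdateWorktree` diff for every snapshot pushed onto `snapshots_tx`.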
1469    pub fn share(
1470        &mut self,
1471        project_id: u64,
1472        cx: &mut ModelContext<Worktree>,
1473    ) -> Task<anyhow::Result<()>> {
1474        if self.share.is_some() {
1475            return Task::ready(Ok(()));
1476        }
1477
1478        let snapshot = self.snapshot();
1479        let rpc = self.client.clone();
1480        let worktree_id = cx.model_id() as u64;
1481        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
1482        self.share = Some(ShareState {
1483            project_id,
1484            snapshots_tx: snapshots_to_send_tx,
1485        });
1486
1487        cx.background()
1488            .spawn({
1489                let rpc = rpc.clone();
1490                let snapshot = snapshot.clone();
1491                async move {
1492                    let mut prev_snapshot = snapshot;
1493                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
1494                        let message =
1495                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
1496                        match rpc.send(message).await {
1497                            Ok(()) => prev_snapshot = snapshot,
1498                            Err(err) => log::error!("error sending snapshot diff {}", err),
1499                        }
1500                    }
1501                }
1502            })
1503            .detach();
1504
1505        let share_message = cx.background().spawn(async move {
1506            proto::ShareWorktree {
1507                project_id,
1508                worktree: Some(snapshot.to_proto()),
1509            }
1510        });
1511
1512        cx.foreground().spawn(async move {
1513            rpc.request(share_message.await).await?;
1514            Ok(())
1515        })
1516    }
1517}
1518
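    // Loads a `.gitignore` file from disk and parses it into a `Gitignore`
    // matcher rooted at the file's parent directory.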
1519fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1520    let contents = smol::block_on(fs.load(&abs_path))?;
1521    let parent = abs_path.parent().unwrap_or(Path::new("/"));
1522    let mut builder = GitignoreBuilder::new(parent);
1523    for line in contents.lines() {
1524        builder.add_line(Some(abs_path.into()), line)?;
1525    }
1526    Ok(builder.build()?)
1527}
1528
1529impl Deref for Worktree {
1530    type Target = Snapshot;
1531
1532    fn deref(&self) -> &Self::Target {
1533        match self {
1534            Worktree::Local(worktree) => &worktree.snapshot,
1535            Worktree::Remote(worktree) => &worktree.snapshot,
1536        }
1537    }
1538}
1539
1540impl Deref for LocalWorktree {
1541    type Target = Snapshot;
1542
1543    fn deref(&self) -> &Self::Target {
1544        &self.snapshot
1545    }
1546}
1547
1548impl Deref for RemoteWorktree {
1549    type Target = Snapshot;
1550
1551    fn deref(&self) -> &Self::Target {
1552        &self.snapshot
1553    }
1554}
1555
1556impl fmt::Debug for LocalWorktree {
1557    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1558        self.snapshot.fmt(f)
1559    }
1560}
1561
1562impl RemoteWorktree {
1563    fn get_open_buffer(
1564        &mut self,
1565        path: &Path,
1566        cx: &mut ModelContext<Worktree>,
1567    ) -> Option<ModelHandle<Buffer>> {
1568        let handle = cx.handle();
1569        let mut existing_buffer = None;
1570        self.open_buffers.retain(|_buffer_id, buffer| {
1571            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
1572                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1573                    if file.worktree == handle && file.path().as_ref() == path {
1574                        existing_buffer = Some(buffer);
1575                    }
1576                }
1577                true
1578            } else {
1579                false
1580            }
1581        });
1582        existing_buffer
1583    }
1584
1585    fn open_buffer(
1586        &mut self,
1587        path: &Path,
1588        cx: &mut ModelContext<Worktree>,
1589    ) -> Task<Result<ModelHandle<Buffer>>> {
1590        let rpc = self.client.clone();
1591        let replica_id = self.replica_id;
1592        let project_id = self.project_id;
1593        let remote_worktree_id = self.id();
1594        let root_path = self.snapshot.abs_path.clone();
1595        let path: Arc<Path> = Arc::from(path);
1596        let path_string = path.to_string_lossy().to_string();
1597        cx.spawn_weak(move |this, mut cx| async move {
1598            let entry = this
1599                .upgrade(&cx)
1600                .ok_or_else(|| anyhow!("worktree was closed"))?
1601                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
1602                .ok_or_else(|| anyhow!("file does not exist"))?;
1603            let response = rpc
1604                .request(proto::OpenBuffer {
1605                    project_id,
1606                    worktree_id: remote_worktree_id.to_proto(),
1607                    path: path_string,
1608                })
1609                .await?;
1610
1611            let this = this
1612                .upgrade(&cx)
1613                .ok_or_else(|| anyhow!("worktree was closed"))?;
1614            let file = File {
1615                entry_id: Some(entry.id),
1616                worktree: this.clone(),
1617                worktree_path: root_path,
1618                path: entry.path,
1619                mtime: entry.mtime,
1620                is_local: false,
1621            };
1622            let language = this.read_with(&cx, |this, _| {
1623                use language::File;
1624                this.languages().select_language(file.full_path()).cloned()
1625            });
1626            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
1627            let buffer_id = remote_buffer.id as usize;
1628            let buffer = cx.add_model(|cx| {
1629                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
1630                    .unwrap()
1631                    .with_language(language, None, cx)
1632            });
1633            this.update(&mut cx, move |this, cx| {
1634                let this = this.as_remote_mut().unwrap();
1635                if let Some(RemoteBuffer::Operations(pending_ops)) = this
1636                    .open_buffers
1637                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
1638                {
1639                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
1640                }
1641                Result::<_, anyhow::Error>::Ok(buffer)
1642            })
1643        })
1644    }
1645
1646    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1647        for (_, buffer) in self.open_buffers.drain() {
1648            if let RemoteBuffer::Loaded(buffer) = buffer {
1649                if let Some(buffer) = buffer.upgrade(cx) {
1650                    buffer.update(cx, |buffer, cx| buffer.close(cx))
1651                }
1652            }
1653        }
1654    }
1655
1656    fn snapshot(&self) -> Snapshot {
1657        self.snapshot.clone()
1658    }
1659
1660    pub fn update_from_remote(
1661        &mut self,
1662        envelope: TypedEnvelope<proto::UpdateWorktree>,
1663        cx: &mut ModelContext<Worktree>,
1664    ) -> Result<()> {
1665        let mut tx = self.updates_tx.clone();
1666        let payload = envelope.payload.clone();
1667        cx.background()
1668            .spawn(async move {
1669                tx.send(payload).await.expect("receiver runs to completion");
1670            })
1671            .detach();
1672
1673        Ok(())
1674    }
1675
1676    pub fn update_diagnostic_summary(
1677        &mut self,
1678        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
1679        cx: &mut ModelContext<Worktree>,
1680    ) {
1681        if let Some(summary) = envelope.payload.summary {
1682            let path: Arc<Path> = Path::new(&summary.path).into();
1683            self.snapshot.diagnostic_summaries.insert(
1684                PathKey(path.clone()),
1685                DiagnosticSummary {
1686                    error_count: summary.error_count as usize,
1687                    warning_count: summary.warning_count as usize,
1688                    info_count: summary.info_count as usize,
1689                    hint_count: summary.hint_count as usize,
1690                },
1691            );
1692            cx.emit(Event::DiagnosticsUpdated(path));
1693        }
1694    }
1695
1696    pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
1697        cx.emit(Event::DiskBasedDiagnosticsUpdated);
1698    }
1699
1700    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1701        for (_, buffer) in &self.open_buffers {
1702            if let Some(buffer) = buffer.upgrade(cx) {
1703                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1704            }
1705        }
1706        cx.notify();
1707    }
1708}
1709
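    // A buffer belonging to a remote worktree. Operations that arrive before the
    // buffer has been opened locally are queued in the `Operations` variant; once
    // loaded, only a weak handle is kept so the buffer can be dropped when it is
    // no longer referenced.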
1710enum RemoteBuffer {
1711    Operations(Vec<Operation>),
1712    Loaded(WeakModelHandle<Buffer>),
1713}
1714
1715impl RemoteBuffer {
1716    fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1717        match self {
1718            Self::Operations(_) => None,
1719            Self::Loaded(buffer) => buffer.upgrade(cx),
1720        }
1721    }
1722}
1723
1724impl Snapshot {
1725    pub fn id(&self) -> WorktreeId {
1726        self.id
1727    }
1728
1729    pub fn to_proto(&self) -> proto::Worktree {
1730        let root_name = self.root_name.clone();
1731        proto::Worktree {
1732            id: self.id.0 as u64,
1733            root_name,
1734            entries: self
1735                .entries_by_path
1736                .iter()
1737                .filter(|e| !e.is_ignored)
1738                .map(Into::into)
1739                .collect(),
1740            diagnostic_summaries: self
1741                .diagnostic_summaries
1742                .iter()
1743                .map(|(path, summary)| summary.to_proto(path.0.clone()))
1744                .collect(),
1745        }
1746    }
1747
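        // Computes a diff between this snapshot and `other` by walking both
        // id-ordered entry trees in lockstep: entries present only in `self`, or
        // whose `scan_id` changed, are reported as updated, while entries present
        // only in `other` are reported as removed.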
1748    pub fn build_update(
1749        &self,
1750        other: &Self,
1751        project_id: u64,
1752        worktree_id: u64,
1753        include_ignored: bool,
1754    ) -> proto::UpdateWorktree {
1755        let mut updated_entries = Vec::new();
1756        let mut removed_entries = Vec::new();
1757        let mut self_entries = self
1758            .entries_by_id
1759            .cursor::<()>()
1760            .filter(|e| include_ignored || !e.is_ignored)
1761            .peekable();
1762        let mut other_entries = other
1763            .entries_by_id
1764            .cursor::<()>()
1765            .filter(|e| include_ignored || !e.is_ignored)
1766            .peekable();
1767        loop {
1768            match (self_entries.peek(), other_entries.peek()) {
1769                (Some(self_entry), Some(other_entry)) => {
1770                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1771                        Ordering::Less => {
1772                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1773                            updated_entries.push(entry);
1774                            self_entries.next();
1775                        }
1776                        Ordering::Equal => {
1777                            if self_entry.scan_id != other_entry.scan_id {
1778                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1779                                updated_entries.push(entry);
1780                            }
1781
1782                            self_entries.next();
1783                            other_entries.next();
1784                        }
1785                        Ordering::Greater => {
1786                            removed_entries.push(other_entry.id as u64);
1787                            other_entries.next();
1788                        }
1789                    }
1790                }
1791                (Some(self_entry), None) => {
1792                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1793                    updated_entries.push(entry);
1794                    self_entries.next();
1795                }
1796                (None, Some(other_entry)) => {
1797                    removed_entries.push(other_entry.id as u64);
1798                    other_entries.next();
1799                }
1800                (None, None) => break,
1801            }
1802        }
1803
1804        proto::UpdateWorktree {
1805            project_id,
1806            worktree_id,
1807            root_name: self.root_name().to_string(),
1808            updated_entries,
1809            removed_entries,
1810        }
1811    }
1812
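        // Applies a remote `UpdateWorktree` message to this snapshot, removing
        // deleted entries and inserting or replacing updated ones in both the
        // path-ordered and id-ordered trees.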
1813    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1814        self.scan_id += 1;
1815        let scan_id = self.scan_id;
1816
1817        let mut entries_by_path_edits = Vec::new();
1818        let mut entries_by_id_edits = Vec::new();
1819        for entry_id in update.removed_entries {
1820            let entry_id = entry_id as usize;
1821            let entry = self
1822                .entry_for_id(entry_id)
1823                .ok_or_else(|| anyhow!("unknown entry"))?;
1824            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1825            entries_by_id_edits.push(Edit::Remove(entry.id));
1826        }
1827
1828        for entry in update.updated_entries {
1829            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1830            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1831                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1832            }
1833            entries_by_id_edits.push(Edit::Insert(PathEntry {
1834                id: entry.id,
1835                path: entry.path.clone(),
1836                is_ignored: entry.is_ignored,
1837                scan_id,
1838            }));
1839            entries_by_path_edits.push(Edit::Insert(entry));
1840        }
1841
1842        self.entries_by_path.edit(entries_by_path_edits, &());
1843        self.entries_by_id.edit(entries_by_id_edits, &());
1844
1845        Ok(())
1846    }
1847
1848    fn assign(&mut self, mut other: Self) {
1849        mem::swap(
1850            &mut self.diagnostic_summaries,
1851            &mut other.diagnostic_summaries,
1852        );
1853        *self = other;
1854    }
1855
1856    pub fn file_count(&self) -> usize {
1857        self.entries_by_path.summary().file_count
1858    }
1859
1860    pub fn visible_file_count(&self) -> usize {
1861        self.entries_by_path.summary().visible_file_count
1862    }
1863
1864    fn traverse_from_offset(
1865        &self,
1866        include_dirs: bool,
1867        include_ignored: bool,
1868        start_offset: usize,
1869    ) -> Traversal {
1870        let mut cursor = self.entries_by_path.cursor();
1871        cursor.seek(
1872            &TraversalTarget::Count {
1873                count: start_offset,
1874                include_dirs,
1875                include_ignored,
1876            },
1877            Bias::Right,
1878            &(),
1879        );
1880        Traversal {
1881            cursor,
1882            include_dirs,
1883            include_ignored,
1884        }
1885    }
1886
1887    fn traverse_from_path(
1888        &self,
1889        include_dirs: bool,
1890        include_ignored: bool,
1891        path: &Path,
1892    ) -> Traversal {
1893        let mut cursor = self.entries_by_path.cursor();
1894        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1895        Traversal {
1896            cursor,
1897            include_dirs,
1898            include_ignored,
1899        }
1900    }
1901
1902    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1903        self.traverse_from_offset(false, include_ignored, start)
1904    }
1905
1906    pub fn entries(&self, include_ignored: bool) -> Traversal {
1907        self.traverse_from_offset(true, include_ignored, 0)
1908    }
1909
1910    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1911        let empty_path = Path::new("");
1912        self.entries_by_path
1913            .cursor::<()>()
1914            .filter(move |entry| entry.path.as_ref() != empty_path)
1915            .map(|entry| &entry.path)
1916    }
1917
1918    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1919        let mut cursor = self.entries_by_path.cursor();
1920        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1921        let traversal = Traversal {
1922            cursor,
1923            include_dirs: true,
1924            include_ignored: true,
1925        };
1926        ChildEntriesIter {
1927            traversal,
1928            parent_path,
1929        }
1930    }
1931
1932    pub fn root_entry(&self) -> Option<&Entry> {
1933        self.entry_for_path("")
1934    }
1935
1936    pub fn root_name(&self) -> &str {
1937        &self.root_name
1938    }
1939
1940    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1941        let path = path.as_ref();
1942        self.traverse_from_path(true, true, path)
1943            .entry()
1944            .and_then(|entry| {
1945                if entry.path.as_ref() == path {
1946                    Some(entry)
1947                } else {
1948                    None
1949                }
1950            })
1951    }
1952
1953    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
1954        let entry = self.entries_by_id.get(&id, &())?;
1955        self.entry_for_path(&entry.path)
1956    }
1957
1958    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1959        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1960    }
1961
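        // Inserts or replaces a single entry, reusing an existing entry id where
        // possible. If the entry is a `.gitignore` file, the ignore matcher for
        // its parent directory is rebuilt from disk first.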
1962    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1963        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1964            let abs_path = self.abs_path.join(&entry.path);
1965            match build_gitignore(&abs_path, fs) {
1966                Ok(ignore) => {
1967                    let ignore_dir_path = entry.path.parent().unwrap();
1968                    self.ignores
1969                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
1970                }
1971                Err(error) => {
1972                    log::error!(
1973                        "error loading .gitignore file {:?} - {:?}",
1974                        &entry.path,
1975                        error
1976                    );
1977                }
1978            }
1979        }
1980
1981        self.reuse_entry_id(&mut entry);
1982        self.entries_by_path.insert_or_replace(entry.clone(), &());
1983        self.entries_by_id.insert_or_replace(
1984            PathEntry {
1985                id: entry.id,
1986                path: entry.path.clone(),
1987                is_ignored: entry.is_ignored,
1988                scan_id: self.scan_id,
1989            },
1990            &(),
1991        );
1992        entry
1993    }
1994
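        // Fills in the contents of a directory that was previously `PendingDir`,
        // marking it as `Dir`, registering its `.gitignore` matcher if one was
        // found, and inserting all of its child entries.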
1995    fn populate_dir(
1996        &mut self,
1997        parent_path: Arc<Path>,
1998        entries: impl IntoIterator<Item = Entry>,
1999        ignore: Option<Arc<Gitignore>>,
2000    ) {
2001        let mut parent_entry = self
2002            .entries_by_path
2003            .get(&PathKey(parent_path.clone()), &())
2004            .unwrap()
2005            .clone();
2006        if let Some(ignore) = ignore {
2007            self.ignores.insert(parent_path, (ignore, self.scan_id));
2008        }
2009        if matches!(parent_entry.kind, EntryKind::PendingDir) {
2010            parent_entry.kind = EntryKind::Dir;
2011        } else {
2012            unreachable!();
2013        }
2014
2015        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2016        let mut entries_by_id_edits = Vec::new();
2017
2018        for mut entry in entries {
2019            self.reuse_entry_id(&mut entry);
2020            entries_by_id_edits.push(Edit::Insert(PathEntry {
2021                id: entry.id,
2022                path: entry.path.clone(),
2023                is_ignored: entry.is_ignored,
2024                scan_id: self.scan_id,
2025            }));
2026            entries_by_path_edits.push(Edit::Insert(entry));
2027        }
2028
2029        self.entries_by_path.edit(entries_by_path_edits, &());
2030        self.entries_by_id.edit(entries_by_id_edits, &());
2031    }
2032
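        // Keeps entry ids stable across rescans: if an entry with the same inode
        // was removed earlier in this scan, or an entry already exists at the same
        // path, the new entry inherits that id.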
2033    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2034        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2035            entry.id = removed_entry_id;
2036        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2037            entry.id = existing_entry.id;
2038        }
2039    }
2040
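        // Removes `path` and everything beneath it by slicing the path-ordered
        // tree into the prefix before `path`, the removed subtree, and the suffix
        // after it, recording the removed ids by inode so they can be reused if
        // the same files reappear in this batch of events.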
2041    fn remove_path(&mut self, path: &Path) {
2042        let mut new_entries;
2043        let removed_entries;
2044        {
2045            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2046            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2047            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2048            new_entries.push_tree(cursor.suffix(&()), &());
2049        }
2050        self.entries_by_path = new_entries;
2051
2052        let mut entries_by_id_edits = Vec::new();
2053        for entry in removed_entries.cursor::<()>() {
2054            let removed_entry_id = self
2055                .removed_entry_ids
2056                .entry(entry.inode)
2057                .or_insert(entry.id);
2058            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2059            entries_by_id_edits.push(Edit::Remove(entry.id));
2060        }
2061        self.entries_by_id.edit(entries_by_id_edits, &());
2062
2063        if path.file_name() == Some(&GITIGNORE) {
2064            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
2065                *scan_id = self.scan_id;
2066            }
2067        }
2068    }
2069
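        // Builds the stack of `.gitignore` matchers that applies to `path` by
        // walking its ancestors from the root downward, short-circuiting to
        // `IgnoreStack::all` as soon as an ancestor directory is itself ignored.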
2070    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2071        let mut new_ignores = Vec::new();
2072        for ancestor in path.ancestors().skip(1) {
2073            if let Some((ignore, _)) = self.ignores.get(ancestor) {
2074                new_ignores.push((ancestor, Some(ignore.clone())));
2075            } else {
2076                new_ignores.push((ancestor, None));
2077            }
2078        }
2079
2080        let mut ignore_stack = IgnoreStack::none();
2081        for (parent_path, ignore) in new_ignores.into_iter().rev() {
2082            if ignore_stack.is_path_ignored(&parent_path, true) {
2083                ignore_stack = IgnoreStack::all();
2084                break;
2085            } else if let Some(ignore) = ignore {
2086                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
2087            }
2088        }
2089
2090        if ignore_stack.is_path_ignored(path, is_dir) {
2091            ignore_stack = IgnoreStack::all();
2092        }
2093
2094        ignore_stack
2095    }
2096}
2097
2098impl fmt::Debug for Snapshot {
2099    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2100        for entry in self.entries_by_path.cursor::<()>() {
2101            for _ in entry.path.ancestors().skip(1) {
2102                write!(f, " ")?;
2103            }
2104            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2105        }
2106        Ok(())
2107    }
2108}
2109
2110#[derive(Clone, PartialEq)]
2111pub struct File {
2112    entry_id: Option<usize>,
2113    worktree: ModelHandle<Worktree>,
2114    worktree_path: Arc<Path>,
2115    pub path: Arc<Path>,
2116    pub mtime: SystemTime,
2117    is_local: bool,
2118}
2119
2120impl language::File for File {
2121    fn mtime(&self) -> SystemTime {
2122        self.mtime
2123    }
2124
2125    fn path(&self) -> &Arc<Path> {
2126        &self.path
2127    }
2128
2129    fn abs_path(&self) -> Option<PathBuf> {
2130        if self.is_local {
2131            Some(self.worktree_path.join(&self.path))
2132        } else {
2133            None
2134        }
2135    }
2136
2137    fn full_path(&self) -> PathBuf {
2138        let mut full_path = PathBuf::new();
2139        if let Some(worktree_name) = self.worktree_path.file_name() {
2140            full_path.push(worktree_name);
2141        }
2142        full_path.push(&self.path);
2143        full_path
2144    }
2145
2146    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2147    /// of its worktree, then this method will return the name of the worktree itself.
2148    fn file_name<'a>(&'a self) -> Option<OsString> {
2149        self.path
2150            .file_name()
2151            .or_else(|| self.worktree_path.file_name())
2152            .map(Into::into)
2153    }
2154
2155    fn is_deleted(&self) -> bool {
2156        self.entry_id.is_none()
2157    }
2158
2159    fn save(
2160        &self,
2161        buffer_id: u64,
2162        text: Rope,
2163        version: clock::Global,
2164        cx: &mut MutableAppContext,
2165    ) -> Task<Result<(clock::Global, SystemTime)>> {
2166        let worktree_id = self.worktree.read(cx).id().to_proto();
2167        self.worktree.update(cx, |worktree, cx| match worktree {
2168            Worktree::Local(worktree) => {
2169                let rpc = worktree.client.clone();
2170                let project_id = worktree.share.as_ref().map(|share| share.project_id);
2171                let save = worktree.save(self.path.clone(), text, cx);
2172                cx.background().spawn(async move {
2173                    let entry = save.await?;
2174                    if let Some(project_id) = project_id {
2175                        rpc.send(proto::BufferSaved {
2176                            project_id,
2177                            worktree_id,
2178                            buffer_id,
2179                            version: (&version).into(),
2180                            mtime: Some(entry.mtime.into()),
2181                        })
2182                        .await?;
2183                    }
2184                    Ok((version, entry.mtime))
2185                })
2186            }
2187            Worktree::Remote(worktree) => {
2188                let rpc = worktree.client.clone();
2189                let project_id = worktree.project_id;
2190                cx.foreground().spawn(async move {
2191                    let response = rpc
2192                        .request(proto::SaveBuffer {
2193                            project_id,
2194                            worktree_id,
2195                            buffer_id,
2196                        })
2197                        .await?;
2198                    let version = response.version.try_into()?;
2199                    let mtime = response
2200                        .mtime
2201                        .ok_or_else(|| anyhow!("missing mtime"))?
2202                        .into();
2203                    Ok((version, mtime))
2204                })
2205            }
2206        })
2207    }
2208
2209    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
2210        let worktree = self.worktree.read(cx).as_local()?;
2211        let abs_path = worktree.absolutize(&self.path);
2212        let fs = worktree.fs.clone();
2213        Some(
2214            cx.background()
2215                .spawn(async move { fs.load(&abs_path).await }),
2216        )
2217    }
2218
2219    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
2220        self.worktree.update(cx, |worktree, cx| {
2221            worktree.send_buffer_update(buffer_id, operation, cx);
2222        });
2223    }
2224
2225    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
2226        self.worktree.update(cx, |worktree, cx| {
2227            if let Worktree::Remote(worktree) = worktree {
2228                let project_id = worktree.project_id;
2229                let worktree_id = worktree.id().to_proto();
2230                let rpc = worktree.client.clone();
2231                cx.background()
2232                    .spawn(async move {
2233                        if let Err(error) = rpc
2234                            .send(proto::CloseBuffer {
2235                                project_id,
2236                                worktree_id,
2237                                buffer_id,
2238                            })
2239                            .await
2240                        {
2241                            log::error!("error closing remote buffer: {}", error);
2242                        }
2243                    })
2244                    .detach();
2245            }
2246        });
2247    }
2248
2249    fn as_any(&self) -> &dyn Any {
2250        self
2251    }
2252}
2253
2254impl File {
2255    pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2256        file.and_then(|f| f.as_any().downcast_ref())
2257    }
2258
2259    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2260        self.worktree.read(cx).id()
2261    }
2262}
2263
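// A single file or directory within the worktree. Ids are reused across
// rescans where possible (see `reuse_entry_id`), and directories start out as
// `PendingDir` until their contents have been scanned.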
2264#[derive(Clone, Debug)]
2265pub struct Entry {
2266    pub id: usize,
2267    pub kind: EntryKind,
2268    pub path: Arc<Path>,
2269    pub inode: u64,
2270    pub mtime: SystemTime,
2271    pub is_symlink: bool,
2272    pub is_ignored: bool,
2273}
2274
2275#[derive(Clone, Debug)]
2276pub enum EntryKind {
2277    PendingDir,
2278    Dir,
2279    File(CharBag),
2280}
2281
2282impl Entry {
2283    fn new(
2284        path: Arc<Path>,
2285        metadata: &fs::Metadata,
2286        next_entry_id: &AtomicUsize,
2287        root_char_bag: CharBag,
2288    ) -> Self {
2289        Self {
2290            id: next_entry_id.fetch_add(1, SeqCst),
2291            kind: if metadata.is_dir {
2292                EntryKind::PendingDir
2293            } else {
2294                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2295            },
2296            path,
2297            inode: metadata.inode,
2298            mtime: metadata.mtime,
2299            is_symlink: metadata.is_symlink,
2300            is_ignored: false,
2301        }
2302    }
2303
2304    pub fn is_dir(&self) -> bool {
2305        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2306    }
2307
2308    pub fn is_file(&self) -> bool {
2309        matches!(self.kind, EntryKind::File(_))
2310    }
2311}
2312
2313impl sum_tree::Item for Entry {
2314    type Summary = EntrySummary;
2315
2316    fn summary(&self) -> Self::Summary {
2317        let visible_count = if self.is_ignored { 0 } else { 1 };
2318        let file_count;
2319        let visible_file_count;
2320        if self.is_file() {
2321            file_count = 1;
2322            visible_file_count = visible_count;
2323        } else {
2324            file_count = 0;
2325            visible_file_count = 0;
2326        }
2327
2328        EntrySummary {
2329            max_path: self.path.clone(),
2330            count: 1,
2331            visible_count,
2332            file_count,
2333            visible_file_count,
2334        }
2335    }
2336}
2337
2338impl sum_tree::KeyedItem for Entry {
2339    type Key = PathKey;
2340
2341    fn key(&self) -> Self::Key {
2342        PathKey(self.path.clone())
2343    }
2344}
2345
2346#[derive(Clone, Debug)]
2347pub struct EntrySummary {
2348    max_path: Arc<Path>,
2349    count: usize,
2350    visible_count: usize,
2351    file_count: usize,
2352    visible_file_count: usize,
2353}
2354
2355impl Default for EntrySummary {
2356    fn default() -> Self {
2357        Self {
2358            max_path: Arc::from(Path::new("")),
2359            count: 0,
2360            visible_count: 0,
2361            file_count: 0,
2362            visible_file_count: 0,
2363        }
2364    }
2365}
2366
2367impl sum_tree::Summary for EntrySummary {
2368    type Context = ();
2369
2370    fn add_summary(&mut self, rhs: &Self, _: &()) {
2371        self.max_path = rhs.max_path.clone();
            self.count += rhs.count;
2372        self.visible_count += rhs.visible_count;
2373        self.file_count += rhs.file_count;
2374        self.visible_file_count += rhs.visible_file_count;
2375    }
2376}
2377
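// An entry in the id-ordered tree, mirroring the path-ordered `Entry` tree so
// that entries can be looked up and snapshots diffed by id.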
2378#[derive(Clone, Debug)]
2379struct PathEntry {
2380    id: usize,
2381    path: Arc<Path>,
2382    is_ignored: bool,
2383    scan_id: usize,
2384}
2385
2386impl sum_tree::Item for PathEntry {
2387    type Summary = PathEntrySummary;
2388
2389    fn summary(&self) -> Self::Summary {
2390        PathEntrySummary { max_id: self.id }
2391    }
2392}
2393
2394impl sum_tree::KeyedItem for PathEntry {
2395    type Key = usize;
2396
2397    fn key(&self) -> Self::Key {
2398        self.id
2399    }
2400}
2401
2402#[derive(Clone, Debug, Default)]
2403struct PathEntrySummary {
2404    max_id: usize,
2405}
2406
2407impl sum_tree::Summary for PathEntrySummary {
2408    type Context = ();
2409
2410    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2411        self.max_id = summary.max_id;
2412    }
2413}
2414
2415impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
2416    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2417        *self = summary.max_id;
2418    }
2419}
2420
2421#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2422pub struct PathKey(Arc<Path>);
2423
2424impl Default for PathKey {
2425    fn default() -> Self {
2426        Self(Path::new("").into())
2427    }
2428}
2429
2430impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2431    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2432        self.0 = summary.max_path.clone();
2433    }
2434}
2435
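// Scans the worktree on a background executor, keeping the shared snapshot up
// to date and reporting progress over the `notify` channel as `ScanState`
// transitions.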
2436struct BackgroundScanner {
2437    fs: Arc<dyn Fs>,
2438    snapshot: Arc<Mutex<Snapshot>>,
2439    notify: Sender<ScanState>,
2440    executor: Arc<executor::Background>,
2441}
2442
2443impl BackgroundScanner {
2444    fn new(
2445        snapshot: Arc<Mutex<Snapshot>>,
2446        notify: Sender<ScanState>,
2447        fs: Arc<dyn Fs>,
2448        executor: Arc<executor::Background>,
2449    ) -> Self {
2450        Self {
2451            fs,
2452            snapshot,
2453            notify,
2454            executor,
2455        }
2456    }
2457
2458    fn abs_path(&self) -> Arc<Path> {
2459        self.snapshot.lock().abs_path.clone()
2460    }
2461
2462    fn snapshot(&self) -> Snapshot {
2463        self.snapshot.lock().clone()
2464    }
2465
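        // Drives the scanner: emits `Scanning`, performs the initial scan
        // (reporting any error), emits `Idle`, and then repeats the scanning/idle
        // cycle for each batch of file-system events received on `events_rx`.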
2466    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2467        if self.notify.send(ScanState::Scanning).await.is_err() {
2468            return;
2469        }
2470
2471        if let Err(err) = self.scan_dirs().await {
2472            if self
2473                .notify
2474                .send(ScanState::Err(Arc::new(err)))
2475                .await
2476                .is_err()
2477            {
2478                return;
2479            }
2480        }
2481
2482        if self.notify.send(ScanState::Idle).await.is_err() {
2483            return;
2484        }
2485
2486        futures::pin_mut!(events_rx);
2487        while let Some(events) = events_rx.next().await {
2488            if self.notify.send(ScanState::Scanning).await.is_err() {
2489                break;
2490            }
2491
2492            if !self.process_events(events).await {
2493                break;
2494            }
2495
2496            if self.notify.send(ScanState::Idle).await.is_err() {
2497                break;
2498            }
2499        }
2500    }
2501
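        // Performs the initial recursive scan of the root directory. Work is
        // spread across `num_cpus` tasks reading from an unbounded channel of
        // `ScanJob`s; each job carries a clone of the sender so newly discovered
        // subdirectories can enqueue more work, and the scan completes once every
        // sender has been dropped.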
2502    async fn scan_dirs(&mut self) -> Result<()> {
2503        let root_char_bag;
2504        let next_entry_id;
2505        let is_dir;
2506        {
2507            let snapshot = self.snapshot.lock();
2508            root_char_bag = snapshot.root_char_bag;
2509            next_entry_id = snapshot.next_entry_id.clone();
2510            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
2511        };
2512
2513        if is_dir {
2514            let path: Arc<Path> = Arc::from(Path::new(""));
2515            let abs_path = self.abs_path();
2516            let (tx, rx) = channel::unbounded();
2517            tx.send(ScanJob {
2518                abs_path: abs_path.to_path_buf(),
2519                path,
2520                ignore_stack: IgnoreStack::none(),
2521                scan_queue: tx.clone(),
2522            })
2523            .await
2524            .unwrap();
2525            drop(tx);
2526
2527            self.executor
2528                .scoped(|scope| {
2529                    for _ in 0..self.executor.num_cpus() {
2530                        scope.spawn(async {
2531                            while let Ok(job) = rx.recv().await {
2532                                if let Err(err) = self
2533                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2534                                    .await
2535                                {
2536                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
2537                                }
2538                            }
2539                        });
2540                    }
2541                })
2542                .await;
2543        }
2544
2545        Ok(())
2546    }
2547
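        // Scans a single directory: stats each child, builds its entry, applies
        // the current ignore stack (reloading it if this directory contains a
        // `.gitignore`), publishes the results via `populate_dir`, and enqueues
        // scan jobs for any subdirectories.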
2548    async fn scan_dir(
2549        &self,
2550        root_char_bag: CharBag,
2551        next_entry_id: Arc<AtomicUsize>,
2552        job: &ScanJob,
2553    ) -> Result<()> {
2554        let mut new_entries: Vec<Entry> = Vec::new();
2555        let mut new_jobs: Vec<ScanJob> = Vec::new();
2556        let mut ignore_stack = job.ignore_stack.clone();
2557        let mut new_ignore = None;
2558
2559        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2560        while let Some(child_abs_path) = child_paths.next().await {
2561            let child_abs_path = match child_abs_path {
2562                Ok(child_abs_path) => child_abs_path,
2563                Err(error) => {
2564                    log::error!("error processing entry {:?}", error);
2565                    continue;
2566                }
2567            };
2568            let child_name = child_abs_path.file_name().unwrap();
2569            let child_path: Arc<Path> = job.path.join(child_name).into();
2570            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
2571                Some(metadata) => metadata,
2572                None => continue,
2573            };
2574
2575            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2576            if child_name == *GITIGNORE {
2577                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
2578                    Ok(ignore) => {
2579                        let ignore = Arc::new(ignore);
2580                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2581                        new_ignore = Some(ignore);
2582                    }
2583                    Err(error) => {
2584                        log::error!(
2585                            "error loading .gitignore file {:?} - {:?}",
2586                            child_name,
2587                            error
2588                        );
2589                    }
2590                }
2591
2592                // Update ignore status of any child entries we've already processed to reflect the
2593                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it is
2594                // usually encountered early in the scan, so these entries should rarely be numerous.
2595                // Update the ignore stack associated with any new jobs as well.
2596                let mut new_jobs = new_jobs.iter_mut();
2597                for entry in &mut new_entries {
2598                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2599                    if entry.is_dir() {
2600                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2601                            IgnoreStack::all()
2602                        } else {
2603                            ignore_stack.clone()
2604                        };
2605                    }
2606                }
2607            }
2608
2609            let mut child_entry = Entry::new(
2610                child_path.clone(),
2611                &child_metadata,
2612                &next_entry_id,
2613                root_char_bag,
2614            );
2615
2616            if child_metadata.is_dir {
2617                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
2618                child_entry.is_ignored = is_ignored;
2619                new_entries.push(child_entry);
2620                new_jobs.push(ScanJob {
2621                    abs_path: child_abs_path,
2622                    path: child_path,
2623                    ignore_stack: if is_ignored {
2624                        IgnoreStack::all()
2625                    } else {
2626                        ignore_stack.clone()
2627                    },
2628                    scan_queue: job.scan_queue.clone(),
2629                });
2630            } else {
2631                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
2632                new_entries.push(child_entry);
2633            };
2634        }
2635
2636        self.snapshot
2637            .lock()
2638            .populate_dir(job.path.clone(), new_entries, new_ignore);
2639        for new_job in new_jobs {
2640            job.scan_queue.send(new_job).await.unwrap();
2641        }
2642
2643        Ok(())
2644    }
2645
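        // Reconciles the snapshot with a batch of file-system events: events are
        // sorted and de-duplicated so that an event for a parent path subsumes
        // events for its descendants, stale entries are removed, changed paths are
        // re-stat'ed and re-inserted, and newly created directories are scanned in
        // parallel.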
2646    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2647        let mut snapshot = self.snapshot();
2648        snapshot.scan_id += 1;
2649
2650        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
2651            abs_path
2652        } else {
2653            return false;
2654        };
2655        let root_char_bag = snapshot.root_char_bag;
2656        let next_entry_id = snapshot.next_entry_id.clone();
2657
2658        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2659        events.dedup_by(|a, b| a.path.starts_with(&b.path));
2660
2661        for event in &events {
2662            match event.path.strip_prefix(&root_abs_path) {
2663                Ok(path) => snapshot.remove_path(&path),
2664                Err(_) => {
2665                    log::error!(
2666                        "unexpected event {:?} for root path {:?}",
2667                        event.path,
2668                        root_abs_path
2669                    );
2670                    continue;
2671                }
2672            }
2673        }
2674
2675        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2676        for event in events {
2677            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
2678                Ok(path) => Arc::from(path.to_path_buf()),
2679                Err(_) => {
2680                    log::error!(
2681                        "unexpected event {:?} for root path {:?}",
2682                        event.path,
2683                        root_abs_path
2684                    );
2685                    continue;
2686                }
2687            };
2688
2689            match self.fs.metadata(&event.path).await {
2690                Ok(Some(metadata)) => {
2691                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
2692                    let mut fs_entry = Entry::new(
2693                        path.clone(),
2694                        &metadata,
2695                        snapshot.next_entry_id.as_ref(),
2696                        snapshot.root_char_bag,
2697                    );
2698                    fs_entry.is_ignored = ignore_stack.is_all();
2699                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
2700                    if metadata.is_dir {
2701                        scan_queue_tx
2702                            .send(ScanJob {
2703                                abs_path: event.path,
2704                                path,
2705                                ignore_stack,
2706                                scan_queue: scan_queue_tx.clone(),
2707                            })
2708                            .await
2709                            .unwrap();
2710                    }
2711                }
2712                Ok(None) => {}
2713                Err(err) => {
2714                    // TODO - create a special 'error' entry in the entries tree to mark this
2715                    log::error!("error reading file on event {:?}", err);
2716                }
2717            }
2718        }
2719
2720        *self.snapshot.lock() = snapshot;
2721
2722        // Scan any directories that were created as part of this event batch.
2723        drop(scan_queue_tx);
2724        self.executor
2725            .scoped(|scope| {
2726                for _ in 0..self.executor.num_cpus() {
2727                    scope.spawn(async {
2728                        while let Ok(job) = scan_queue_rx.recv().await {
2729                            if let Err(err) = self
2730                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2731                                .await
2732                            {
2733                                log::error!("error scanning {:?}: {}", job.abs_path, err);
2734                            }
2735                        }
2736                    });
2737                }
2738            })
2739            .await;
2740
2741        // Attempt to detect renames only over a single batch of file-system events.
2742        self.snapshot.lock().removed_entry_ids.clear();
2743
2744        self.update_ignore_statuses().await;
2745        true
2746    }
2747
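        // Re-evaluates ignore status after a scan: directories whose `.gitignore`
        // changed during this scan are re-walked (skipping descendants of paths
        // already queued), and ignore matchers whose `.gitignore` files no longer
        // exist are dropped.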
2748    async fn update_ignore_statuses(&self) {
2749        let mut snapshot = self.snapshot();
2750
2751        let mut ignores_to_update = Vec::new();
2752        let mut ignores_to_delete = Vec::new();
2753        for (parent_path, (_, scan_id)) in &snapshot.ignores {
2754            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2755                ignores_to_update.push(parent_path.clone());
2756            }
2757
2758            let ignore_path = parent_path.join(&*GITIGNORE);
2759            if snapshot.entry_for_path(ignore_path).is_none() {
2760                ignores_to_delete.push(parent_path.clone());
2761            }
2762        }
2763
2764        for parent_path in ignores_to_delete {
2765            snapshot.ignores.remove(&parent_path);
2766            self.snapshot.lock().ignores.remove(&parent_path);
2767        }
2768
2769        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2770        ignores_to_update.sort_unstable();
2771        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2772        while let Some(parent_path) = ignores_to_update.next() {
2773            while ignores_to_update
2774                .peek()
2775                .map_or(false, |p| p.starts_with(&parent_path))
2776            {
2777                ignores_to_update.next().unwrap();
2778            }
2779
2780            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2781            ignore_queue_tx
2782                .send(UpdateIgnoreStatusJob {
2783                    path: parent_path,
2784                    ignore_stack,
2785                    ignore_queue: ignore_queue_tx.clone(),
2786                })
2787                .await
2788                .unwrap();
2789        }
2790        drop(ignore_queue_tx);
2791
2792        self.executor
2793            .scoped(|scope| {
2794                for _ in 0..self.executor.num_cpus() {
2795                    scope.spawn(async {
2796                        while let Ok(job) = ignore_queue_rx.recv().await {
2797                            self.update_ignore_status(job, &snapshot).await;
2798                        }
2799                    });
2800                }
2801            })
2802            .await;
2803    }
2804
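        // Recomputes `is_ignored` for the direct children of `job.path`, queueing
        // a follow-up job for each child directory, and writes any changed entries
        // back into the shared snapshot.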
2805    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
2806        let mut ignore_stack = job.ignore_stack;
2807        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2808            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2809        }
2810
2811        let mut entries_by_id_edits = Vec::new();
2812        let mut entries_by_path_edits = Vec::new();
2813        for mut entry in snapshot.child_entries(&job.path).cloned() {
2814            let was_ignored = entry.is_ignored;
2815            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2816            if entry.is_dir() {
2817                let child_ignore_stack = if entry.is_ignored {
2818                    IgnoreStack::all()
2819                } else {
2820                    ignore_stack.clone()
2821                };
2822                job.ignore_queue
2823                    .send(UpdateIgnoreStatusJob {
2824                        path: entry.path.clone(),
2825                        ignore_stack: child_ignore_stack,
2826                        ignore_queue: job.ignore_queue.clone(),
2827                    })
2828                    .await
2829                    .unwrap();
2830            }
2831
2832            if entry.is_ignored != was_ignored {
2833                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2834                path_entry.scan_id = snapshot.scan_id;
2835                path_entry.is_ignored = entry.is_ignored;
2836                entries_by_id_edits.push(Edit::Insert(path_entry));
2837                entries_by_path_edits.push(Edit::Insert(entry));
2838            }
2839        }
2840
2841        let mut snapshot = self.snapshot.lock();
2842        snapshot.entries_by_path.edit(entries_by_path_edits, &());
2843        snapshot.entries_by_id.edit(entries_by_id_edits, &());
2844    }
2845}
2846
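// Stats `abs_path` on disk, builds a fresh `Entry` for it, and inserts it into
// the shared snapshot so that loads and saves are reflected immediately rather
// than waiting for the next file-system event.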
2847async fn refresh_entry(
2848    fs: &dyn Fs,
2849    snapshot: &Mutex<Snapshot>,
2850    path: Arc<Path>,
2851    abs_path: &Path,
2852) -> Result<Entry> {
2853    let root_char_bag;
2854    let next_entry_id;
2855    {
2856        let snapshot = snapshot.lock();
2857        root_char_bag = snapshot.root_char_bag;
2858        next_entry_id = snapshot.next_entry_id.clone();
2859    }
2860    let entry = Entry::new(
2861        path,
2862        &fs.metadata(abs_path)
2863            .await?
2864            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2865        &next_entry_id,
2866        root_char_bag,
2867    );
2868    Ok(snapshot.lock().insert_entry(entry, fs))
2869}
2870
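// Extends the root's character bag with the lowercased characters of `path`,
// producing the bag used for fuzzy matching of this entry.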
2871fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2872    let mut result = root_char_bag;
2873    result.extend(
2874        path.to_string_lossy()
2875            .chars()
2876            .map(|c| c.to_ascii_lowercase()),
2877    );
2878    result
2879}
2880
2881struct ScanJob {
2882    abs_path: PathBuf,
2883    path: Arc<Path>,
2884    ignore_stack: Arc<IgnoreStack>,
2885    scan_queue: Sender<ScanJob>,
2886}
2887
2888struct UpdateIgnoreStatusJob {
2889    path: Arc<Path>,
2890    ignore_stack: Arc<IgnoreStack>,
2891    ignore_queue: Sender<UpdateIgnoreStatusJob>,
2892}
2893
2894pub trait WorktreeHandle {
2895    #[cfg(test)]
2896    fn flush_fs_events<'a>(
2897        &self,
2898        cx: &'a gpui::TestAppContext,
2899    ) -> futures::future::LocalBoxFuture<'a, ()>;
2900}
2901
2902impl WorktreeHandle for ModelHandle<Worktree> {
2903    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2904    // occurred before the worktree was constructed. These events can cause the worktree to perform
2905    // extra directory scans, and emit extra scan-state notifications.
2906    //
2907    // This function mutates the worktree's directory and waits for those mutations to be picked up,
2908    // to ensure that all redundant FS events have already been processed.
2909    #[cfg(test)]
2910    fn flush_fs_events<'a>(
2911        &self,
2912        cx: &'a gpui::TestAppContext,
2913    ) -> futures::future::LocalBoxFuture<'a, ()> {
2914        use smol::future::FutureExt;
2915
2916        let filename = "fs-event-sentinel";
2917        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
2918        let tree = self.clone();
2919        async move {
2920            std::fs::write(root_path.join(filename), "").unwrap();
2921            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
2922                .await;
2923
2924            std::fs::remove_file(root_path.join(filename)).unwrap();
2925            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
2926                .await;
2927
2928            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2929                .await;
2930        }
2931        .boxed_local()
2932    }
2933}
2934
2935#[derive(Clone, Debug)]
2936struct TraversalProgress<'a> {
2937    max_path: &'a Path,
2938    count: usize,
2939    visible_count: usize,
2940    file_count: usize,
2941    visible_file_count: usize,
2942}
2943
2944impl<'a> TraversalProgress<'a> {
2945    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2946        match (include_ignored, include_dirs) {
2947            (true, true) => self.count,
2948            (true, false) => self.file_count,
2949            (false, true) => self.visible_count,
2950            (false, false) => self.visible_file_count,
2951        }
2952    }
2953}
2954
2955impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2956    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2957        self.max_path = summary.max_path.as_ref();
2958        self.count += summary.count;
2959        self.visible_count += summary.visible_count;
2960        self.file_count += summary.file_count;
2961        self.visible_file_count += summary.visible_file_count;
2962    }
2963}
2964
2965impl<'a> Default for TraversalProgress<'a> {
2966    fn default() -> Self {
2967        Self {
2968            max_path: Path::new(""),
2969            count: 0,
2970            visible_count: 0,
2971            file_count: 0,
2972            visible_file_count: 0,
2973        }
2974    }
2975}
2976
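/// A cursor-based iterator over a worktree snapshot's entries in path order,
/// optionally including directories and ignored entries.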
2977pub struct Traversal<'a> {
2978    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2979    include_ignored: bool,
2980    include_dirs: bool,
2981}
2982
2983impl<'a> Traversal<'a> {
2984    pub fn advance(&mut self) -> bool {
2985        self.advance_to_offset(self.offset() + 1)
2986    }
2987
2988    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2989        self.cursor.seek_forward(
2990            &TraversalTarget::Count {
2991                count: offset,
2992                include_dirs: self.include_dirs,
2993                include_ignored: self.include_ignored,
2994            },
2995            Bias::Right,
2996            &(),
2997        )
2998    }
2999
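    /// Skips the current entry's descendants, advancing to the next sibling
    /// (or an ancestor's sibling), and repeats until it lands on an entry that
    /// satisfies the `include_dirs`/`include_ignored` flags. Returns `false`
    /// if the traversal is exhausted first.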
3000    pub fn advance_to_sibling(&mut self) -> bool {
3001        while let Some(entry) = self.cursor.item() {
3002            self.cursor.seek_forward(
3003                &TraversalTarget::PathSuccessor(&entry.path),
3004                Bias::Left,
3005                &(),
3006            );
3007            if let Some(entry) = self.cursor.item() {
3008                if (self.include_dirs || !entry.is_dir())
3009                    && (self.include_ignored || !entry.is_ignored)
3010                {
3011                    return true;
3012                }
3013            }
3014        }
3015        false
3016    }
3017
3018    pub fn entry(&self) -> Option<&'a Entry> {
3019        self.cursor.item()
3020    }
3021
3022    pub fn offset(&self) -> usize {
3023        self.cursor
3024            .start()
3025            .count(self.include_dirs, self.include_ignored)
3026    }
3027}
3028
3029impl<'a> Iterator for Traversal<'a> {
3030    type Item = &'a Entry;
3031
3032    fn next(&mut self) -> Option<Self::Item> {
3033        if let Some(item) = self.entry() {
3034            self.advance();
3035            Some(item)
3036        } else {
3037            None
3038        }
3039    }
3040}
3041
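/// A seek target for a `Traversal` cursor: an exact path, the first path that
/// is not a descendant of a given path (`PathSuccessor`), or the entry at a
/// given index under the traversal's include flags (`Count`).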
3042#[derive(Debug)]
3043enum TraversalTarget<'a> {
3044    Path(&'a Path),
3045    PathSuccessor(&'a Path),
3046    Count {
3047        count: usize,
3048        include_ignored: bool,
3049        include_dirs: bool,
3050    },
3051}
3052
3053impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3054    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3055        match self {
3056            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3057            TraversalTarget::PathSuccessor(path) => {
3058                if !cursor_location.max_path.starts_with(path) {
3059                    Ordering::Equal
3060                } else {
3061                    Ordering::Greater
3062                }
3063            }
3064            TraversalTarget::Count {
3065                count,
3066                include_dirs,
3067                include_ignored,
3068            } => Ord::cmp(
3069                count,
3070                &cursor_location.count(*include_dirs, *include_ignored),
3071            ),
3072        }
3073    }
3074}
3075
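/// Iterates the immediate children of `parent_path` by repeatedly advancing
/// the underlying traversal to the next sibling.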
3076struct ChildEntriesIter<'a> {
3077    parent_path: &'a Path,
3078    traversal: Traversal<'a>,
3079}
3080
3081impl<'a> Iterator for ChildEntriesIter<'a> {
3082    type Item = &'a Entry;
3083
3084    fn next(&mut self) -> Option<Self::Item> {
3085        if let Some(item) = self.traversal.entry() {
3086            if item.path.starts_with(&self.parent_path) {
3087                self.traversal.advance_to_sibling();
3088                return Some(item);
3089            }
3090        }
3091        None
3092    }
3093}
3094
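// Conversions between the in-memory `Entry` type and its protobuf
// representation, used when synchronizing entries with remote worktrees.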
3095impl<'a> From<&'a Entry> for proto::Entry {
3096    fn from(entry: &'a Entry) -> Self {
3097        Self {
3098            id: entry.id as u64,
3099            is_dir: entry.is_dir(),
3100            path: entry.path.to_string_lossy().to_string(),
3101            inode: entry.inode,
3102            mtime: Some(entry.mtime.into()),
3103            is_symlink: entry.is_symlink,
3104            is_ignored: entry.is_ignored,
3105        }
3106    }
3107}
3108
3109impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3110    type Error = anyhow::Error;
3111
3112    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3113        if let Some(mtime) = entry.mtime {
3114            let kind = if entry.is_dir {
3115                EntryKind::Dir
3116            } else {
3117                let mut char_bag = root_char_bag.clone();
3118                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3119                EntryKind::File(char_bag)
3120            };
3121            let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3122            Ok(Entry {
3123                id: entry.id as usize,
3124                kind,
3125                path: path.clone(),
3126                inode: entry.inode,
3127                mtime: mtime.into(),
3128                is_symlink: entry.is_symlink,
3129                is_ignored: entry.is_ignored,
3130            })
3131        } else {
3132            Err(anyhow!(
3133                "missing mtime in remote worktree entry {:?}",
3134                entry.path
3135            ))
3136        }
3137    }
3138}
3139
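/// Conversion from an LSP position (zero-based line and UTF-16 character
/// offset) into the buffer's `PointUtf16` coordinates.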
3140trait ToPointUtf16 {
3141    fn to_point_utf16(self) -> PointUtf16;
3142}
3143
3144impl ToPointUtf16 for lsp::Position {
3145    fn to_point_utf16(self) -> PointUtf16 {
3146        PointUtf16::new(self.line, self.character)
3147    }
3148}
3149
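/// Converts an LSP range into a `PointUtf16` range, for example:
///
/// ```ignore
/// let range = range_from_lsp(lsp::Range::new(
///     lsp::Position::new(0, 9),
///     lsp::Position::new(0, 10),
/// ));
/// assert_eq!(range, PointUtf16::new(0, 9)..PointUtf16::new(0, 10));
/// ```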
3150fn range_from_lsp(range: lsp::Range) -> Range<PointUtf16> {
3151    let start = PointUtf16::new(range.start.line, range.start.character);
3152    let end = PointUtf16::new(range.end.line, range.end.character);
3153    start..end
3154}
3155
3156#[cfg(test)]
3157mod tests {
3158    use super::*;
3159    use crate::fs::FakeFs;
3160    use anyhow::Result;
3161    use client::test::{FakeHttpClient, FakeServer};
3162    use fs::RealFs;
3163    use gpui::test::subscribe;
3164    use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3165    use language::{Diagnostic, LanguageConfig};
3166    use lsp::Url;
3167    use rand::prelude::*;
3168    use serde_json::json;
3169    use std::{cell::RefCell, rc::Rc};
3170    use std::{
3171        env,
3172        fmt::Write,
3173        time::{SystemTime, UNIX_EPOCH},
3174    };
3175    use text::Point;
3176    use unindent::Unindent as _;
3177    use util::test::temp_tree;
3178
3179    #[gpui::test]
3180    async fn test_traversal(mut cx: gpui::TestAppContext) {
3181        let fs = FakeFs::new();
3182        fs.insert_tree(
3183            "/root",
3184            json!({
3185               ".gitignore": "a/b\n",
3186               "a": {
3187                   "b": "",
3188                   "c": "",
3189               }
3190            }),
3191        )
3192        .await;
3193
3194        let http_client = FakeHttpClient::with_404_response();
3195        let client = Client::new(http_client.clone());
3196        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3197
3198        let tree = Worktree::open_local(
3199            client,
3200            user_store,
3201            Arc::from(Path::new("/root")),
3202            Arc::new(fs),
3203            Default::default(),
3204            &mut cx.to_async(),
3205        )
3206        .await
3207        .unwrap();
3208        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3209            .await;
3210
3211        tree.read_with(&cx, |tree, _| {
3212            assert_eq!(
3213                tree.entries(false)
3214                    .map(|entry| entry.path.as_ref())
3215                    .collect::<Vec<_>>(),
3216                vec![
3217                    Path::new(""),
3218                    Path::new(".gitignore"),
3219                    Path::new("a"),
3220                    Path::new("a/c"),
3221                ]
3222            );
3223        })
3224    }
3225
3226    #[gpui::test]
3227    async fn test_save_file(mut cx: gpui::TestAppContext) {
3228        let dir = temp_tree(json!({
3229            "file1": "the old contents",
3230        }));
3231
3232        let http_client = FakeHttpClient::with_404_response();
3233        let client = Client::new(http_client.clone());
3234        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3235
3236        let tree = Worktree::open_local(
3237            client,
3238            user_store,
3239            dir.path(),
3240            Arc::new(RealFs),
3241            Default::default(),
3242            &mut cx.to_async(),
3243        )
3244        .await
3245        .unwrap();
3246        let buffer = tree
3247            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3248            .await
3249            .unwrap();
3250        let save = buffer.update(&mut cx, |buffer, cx| {
3251            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3252            buffer.save(cx).unwrap()
3253        });
3254        save.await.unwrap();
3255
3256        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
3257        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3258    }
3259
3260    #[gpui::test]
3261    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
3262        let dir = temp_tree(json!({
3263            "file1": "the old contents",
3264        }));
3265        let file_path = dir.path().join("file1");
3266
3267        let http_client = FakeHttpClient::with_404_response();
3268        let client = Client::new(http_client.clone());
3269        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3270
3271        let tree = Worktree::open_local(
3272            client,
3273            user_store,
3274            file_path.clone(),
3275            Arc::new(RealFs),
3276            Default::default(),
3277            &mut cx.to_async(),
3278        )
3279        .await
3280        .unwrap();
3281        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3282            .await;
3283        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));
3284
3285        let buffer = tree
3286            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
3287            .await
3288            .unwrap();
3289        let save = buffer.update(&mut cx, |buffer, cx| {
3290            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3291            buffer.save(cx).unwrap()
3292        });
3293        save.await.unwrap();
3294
3295        let new_text = std::fs::read_to_string(file_path).unwrap();
3296        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3297    }
3298
3299    #[gpui::test]
3300    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3301        let dir = temp_tree(json!({
3302            "a": {
3303                "file1": "",
3304                "file2": "",
3305                "file3": "",
3306            },
3307            "b": {
3308                "c": {
3309                    "file4": "",
3310                    "file5": "",
3311                }
3312            }
3313        }));
3314
3315        let user_id = 5;
3316        let http_client = FakeHttpClient::with_404_response();
3317        let mut client = Client::new(http_client.clone());
3318        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3319        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3320        let tree = Worktree::open_local(
3321            client,
3322            user_store.clone(),
3323            dir.path(),
3324            Arc::new(RealFs),
3325            Default::default(),
3326            &mut cx.to_async(),
3327        )
3328        .await
3329        .unwrap();
3330
3331        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3332            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3333            async move { buffer.await.unwrap() }
3334        };
3335        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3336            tree.read_with(cx, |tree, _| {
3337                tree.entry_for_path(path)
3338                    .expect(&format!("no entry for path {}", path))
3339                    .id
3340            })
3341        };
3342
3343        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3344        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3345        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3346        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3347
3348        let file2_id = id_for_path("a/file2", &cx);
3349        let file3_id = id_for_path("a/file3", &cx);
3350        let file4_id = id_for_path("b/c/file4", &cx);
3351
3352        // Wait for the initial scan.
3353        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3354            .await;
3355
3356        // Create a remote copy of this worktree.
3357        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3358        let remote = Worktree::remote(
3359            1,
3360            1,
3361            initial_snapshot.to_proto(),
3362            Client::new(http_client.clone()),
3363            user_store,
3364            Default::default(),
3365            &mut cx.to_async(),
3366        )
3367        .await
3368        .unwrap();
3369
3370        cx.read(|cx| {
3371            assert!(!buffer2.read(cx).is_dirty());
3372            assert!(!buffer3.read(cx).is_dirty());
3373            assert!(!buffer4.read(cx).is_dirty());
3374            assert!(!buffer5.read(cx).is_dirty());
3375        });
3376
3377        // Rename and delete files and directories.
3378        tree.flush_fs_events(&cx).await;
3379        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3380        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3381        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3382        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3383        tree.flush_fs_events(&cx).await;
3384
3385        let expected_paths = vec![
3386            "a",
3387            "a/file1",
3388            "a/file2.new",
3389            "b",
3390            "d",
3391            "d/file3",
3392            "d/file4",
3393        ];
3394
3395        cx.read(|app| {
3396            assert_eq!(
3397                tree.read(app)
3398                    .paths()
3399                    .map(|p| p.to_str().unwrap())
3400                    .collect::<Vec<_>>(),
3401                expected_paths
3402            );
3403
3404            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3405            assert_eq!(id_for_path("d/file3", &cx), file3_id);
3406            assert_eq!(id_for_path("d/file4", &cx), file4_id);
3407
3408            assert_eq!(
3409                buffer2.read(app).file().unwrap().path().as_ref(),
3410                Path::new("a/file2.new")
3411            );
3412            assert_eq!(
3413                buffer3.read(app).file().unwrap().path().as_ref(),
3414                Path::new("d/file3")
3415            );
3416            assert_eq!(
3417                buffer4.read(app).file().unwrap().path().as_ref(),
3418                Path::new("d/file4")
3419            );
3420            assert_eq!(
3421                buffer5.read(app).file().unwrap().path().as_ref(),
3422                Path::new("b/c/file5")
3423            );
3424
3425            assert!(!buffer2.read(app).file().unwrap().is_deleted());
3426            assert!(!buffer3.read(app).file().unwrap().is_deleted());
3427            assert!(!buffer4.read(app).file().unwrap().is_deleted());
3428            assert!(buffer5.read(app).file().unwrap().is_deleted());
3429        });
3430
3431        // Update the remote worktree. Check that it becomes consistent with the
3432        // local worktree.
3433        remote.update(&mut cx, |remote, cx| {
3434            let update_message =
3435                tree.read(cx)
3436                    .snapshot()
3437                    .build_update(&initial_snapshot, 1, 1, true);
3438            remote
3439                .as_remote_mut()
3440                .unwrap()
3441                .snapshot
3442                .apply_update(update_message)
3443                .unwrap();
3444
3445            assert_eq!(
3446                remote
3447                    .paths()
3448                    .map(|p| p.to_str().unwrap())
3449                    .collect::<Vec<_>>(),
3450                expected_paths
3451            );
3452        });
3453    }
3454
3455    #[gpui::test]
3456    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3457        let dir = temp_tree(json!({
3458            ".git": {},
3459            ".gitignore": "ignored-dir\n",
3460            "tracked-dir": {
3461                "tracked-file1": "tracked contents",
3462            },
3463            "ignored-dir": {
3464                "ignored-file1": "ignored contents",
3465            }
3466        }));
3467
3468        let http_client = FakeHttpClient::with_404_response();
3469        let client = Client::new(http_client.clone());
3470        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3471
3472        let tree = Worktree::open_local(
3473            client,
3474            user_store,
3475            dir.path(),
3476            Arc::new(RealFs),
3477            Default::default(),
3478            &mut cx.to_async(),
3479        )
3480        .await
3481        .unwrap();
3482        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3483            .await;
3484        tree.flush_fs_events(&cx).await;
3485        cx.read(|cx| {
3486            let tree = tree.read(cx);
3487            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3488            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3489            assert_eq!(tracked.is_ignored, false);
3490            assert_eq!(ignored.is_ignored, true);
3491        });
3492
3493        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3494        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3495        tree.flush_fs_events(&cx).await;
3496        cx.read(|cx| {
3497            let tree = tree.read(cx);
3498            let dot_git = tree.entry_for_path(".git").unwrap();
3499            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3500            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3501            assert_eq!(tracked.is_ignored, false);
3502            assert_eq!(ignored.is_ignored, true);
3503            assert_eq!(dot_git.is_ignored, true);
3504        });
3505    }
3506
3507    #[gpui::test]
3508    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3509        let user_id = 100;
3510        let http_client = FakeHttpClient::with_404_response();
3511        let mut client = Client::new(http_client);
3512        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3513        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3514
3515        let fs = Arc::new(FakeFs::new());
3516        fs.insert_tree(
3517            "/the-dir",
3518            json!({
3519                "a.txt": "a-contents",
3520                "b.txt": "b-contents",
3521            }),
3522        )
3523        .await;
3524
3525        let worktree = Worktree::open_local(
3526            client.clone(),
3527            user_store,
3528            "/the-dir".as_ref(),
3529            fs,
3530            Default::default(),
3531            &mut cx.to_async(),
3532        )
3533        .await
3534        .unwrap();
3535
3536        // Spawn multiple tasks to open paths, repeating some paths.
3537        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3538            (
3539                worktree.open_buffer("a.txt", cx),
3540                worktree.open_buffer("b.txt", cx),
3541                worktree.open_buffer("a.txt", cx),
3542            )
3543        });
3544
3545        let buffer_a_1 = buffer_a_1.await.unwrap();
3546        let buffer_a_2 = buffer_a_2.await.unwrap();
3547        let buffer_b = buffer_b.await.unwrap();
3548        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3549        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3550
3551        // There is only one buffer per path.
3552        let buffer_a_id = buffer_a_1.id();
3553        assert_eq!(buffer_a_2.id(), buffer_a_id);
3554
3555        // Open the same path again while it is still open.
3556        drop(buffer_a_1);
3557        let buffer_a_3 = worktree
3558            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3559            .await
3560            .unwrap();
3561
3562        // There's still only one buffer per path.
3563        assert_eq!(buffer_a_3.id(), buffer_a_id);
3564    }
3565
3566    #[gpui::test]
3567    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
3568        use std::fs;
3569
3570        let dir = temp_tree(json!({
3571            "file1": "abc",
3572            "file2": "def",
3573            "file3": "ghi",
3574        }));
3575        let http_client = FakeHttpClient::with_404_response();
3576        let client = Client::new(http_client.clone());
3577        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3578
3579        let tree = Worktree::open_local(
3580            client,
3581            user_store,
3582            dir.path(),
3583            Arc::new(RealFs),
3584            Default::default(),
3585            &mut cx.to_async(),
3586        )
3587        .await
3588        .unwrap();
3589        tree.flush_fs_events(&cx).await;
3590        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3591            .await;
3592
3593        let buffer1 = tree
3594            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3595            .await
3596            .unwrap();
3597        let events = Rc::new(RefCell::new(Vec::new()));
3598
3599        // initially, the buffer isn't dirty.
3600        buffer1.update(&mut cx, |buffer, cx| {
3601            cx.subscribe(&buffer1, {
3602                let events = events.clone();
3603                move |_, _, event, _| events.borrow_mut().push(event.clone())
3604            })
3605            .detach();
3606
3607            assert!(!buffer.is_dirty());
3608            assert!(events.borrow().is_empty());
3609
3610            buffer.edit(vec![1..2], "", cx);
3611        });
3612
3613        // after the first edit, the buffer is dirty, and emits a dirtied event.
3614        buffer1.update(&mut cx, |buffer, cx| {
3615            assert!(buffer.text() == "ac");
3616            assert!(buffer.is_dirty());
3617            assert_eq!(
3618                *events.borrow(),
3619                &[language::Event::Edited, language::Event::Dirtied]
3620            );
3621            events.borrow_mut().clear();
3622            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
3623        });
3624
3625        // after saving, the buffer is not dirty, and emits a saved event.
3626        buffer1.update(&mut cx, |buffer, cx| {
3627            assert!(!buffer.is_dirty());
3628            assert_eq!(*events.borrow(), &[language::Event::Saved]);
3629            events.borrow_mut().clear();
3630
3631            buffer.edit(vec![1..1], "B", cx);
3632            buffer.edit(vec![2..2], "D", cx);
3633        });
3634
3635        // after editing again, the buffer is dirty, and emits another dirty event.
3636        buffer1.update(&mut cx, |buffer, cx| {
3637            assert!(buffer.text() == "aBDc");
3638            assert!(buffer.is_dirty());
3639            assert_eq!(
3640                *events.borrow(),
3641                &[
3642                    language::Event::Edited,
3643                    language::Event::Dirtied,
3644                    language::Event::Edited,
3645                ],
3646            );
3647            events.borrow_mut().clear();
3648
3649            // TODO - currently, after restoring the buffer to its
3650            // previously-saved state, the buffer is still considered dirty.
3651            buffer.edit([1..3], "", cx);
3652            assert!(buffer.text() == "ac");
3653            assert!(buffer.is_dirty());
3654        });
3655
3656        assert_eq!(*events.borrow(), &[language::Event::Edited]);
3657
3658        // When a file is deleted, the buffer is considered dirty.
3659        let events = Rc::new(RefCell::new(Vec::new()));
3660        let buffer2 = tree
3661            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
3662            .await
3663            .unwrap();
3664        buffer2.update(&mut cx, |_, cx| {
3665            cx.subscribe(&buffer2, {
3666                let events = events.clone();
3667                move |_, _, event, _| events.borrow_mut().push(event.clone())
3668            })
3669            .detach();
3670        });
3671
3672        fs::remove_file(dir.path().join("file2")).unwrap();
3673        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
3674        assert_eq!(
3675            *events.borrow(),
3676            &[language::Event::Dirtied, language::Event::FileHandleChanged]
3677        );
3678
3679        // When a file is already dirty when deleted, we don't emit a Dirtied event.
3680        let events = Rc::new(RefCell::new(Vec::new()));
3681        let buffer3 = tree
3682            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
3683            .await
3684            .unwrap();
3685        buffer3.update(&mut cx, |_, cx| {
3686            cx.subscribe(&buffer3, {
3687                let events = events.clone();
3688                move |_, _, event, _| events.borrow_mut().push(event.clone())
3689            })
3690            .detach();
3691        });
3692
3693        tree.flush_fs_events(&cx).await;
3694        buffer3.update(&mut cx, |buffer, cx| {
3695            buffer.edit(Some(0..0), "x", cx);
3696        });
3697        events.borrow_mut().clear();
3698        fs::remove_file(dir.path().join("file3")).unwrap();
3699        buffer3
3700            .condition(&cx, |_, _| !events.borrow().is_empty())
3701            .await;
3702        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
3703        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
3704    }
3705
3706    #[gpui::test]
3707    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
3708        use std::fs;
3709
3710        let initial_contents = "aaa\nbbbbb\nc\n";
3711        let dir = temp_tree(json!({ "the-file": initial_contents }));
3712        let http_client = FakeHttpClient::with_404_response();
3713        let client = Client::new(http_client.clone());
3714        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3715
3716        let tree = Worktree::open_local(
3717            client,
3718            user_store,
3719            dir.path(),
3720            Arc::new(RealFs),
3721            Default::default(),
3722            &mut cx.to_async(),
3723        )
3724        .await
3725        .unwrap();
3726        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3727            .await;
3728
3729        let abs_path = dir.path().join("the-file");
3730        let buffer = tree
3731            .update(&mut cx, |tree, cx| {
3732                tree.open_buffer(Path::new("the-file"), cx)
3733            })
3734            .await
3735            .unwrap();
3736
3737        // TODO
3738        // Add a cursor on each row.
3739        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
3740        //     assert!(!buffer.is_dirty());
3741        //     buffer.add_selection_set(
3742        //         &(0..3)
3743        //             .map(|row| Selection {
3744        //                 id: row as usize,
3745        //                 start: Point::new(row, 1),
3746        //                 end: Point::new(row, 1),
3747        //                 reversed: false,
3748        //                 goal: SelectionGoal::None,
3749        //             })
3750        //             .collect::<Vec<_>>(),
3751        //         cx,
3752        //     )
3753        // });
3754
3755        // Change the file on disk, adding two new lines of text, and removing
3756        // one line.
3757        buffer.read_with(&cx, |buffer, _| {
3758            assert!(!buffer.is_dirty());
3759            assert!(!buffer.has_conflict());
3760        });
3761        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
3762        fs::write(&abs_path, new_contents).unwrap();
3763
3764        // Because the buffer was not modified, it is reloaded from disk. Its
3765        // contents are edited according to the diff between the old and new
3766        // file contents.
3767        buffer
3768            .condition(&cx, |buffer, _| buffer.text() == new_contents)
3769            .await;
3770
3771        buffer.update(&mut cx, |buffer, _| {
3772            assert_eq!(buffer.text(), new_contents);
3773            assert!(!buffer.is_dirty());
3774            assert!(!buffer.has_conflict());
3775
3776            // TODO
3777            // let cursor_positions = buffer
3778            //     .selection_set(selection_set_id)
3779            //     .unwrap()
3780            //     .selections::<Point>(&*buffer)
3781            //     .map(|selection| {
3782            //         assert_eq!(selection.start, selection.end);
3783            //         selection.start
3784            //     })
3785            //     .collect::<Vec<_>>();
3786            // assert_eq!(
3787            //     cursor_positions,
3788            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
3789            // );
3790        });
3791
3792        // Modify the buffer
3793        buffer.update(&mut cx, |buffer, cx| {
3794            buffer.edit(vec![0..0], " ", cx);
3795            assert!(buffer.is_dirty());
3796            assert!(!buffer.has_conflict());
3797        });
3798
3799        // Change the file on disk again, adding blank lines to the beginning.
3800        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();
3801
3802        // Because the buffer is modified, it doesn't reload from disk, but is
3803        // marked as having a conflict.
3804        buffer
3805            .condition(&cx, |buffer, _| buffer.has_conflict())
3806            .await;
3807    }
3808
3809    #[gpui::test]
3810    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
3811        let (language_server_config, mut fake_server) =
3812            LanguageServerConfig::fake(cx.background()).await;
3813        let progress_token = language_server_config
3814            .disk_based_diagnostics_progress_token
3815            .clone()
3816            .unwrap();
3817        let mut languages = LanguageRegistry::new();
3818        languages.add(Arc::new(Language::new(
3819            LanguageConfig {
3820                name: "Rust".to_string(),
3821                path_suffixes: vec!["rs".to_string()],
3822                language_server: Some(language_server_config),
3823                ..Default::default()
3824            },
3825            Some(tree_sitter_rust::language()),
3826        )));
3827
3828        let dir = temp_tree(json!({
3829            "a.rs": "fn a() { A }",
3830            "b.rs": "const y: i32 = 1",
3831        }));
3832
3833        let http_client = FakeHttpClient::with_404_response();
3834        let client = Client::new(http_client.clone());
3835        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3836
3837        let tree = Worktree::open_local(
3838            client,
3839            user_store,
3840            dir.path(),
3841            Arc::new(RealFs),
3842            Arc::new(languages),
3843            &mut cx.to_async(),
3844        )
3845        .await
3846        .unwrap();
3847        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3848            .await;
3849
3850        // Cause worktree to start the fake language server
3851        let _buffer = tree
3852            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
3853            .await
3854            .unwrap();
3855
3856        let mut events = subscribe(&tree, &mut cx);
3857
3858        fake_server.start_progress(&progress_token).await;
3859        fake_server.start_progress(&progress_token).await;
3860        fake_server.end_progress(&progress_token).await;
3861        fake_server.start_progress(&progress_token).await;
3862
3863        fake_server
3864            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
3865                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
3866                version: None,
3867                diagnostics: vec![lsp::Diagnostic {
3868                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
3869                    severity: Some(lsp::DiagnosticSeverity::ERROR),
3870                    message: "undefined variable 'A'".to_string(),
3871                    ..Default::default()
3872                }],
3873            })
3874            .await;
3875
3876        let event = events.next().await.unwrap();
3877        assert_eq!(
3878            event,
3879            Event::DiagnosticsUpdated(Arc::from(Path::new("a.rs")))
3880        );
3881
3882        fake_server.end_progress(&progress_token).await;
3883        fake_server.end_progress(&progress_token).await;
3884
3885        let event = events.next().await.unwrap();
3886        assert_eq!(event, Event::DiskBasedDiagnosticsUpdated);
3887
3888        let buffer = tree
3889            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3890            .await
3891            .unwrap();
3892
3893        buffer.read_with(&cx, |buffer, _| {
3894            let snapshot = buffer.snapshot();
3895            let diagnostics = snapshot
3896                .diagnostics_in_range::<_, Point>(0..buffer.len())
3897                .collect::<Vec<_>>();
3898            assert_eq!(
3899                diagnostics,
3900                &[DiagnosticEntry {
3901                    range: Point::new(0, 9)..Point::new(0, 10),
3902                    diagnostic: Diagnostic {
3903                        severity: lsp::DiagnosticSeverity::ERROR,
3904                        message: "undefined variable 'A'".to_string(),
3905                        group_id: 0,
3906                        is_primary: true,
3907                        ..Default::default()
3908                    }
3909                }]
3910            )
3911        });
3912    }
3913
3914    #[gpui::test]
3915    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
3916        let fs = Arc::new(FakeFs::new());
3917        let http_client = FakeHttpClient::with_404_response();
3918        let client = Client::new(http_client.clone());
3919        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3920
3921        fs.insert_tree(
3922            "/the-dir",
3923            json!({
3924                "a.rs": "
3925                    fn foo(mut v: Vec<usize>) {
3926                        for x in &v {
3927                            v.push(1);
3928                        }
3929                    }
3930                "
3931                .unindent(),
3932            }),
3933        )
3934        .await;
3935
3936        let worktree = Worktree::open_local(
3937            client.clone(),
3938            user_store,
3939            "/the-dir".as_ref(),
3940            fs,
3941            Default::default(),
3942            &mut cx.to_async(),
3943        )
3944        .await
3945        .unwrap();
3946
3947        let buffer = worktree
3948            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3949            .await
3950            .unwrap();
3951
3952        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
3953        let message = lsp::PublishDiagnosticsParams {
3954            uri: buffer_uri.clone(),
3955            diagnostics: vec![
3956                lsp::Diagnostic {
3957                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3958                    severity: Some(DiagnosticSeverity::WARNING),
3959                    message: "error 1".to_string(),
3960                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3961                        location: lsp::Location {
3962                            uri: buffer_uri.clone(),
3963                            range: lsp::Range::new(
3964                                lsp::Position::new(1, 8),
3965                                lsp::Position::new(1, 9),
3966                            ),
3967                        },
3968                        message: "error 1 hint 1".to_string(),
3969                    }]),
3970                    ..Default::default()
3971                },
3972                lsp::Diagnostic {
3973                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3974                    severity: Some(DiagnosticSeverity::HINT),
3975                    message: "error 1 hint 1".to_string(),
3976                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3977                        location: lsp::Location {
3978                            uri: buffer_uri.clone(),
3979                            range: lsp::Range::new(
3980                                lsp::Position::new(1, 8),
3981                                lsp::Position::new(1, 9),
3982                            ),
3983                        },
3984                        message: "original diagnostic".to_string(),
3985                    }]),
3986                    ..Default::default()
3987                },
3988                lsp::Diagnostic {
3989                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
3990                    severity: Some(DiagnosticSeverity::ERROR),
3991                    message: "error 2".to_string(),
3992                    related_information: Some(vec![
3993                        lsp::DiagnosticRelatedInformation {
3994                            location: lsp::Location {
3995                                uri: buffer_uri.clone(),
3996                                range: lsp::Range::new(
3997                                    lsp::Position::new(1, 13),
3998                                    lsp::Position::new(1, 15),
3999                                ),
4000                            },
4001                            message: "error 2 hint 1".to_string(),
4002                        },
4003                        lsp::DiagnosticRelatedInformation {
4004                            location: lsp::Location {
4005                                uri: buffer_uri.clone(),
4006                                range: lsp::Range::new(
4007                                    lsp::Position::new(1, 13),
4008                                    lsp::Position::new(1, 15),
4009                                ),
4010                            },
4011                            message: "error 2 hint 2".to_string(),
4012                        },
4013                    ]),
4014                    ..Default::default()
4015                },
4016                lsp::Diagnostic {
4017                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
4018                    severity: Some(DiagnosticSeverity::HINT),
4019                    message: "error 2 hint 1".to_string(),
4020                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
4021                        location: lsp::Location {
4022                            uri: buffer_uri.clone(),
4023                            range: lsp::Range::new(
4024                                lsp::Position::new(2, 8),
4025                                lsp::Position::new(2, 17),
4026                            ),
4027                        },
4028                        message: "original diagnostic".to_string(),
4029                    }]),
4030                    ..Default::default()
4031                },
4032                lsp::Diagnostic {
4033                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
4034                    severity: Some(DiagnosticSeverity::HINT),
4035                    message: "error 2 hint 2".to_string(),
4036                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
4037                        location: lsp::Location {
4038                            uri: buffer_uri.clone(),
4039                            range: lsp::Range::new(
4040                                lsp::Position::new(2, 8),
4041                                lsp::Position::new(2, 17),
4042                            ),
4043                        },
4044                        message: "original diagnostic".to_string(),
4045                    }]),
4046                    ..Default::default()
4047                },
4048            ],
4049            version: None,
4050        };
4051
4052        worktree
4053            .update(&mut cx, |tree, cx| {
4054                tree.update_diagnostics(message, &Default::default(), cx)
4055            })
4056            .unwrap();
4057        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());
4058
4059        assert_eq!(
4060            buffer
4061                .diagnostics_in_range::<_, Point>(0..buffer.len())
4062                .collect::<Vec<_>>(),
4063            &[
4064                DiagnosticEntry {
4065                    range: Point::new(1, 8)..Point::new(1, 9),
4066                    diagnostic: Diagnostic {
4067                        severity: DiagnosticSeverity::WARNING,
4068                        message: "error 1".to_string(),
4069                        group_id: 0,
4070                        is_primary: true,
4071                        ..Default::default()
4072                    }
4073                },
4074                DiagnosticEntry {
4075                    range: Point::new(1, 8)..Point::new(1, 9),
4076                    diagnostic: Diagnostic {
4077                        severity: DiagnosticSeverity::HINT,
4078                        message: "error 1 hint 1".to_string(),
4079                        group_id: 0,
4080                        is_primary: false,
4081                        ..Default::default()
4082                    }
4083                },
4084                DiagnosticEntry {
4085                    range: Point::new(1, 13)..Point::new(1, 15),
4086                    diagnostic: Diagnostic {
4087                        severity: DiagnosticSeverity::HINT,
4088                        message: "error 2 hint 1".to_string(),
4089                        group_id: 1,
4090                        is_primary: false,
4091                        ..Default::default()
4092                    }
4093                },
4094                DiagnosticEntry {
4095                    range: Point::new(1, 13)..Point::new(1, 15),
4096                    diagnostic: Diagnostic {
4097                        severity: DiagnosticSeverity::HINT,
4098                        message: "error 2 hint 2".to_string(),
4099                        group_id: 1,
4100                        is_primary: false,
4101                        ..Default::default()
4102                    }
4103                },
4104                DiagnosticEntry {
4105                    range: Point::new(2, 8)..Point::new(2, 17),
4106                    diagnostic: Diagnostic {
4107                        severity: DiagnosticSeverity::ERROR,
4108                        message: "error 2".to_string(),
4109                        group_id: 1,
4110                        is_primary: true,
4111                        ..Default::default()
4112                    }
4113                }
4114            ]
4115        );
4116
4117        assert_eq!(
4118            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
4119            &[
4120                DiagnosticEntry {
4121                    range: Point::new(1, 8)..Point::new(1, 9),
4122                    diagnostic: Diagnostic {
4123                        severity: DiagnosticSeverity::WARNING,
4124                        message: "error 1".to_string(),
4125                        group_id: 0,
4126                        is_primary: true,
4127                        ..Default::default()
4128                    }
4129                },
4130                DiagnosticEntry {
4131                    range: Point::new(1, 8)..Point::new(1, 9),
4132                    diagnostic: Diagnostic {
4133                        severity: DiagnosticSeverity::HINT,
4134                        message: "error 1 hint 1".to_string(),
4135                        group_id: 0,
4136                        is_primary: false,
4137                        ..Default::default()
4138                    }
4139                },
4140            ]
4141        );
4142        assert_eq!(
4143            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
4144            &[
4145                DiagnosticEntry {
4146                    range: Point::new(1, 13)..Point::new(1, 15),
4147                    diagnostic: Diagnostic {
4148                        severity: DiagnosticSeverity::HINT,
4149                        message: "error 2 hint 1".to_string(),
4150                        group_id: 1,
4151                        is_primary: false,
4152                        ..Default::default()
4153                    }
4154                },
4155                DiagnosticEntry {
4156                    range: Point::new(1, 13)..Point::new(1, 15),
4157                    diagnostic: Diagnostic {
4158                        severity: DiagnosticSeverity::HINT,
4159                        message: "error 2 hint 2".to_string(),
4160                        group_id: 1,
4161                        is_primary: false,
4162                        ..Default::default()
4163                    }
4164                },
4165                DiagnosticEntry {
4166                    range: Point::new(2, 8)..Point::new(2, 17),
4167                    diagnostic: Diagnostic {
4168                        severity: DiagnosticSeverity::ERROR,
4169                        message: "error 2".to_string(),
4170                        group_id: 1,
4171                        is_primary: true,
4172                        ..Default::default()
4173                    }
4174                }
4175            ]
4176        );
4177    }
4178
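    // Randomized test: repeatedly mutates a real temp directory while a
    // BackgroundScanner processes batches of the resulting events, then checks
    // that a fresh scan and incrementally-updated snapshots all converge to
    // the same state.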
4179    #[gpui::test(iterations = 100)]
4180    fn test_random(mut rng: StdRng) {
4181        let operations = env::var("OPERATIONS")
4182            .map(|o| o.parse().unwrap())
4183            .unwrap_or(40);
4184        let initial_entries = env::var("INITIAL_ENTRIES")
4185            .map(|o| o.parse().unwrap())
4186            .unwrap_or(20);
4187
4188        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
4189        for _ in 0..initial_entries {
4190            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
4191        }
4192        log::info!("Generated initial tree");
4193
4194        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4195        let fs = Arc::new(RealFs);
4196        let next_entry_id = Arc::new(AtomicUsize::new(0));
4197        let mut initial_snapshot = Snapshot {
4198            id: WorktreeId::from_usize(0),
4199            scan_id: 0,
4200            abs_path: root_dir.path().into(),
4201            entries_by_path: Default::default(),
4202            entries_by_id: Default::default(),
4203            removed_entry_ids: Default::default(),
4204            ignores: Default::default(),
4205            root_name: Default::default(),
4206            root_char_bag: Default::default(),
4207            next_entry_id: next_entry_id.clone(),
4208            diagnostic_summaries: Default::default(),
4209        };
4210        initial_snapshot.insert_entry(
4211            Entry::new(
4212                Path::new("").into(),
4213                &smol::block_on(fs.metadata(root_dir.path()))
4214                    .unwrap()
4215                    .unwrap(),
4216                &next_entry_id,
4217                Default::default(),
4218            ),
4219            fs.as_ref(),
4220        );
4221        let mut scanner = BackgroundScanner::new(
4222            Arc::new(Mutex::new(initial_snapshot.clone())),
4223            notify_tx,
4224            fs.clone(),
4225            Arc::new(gpui::executor::Background::new()),
4226        );
4227        smol::block_on(scanner.scan_dirs()).unwrap();
4228        scanner.snapshot().check_invariants();
4229
4230        let mut events = Vec::new();
4231        let mut snapshots = Vec::new();
4232        let mut mutations_len = operations;
4233        while mutations_len > 1 {
4234            if !events.is_empty() && rng.gen_bool(0.4) {
4235                let len = rng.gen_range(0..=events.len());
4236                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
4237                log::info!("Delivering events: {:#?}", to_deliver);
4238                smol::block_on(scanner.process_events(to_deliver));
4239                scanner.snapshot().check_invariants();
4240            } else {
4241                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
4242                mutations_len -= 1;
4243            }
4244
4245            if rng.gen_bool(0.2) {
4246                snapshots.push(scanner.snapshot());
4247            }
4248        }
4249        log::info!("Quiescing: {:#?}", events);
4250        smol::block_on(scanner.process_events(events));
4251        scanner.snapshot().check_invariants();
4252
4253        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4254        let mut new_scanner = BackgroundScanner::new(
4255            Arc::new(Mutex::new(initial_snapshot)),
4256            notify_tx,
4257            scanner.fs.clone(),
4258            scanner.executor.clone(),
4259        );
4260        smol::block_on(new_scanner.scan_dirs()).unwrap();
4261        assert_eq!(
4262            scanner.snapshot().to_vec(true),
4263            new_scanner.snapshot().to_vec(true)
4264        );
4265
4266        for mut prev_snapshot in snapshots {
4267            let include_ignored = rng.gen::<bool>();
4268            if !include_ignored {
4269                let mut entries_by_path_edits = Vec::new();
4270                let mut entries_by_id_edits = Vec::new();
4271                for entry in prev_snapshot
4272                    .entries_by_id
4273                    .cursor::<()>()
4274                    .filter(|e| e.is_ignored)
4275                {
4276                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4277                    entries_by_id_edits.push(Edit::Remove(entry.id));
4278                }
4279
4280                prev_snapshot
4281                    .entries_by_path
4282                    .edit(entries_by_path_edits, &());
4283                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4284            }
4285
4286            let update = scanner
4287                .snapshot()
4288                .build_update(&prev_snapshot, 0, 0, include_ignored);
4289            prev_snapshot.apply_update(update).unwrap();
4290            assert_eq!(
4291                prev_snapshot.to_vec(true),
4292                scanner.snapshot().to_vec(include_ignored)
4293            );
4294        }
4295    }
4296
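    // Randomly creates, renames, or deletes files and directories beneath
    // `root_path` (occasionally writing a `.gitignore`), returning the events
    // a file-system watcher would have reported for those mutations.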
4297    fn randomly_mutate_tree(
4298        root_path: &Path,
4299        insertion_probability: f64,
4300        rng: &mut impl Rng,
4301    ) -> Result<Vec<fsevent::Event>> {
4302        let root_path = root_path.canonicalize().unwrap();
4303        let (dirs, files) = read_dir_recursive(root_path.clone());
4304
4305        let mut events = Vec::new();
4306        let mut record_event = |path: PathBuf| {
4307            events.push(fsevent::Event {
4308                event_id: SystemTime::now()
4309                    .duration_since(UNIX_EPOCH)
4310                    .unwrap()
4311                    .as_secs(),
4312                flags: fsevent::StreamFlags::empty(),
4313                path,
4314            });
4315        };
4316
4317        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4318            let path = dirs.choose(rng).unwrap();
4319            let new_path = path.join(gen_name(rng));
4320
4321            if rng.gen() {
4322                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4323                std::fs::create_dir(&new_path)?;
4324            } else {
4325                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4326                std::fs::write(&new_path, "")?;
4327            }
4328            record_event(new_path);
4329        } else if rng.gen_bool(0.05) {
4330            let ignore_dir_path = dirs.choose(rng).unwrap();
4331            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4332
4333            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4334            let files_to_ignore = {
4335                let len = rng.gen_range(0..=subfiles.len());
4336                subfiles.choose_multiple(rng, len)
4337            };
4338            let dirs_to_ignore = {
4339                let len = rng.gen_range(0..subdirs.len());
4340                subdirs.choose_multiple(rng, len)
4341            };
4342
4343            let mut ignore_contents = String::new();
4344            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4345                write!(
4346                    ignore_contents,
4347                    "{}\n",
4348                    path_to_ignore
4349                        .strip_prefix(&ignore_dir_path)?
4350                        .to_str()
4351                        .unwrap()
4352                )
4353                .unwrap();
4354            }
4355            log::info!(
4356                "Creating {:?} with contents:\n{}",
4357                ignore_path.strip_prefix(&root_path)?,
4358                ignore_contents
4359            );
4360            std::fs::write(&ignore_path, ignore_contents).unwrap();
4361            record_event(ignore_path);
4362        } else {
4363            let old_path = {
4364                let file_path = files.choose(rng);
4365                let dir_path = dirs[1..].choose(rng);
4366                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4367            };
4368
4369            let is_rename = rng.gen();
4370            if is_rename {
4371                let new_path_parent = dirs
4372                    .iter()
4373                    .filter(|d| !d.starts_with(old_path))
4374                    .choose(rng)
4375                    .unwrap();
4376
4377                let overwrite_existing_dir =
4378                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4379                let new_path = if overwrite_existing_dir {
4380                    std::fs::remove_dir_all(&new_path_parent).ok();
4381                    new_path_parent.to_path_buf()
4382                } else {
4383                    new_path_parent.join(gen_name(rng))
4384                };
4385
4386                log::info!(
4387                    "Renaming {:?} to {}{:?}",
4388                    old_path.strip_prefix(&root_path)?,
4389                    if overwrite_existing_dir {
4390                        "overwrite "
4391                    } else {
4392                        ""
4393                    },
4394                    new_path.strip_prefix(&root_path)?
4395                );
4396                std::fs::rename(&old_path, &new_path)?;
4397                record_event(old_path.clone());
4398                record_event(new_path);
4399            } else if old_path.is_dir() {
4400                let (dirs, files) = read_dir_recursive(old_path.clone());
4401
4402                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4403                std::fs::remove_dir_all(&old_path).unwrap();
4404                for file in files {
4405                    record_event(file);
4406                }
4407                for dir in dirs {
4408                    record_event(dir);
4409                }
4410            } else {
4411                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4412                std::fs::remove_file(old_path).unwrap();
4413                record_event(old_path.clone());
4414            }
4415        }
4416
4417        Ok(events)
4418    }
4419
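    /// Recursively walks `path`, returning the directories and files found beneath it; the returned directory list includes `path` itself.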
4420    fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4421        let child_entries = std::fs::read_dir(&path).unwrap();
4422        let mut dirs = vec![path];
4423        let mut files = Vec::new();
4424        for child_entry in child_entries {
4425            let child_path = child_entry.unwrap().path();
4426            if child_path.is_dir() {
4427                let (child_dirs, child_files) = read_dir_recursive(child_path);
4428                dirs.extend(child_dirs);
4429                files.extend(child_files);
4430            } else {
4431                files.push(child_path);
4432            }
4433        }
4434        (dirs, files)
4435    }
4436
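    /// Generates a random six-character alphanumeric name for new files and directories.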
4437    fn gen_name(rng: &mut impl Rng) -> String {
4438        (0..6)
4439            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4440            .map(char::from)
4441            .collect()
4442    }
4443
4444    impl Snapshot {
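        /// Asserts internal consistency of the snapshot: the file iterators, the ordering of `entries_by_path`, and the presence of tracked `.gitignore` entries.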
4445        fn check_invariants(&self) {
4446            let mut files = self.files(true, 0);
4447            let mut visible_files = self.files(false, 0);
4448            for entry in self.entries_by_path.cursor::<()>() {
4449                if entry.is_file() {
4450                    assert_eq!(files.next().unwrap().inode, entry.inode);
4451                    if !entry.is_ignored {
4452                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4453                    }
4454                }
4455            }
4456            assert!(files.next().is_none());
4457            assert!(visible_files.next().is_none());
4458
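            // Traversing the tree top-down via child_entries must yield the same preorder as scanning entries_by_path directly.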
4459            let mut dfs_paths_via_traversal = Vec::new();
4460            let mut stack = vec![Path::new("")];
4461            while let Some(path) = stack.pop() {
4462                dfs_paths_via_traversal.push(path);
4463                let ix = stack.len();
4464                for child_entry in self.child_entries(path) {
4465                    stack.insert(ix, &child_entry.path);
4466                }
4467            }
4468
4469            let dfs_paths = self
4470                .entries_by_path
4471                .cursor::<()>()
4472                .map(|e| e.path.as_ref())
4473                .collect::<Vec<_>>();
4474            assert_eq!(dfs_paths_via_traversal, dfs_paths);
4475
4476            for (ignore_parent_path, _) in &self.ignores {
4477                assert!(self.entry_for_path(ignore_parent_path).is_some());
4478                assert!(self
4479                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4480                    .is_some());
4481            }
4482        }
4483
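        /// Flattens the snapshot into (path, inode, is_ignored) tuples sorted by path, optionally excluding ignored entries, for comparison in tests.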
4484        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4485            let mut paths = Vec::new();
4486            for entry in self.entries_by_path.cursor::<()>() {
4487                if include_ignored || !entry.is_ignored {
4488                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4489                }
4490            }
4491            paths.sort_by(|a, b| a.0.cmp(&b.0));
4492            paths
4493        }
4494    }
4495}