worktree.rs

use super::{
    fs::{self, Fs},
    ignore::IgnoreStack,
    DiagnosticSummary, ProjectEntry,
};
use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context, Result};
use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
use clock::ReplicaId;
use collections::{hash_map, HashMap, HashSet};
use futures::{Stream, StreamExt};
use fuzzy::CharBag;
use gpui::{
    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
    Task, UpgradeModelHandle, WeakModelHandle,
};
use language::{
    range_from_lsp, Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language,
    LanguageRegistry, Operation, PointUtf16, Rope,
};
use lazy_static::lazy_static;
use lsp::LanguageServer;
use parking_lot::Mutex;
use postage::{
    prelude::{Sink as _, Stream as _},
    watch,
};
use serde::Deserialize;
use smol::channel::{self, Sender};
use std::{
    any::Any,
    cmp::{self, Ordering},
    convert::{TryFrom, TryInto},
    ffi::{OsStr, OsString},
    fmt,
    future::Future,
    ops::Deref,
    path::{Path, PathBuf},
    sync::{
        atomic::{AtomicUsize, Ordering::SeqCst},
        Arc,
    },
    time::{Duration, SystemTime},
};
use sum_tree::{Bias, TreeMap};
use sum_tree::{Edit, SeekTarget, SumTree};
use util::{post_inc, ResultExt, TryFutureExt};

lazy_static! {
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}

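/// Progress of the background filesystem scan, as reported by the
/// `BackgroundScanner`.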
#[derive(Clone, Debug)]
enum ScanState {
    Idle,
    Scanning,
    Err(Arc<anyhow::Error>),
}

#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);

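/// A worktree is either backed by the local filesystem or is a replica of a
/// remote collaborator's worktree, synchronized over RPC.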
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Event {
    DiskBasedDiagnosticsUpdating,
    DiskBasedDiagnosticsUpdated,
    DiagnosticsUpdated(Arc<Path>),
}

impl Entity for Worktree {
    type Event = Event;

    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        if let Self::Local(worktree) = self {
            let shutdown_futures = worktree
                .language_servers
                .drain()
                .filter_map(|(_, server)| server.shutdown())
                .collect::<Vec<_>>();
            Some(
                async move {
                    futures::future::join_all(shutdown_futures).await;
                }
                .boxed(),
            )
        } else {
            None
        }
    }
}

impl Worktree {
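    /// Creates a worktree rooted at `path` on the local filesystem and spawns
    /// a background scanner that watches it for changes.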
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }

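    /// Constructs a remote worktree from a `proto::Worktree`, deserializing
    /// its entries and diagnostic summaries on the background executor and
    /// then applying streamed `proto::UpdateWorktree` messages as they arrive.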
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                    diagnostic_summaries,
                })
            })
        });

        Ok(worktree)
    }

    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }

    pub fn languages(&self) -> &Arc<LanguageRegistry> {
        match self {
            Worktree::Local(worktree) => &worktree.language_registry,
            Worktree::Remote(worktree) => &worktree.languages,
        }
    }

    pub fn user_store(&self) -> &ModelHandle<UserStore> {
        match self {
            Worktree::Local(worktree) => &worktree.user_store,
            Worktree::Remote(worktree) => &worktree.user_store,
        }
    }

    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }

    pub fn handle_close_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        self.as_local_mut()
            .unwrap()
            .close_remote_buffer(envelope, cx)
    }

    pub fn diagnostic_summaries<'a>(
        &'a self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), summary.clone()))
    }

    pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
        match self {
            Worktree::Local(worktree) => &mut worktree.loading_buffers,
            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
        }
    }

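    /// Opens the buffer at `path`, reusing an already-open buffer or an
    /// in-flight load for the same path when one exists.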
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }

    #[cfg(feature = "test-support")]
    pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
        let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
            Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
            Worktree::Remote(worktree) => {
                Box::new(worktree.open_buffers.values().filter_map(|buf| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some(buf)
                    } else {
                        None
                    }
                }))
            }
        };

        let path = path.as_ref();
        open_buffers
            .find(|buffer| {
                if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
                    file.path().as_ref() == path
                } else {
                    false
                }
            })
            .is_some()
    }

    pub fn handle_update_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateBuffer>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let buffer_id = payload.buffer_id as usize;
        let ops = payload
            .operations
            .into_iter()
            .map(|op| language::proto::deserialize_operation(op))
            .collect::<Result<Vec<_>, _>>()?;

        match self {
            Worktree::Local(worktree) => {
                let buffer = worktree
                    .open_buffers
                    .get(&buffer_id)
                    .and_then(|buf| buf.upgrade(cx))
                    .ok_or_else(|| {
                        anyhow!("invalid buffer {} in update buffer message", buffer_id)
                    })?;
                buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
            }
            Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
                Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
                Some(RemoteBuffer::Loaded(buffer)) => {
                    if let Some(buffer) = buffer.upgrade(cx) {
                        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
                    } else {
                        worktree
                            .open_buffers
                            .insert(buffer_id, RemoteBuffer::Operations(ops));
                    }
                }
                None => {
                    worktree
                        .open_buffers
                        .insert(buffer_id, RemoteBuffer::Operations(ops));
                }
            },
        }

        Ok(())
    }

    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }

    pub fn handle_buffer_saved(
        &mut self,
        envelope: TypedEnvelope<proto::BufferSaved>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let worktree = self.as_remote_mut().unwrap();
        if let Some(buffer) = worktree
            .open_buffers
            .get(&(payload.buffer_id as usize))
            .and_then(|buf| buf.upgrade(cx))
        {
            buffer.update(cx, |buffer, cx| {
                let version = payload.version.try_into()?;
                let mtime = payload
                    .mtime
                    .ok_or_else(|| anyhow!("missing mtime"))?
                    .into();
                buffer.did_save(version, mtime, None, cx);
                Result::<_, anyhow::Error>::Ok(())
            })?;
        }
        Ok(())
    }

    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();

        Ok(())
    }

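    /// Copies the latest snapshot from the background scanner (or from the
    /// remote update stream) and, once scanning has settled, refreshes the
    /// files of any open buffers.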
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }

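    /// Re-resolves each open buffer's `File` against the current snapshot and
    /// forgets buffers whose handles have been dropped.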
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }

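    /// Translates an LSP `PublishDiagnostics` notification into diagnostic
    /// entries, grouping each piece of related information with its primary
    /// diagnostic.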
    pub fn update_diagnostics(
        &mut self,
        params: lsp::PublishDiagnosticsParams,
        disk_based_sources: &HashSet<String>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
        let abs_path = params
            .uri
            .to_file_path()
            .map_err(|_| anyhow!("URI is not a file"))?;
        let worktree_path = Arc::from(
            abs_path
                .strip_prefix(&this.abs_path)
                .context("path is not within worktree")?,
        );

        let mut next_group_id = 0;
        let mut diagnostics = Vec::default();
        let mut primary_diagnostic_group_ids = HashMap::default();
        let mut sources_by_group_id = HashMap::default();
        let mut supporting_diagnostic_severities = HashMap::default();
        for diagnostic in &params.diagnostics {
            let source = diagnostic.source.as_ref();
            let code = diagnostic.code.as_ref().map(|code| match code {
                lsp::NumberOrString::Number(code) => code.to_string(),
                lsp::NumberOrString::String(code) => code.clone(),
            });
            let range = range_from_lsp(diagnostic.range);
            let is_supporting = diagnostic
                .related_information
                .as_ref()
                .map_or(false, |infos| {
                    infos.iter().any(|info| {
                        primary_diagnostic_group_ids.contains_key(&(
                            source,
                            code.clone(),
                            range_from_lsp(info.location.range),
                        ))
                    })
                });

            if is_supporting {
                if let Some(severity) = diagnostic.severity {
                    supporting_diagnostic_severities
                        .insert((source, code.clone(), range), severity);
                }
            } else {
                let group_id = post_inc(&mut next_group_id);
                let is_disk_based =
                    source.map_or(false, |source| disk_based_sources.contains(source));

                sources_by_group_id.insert(group_id, source);
                primary_diagnostic_group_ids
                    .insert((source, code.clone(), range.clone()), group_id);

                diagnostics.push(DiagnosticEntry {
                    range,
                    diagnostic: Diagnostic {
                        code: code.clone(),
                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
                        message: diagnostic.message.clone(),
                        group_id,
                        is_primary: true,
                        is_valid: true,
                        is_disk_based,
                    },
                });
                if let Some(infos) = &diagnostic.related_information {
                    for info in infos {
                        if info.location.uri == params.uri {
                            let range = range_from_lsp(info.location.range);
                            diagnostics.push(DiagnosticEntry {
                                range,
                                diagnostic: Diagnostic {
                                    code: code.clone(),
                                    severity: DiagnosticSeverity::INFORMATION,
                                    message: info.message.clone(),
                                    group_id,
                                    is_primary: false,
                                    is_valid: true,
                                    is_disk_based,
                                },
                            });
                        }
                    }
                }
            }
        }

        for entry in &mut diagnostics {
            let diagnostic = &mut entry.diagnostic;
            if !diagnostic.is_primary {
                let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
                if let Some(&severity) = supporting_diagnostic_severities.get(&(
                    source,
                    diagnostic.code.clone(),
                    entry.range.clone(),
                )) {
                    diagnostic.severity = severity;
                }
            }
        }

        self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
        Ok(())
    }

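    /// Stores new diagnostics for a path, applies them to the matching open
    /// buffer, and broadcasts an updated summary when the worktree is shared.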
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        let this = self.as_local_mut().unwrap();
        let summary = DiagnosticSummary::new(&diagnostics);
        this.diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        this.diagnostics.insert(worktree_path.clone(), diagnostics);

        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));

        if let Some(share) = this.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = this.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = this.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }

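    /// Sends a buffer operation to the remote side, queueing it for later if
    /// the request fails.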
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
}

impl WorktreeId {
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    pub(crate) fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    pub fn to_usize(&self) -> usize {
        self.0
    }
}

impl fmt::Display for WorktreeId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

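/// A point-in-time view of the worktree's entries and ignore rules.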
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    scan_id: usize,
    abs_path: Arc<Path>,
    root_name: String,
    root_char_bag: CharBag,
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}

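/// A worktree backed by the local filesystem and kept up to date by a
/// background scanner.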
pub struct LocalWorktree {
    snapshot: Snapshot,
    config: WorktreeConfig,
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    poll_task: Option<Task<()>>,
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    language_servers: HashMap<String, Arc<LanguageServer>>,
}

struct ShareState {
    project_id: u64,
    snapshots_tx: Sender<Snapshot>,
    _maintain_remote_snapshot: Option<Task<()>>,
}

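/// A replica of a collaborator's worktree, kept current by applying
/// `proto::UpdateWorktree` messages received over RPC.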
pub struct RemoteWorktree {
    project_id: u64,
    snapshot: Snapshot,
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, RemoteBuffer>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    queued_operations: Vec<(u64, Operation)>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
}

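/// Buffers that are currently being loaded, keyed by path. Watchers resolve
/// once the corresponding load completes or fails.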
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;

#[derive(Default, Deserialize)]
struct WorktreeConfig {
    collaborators: Vec<String>,
}

impl LocalWorktree {
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                language_servers: Default::default(),
            };

            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }

    pub fn authorized_logins(&self) -> Vec<String> {
        self.config.collaborators.clone()
    }

    pub fn language_registry(&self) -> &LanguageRegistry {
        &self.language_registry
    }

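    /// Starts (or reuses) the language server for `language`, wiring up its
    /// diagnostics and work-done progress notifications.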
    pub fn register_language(
        &mut self,
        language: &Arc<Language>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            enum DiagnosticProgress {
                Updating,
                Updated,
            }

            let disk_based_sources = language
                .disk_based_diagnostic_sources()
                .cloned()
                .unwrap_or_default();
            let disk_based_diagnostics_progress_token =
                language.disk_based_diagnostics_progress_token().cloned();
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
                smol::channel::unbounded();
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| {
                let has_disk_based_diagnostic_progress_token =
                    disk_based_diagnostics_progress_token.is_some();
                let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
                async move {
                    while let Ok(diagnostics) = diagnostics_rx.recv().await {
                        if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                            handle.update(&mut cx, |this, cx| {
                                if !has_disk_based_diagnostic_progress_token {
                                    smol::block_on(
                                        disk_based_diagnostics_done_tx
                                            .send(DiagnosticProgress::Updating),
                                    )
                                    .ok();
                                }
                                this.update_diagnostics(diagnostics, &disk_based_sources, cx)
                                    .log_err();
                                if !has_disk_based_diagnostic_progress_token {
                                    smol::block_on(
                                        disk_based_diagnostics_done_tx
                                            .send(DiagnosticProgress::Updated),
                                    )
                                    .ok();
                                }
                            })
                        } else {
                            break;
                        }
                    }
                }
            })
            .detach();

            let mut pending_disk_based_diagnostics: i32 = 0;
            language_server
                .on_notification::<lsp::notification::Progress, _>(move |params| {
                    let token = match params.token {
                        lsp::NumberOrString::Number(_) => None,
                        lsp::NumberOrString::String(token) => Some(token),
                    };

                    if token == disk_based_diagnostics_progress_token {
                        match params.value {
                            lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                                lsp::WorkDoneProgress::Begin(_) => {
                                    if pending_disk_based_diagnostics == 0 {
                                        smol::block_on(
                                            disk_based_diagnostics_done_tx
                                                .send(DiagnosticProgress::Updating),
                                        )
                                        .ok();
                                    }
                                    pending_disk_based_diagnostics += 1;
                                }
                                lsp::WorkDoneProgress::End(_) => {
                                    pending_disk_based_diagnostics -= 1;
                                    if pending_disk_based_diagnostics == 0 {
                                        smol::block_on(
                                            disk_based_diagnostics_done_tx
                                                .send(DiagnosticProgress::Updated),
                                        )
                                        .ok();
                                    }
                                }
                                _ => {}
                            },
                        }
                    }
                })
                .detach();
            let rpc = self.client.clone();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(progress) = disk_based_diagnostics_done_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        match progress {
                            DiagnosticProgress::Updating => {
                                let message = handle.update(&mut cx, |this, cx| {
                                    cx.emit(Event::DiskBasedDiagnosticsUpdating);
                                    let this = this.as_local().unwrap();
                                    this.share.as_ref().map(|share| {
                                        proto::DiskBasedDiagnosticsUpdating {
                                            project_id: share.project_id,
                                            worktree_id: this.id().to_proto(),
                                        }
                                    })
                                });

                                if let Some(message) = message {
                                    rpc.send(message).await.log_err();
                                }
                            }
                            DiagnosticProgress::Updated => {
                                let message = handle.update(&mut cx, |this, cx| {
                                    cx.emit(Event::DiskBasedDiagnosticsUpdated);
                                    let this = this.as_local().unwrap();
                                    this.share.as_ref().map(|share| {
                                        proto::DiskBasedDiagnosticsUpdated {
                                            project_id: share.project_id,
                                            worktree_id: this.id().to_proto(),
                                        }
                                    })
                                });

                                if let Some(message) = message {
                                    rpc.send(message).await.log_err();
                                }
                            }
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }

    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut result = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        result
    }

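    /// Loads the file at `path` from disk and constructs a buffer with the
    /// detected language, its language server, and any previously reported
    /// diagnostics.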
1334    fn open_buffer(
1335        &mut self,
1336        path: &Path,
1337        cx: &mut ModelContext<Worktree>,
1338    ) -> Task<Result<ModelHandle<Buffer>>> {
1339        let path = Arc::from(path);
1340        cx.spawn(move |this, mut cx| async move {
1341            let (file, contents) = this
1342                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
1343                .await?;
1344
1345            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
1346                let this = this.as_local_mut().unwrap();
1347                let diagnostics = this.diagnostics.get(&path).cloned();
1348                let language = this
1349                    .language_registry
1350                    .select_language(file.full_path())
1351                    .cloned();
1352                let server = language
1353                    .as_ref()
1354                    .and_then(|language| this.register_language(language, cx));
1355                (diagnostics, language, server)
1356            });
1357
1358            let mut buffer_operations = Vec::new();
1359            let buffer = cx.add_model(|cx| {
1360                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
1361                buffer.set_language(language, language_server, cx);
1362                if let Some(diagnostics) = diagnostics {
1363                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
1364                    buffer_operations.push(op);
1365                }
1366                buffer
1367            });
1368
1369            this.update(&mut cx, |this, cx| {
1370                for op in buffer_operations {
1371                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
1372                }
1373                let this = this.as_local_mut().unwrap();
1374                this.open_buffers.insert(buffer.id(), buffer.downgrade());
1375            });
1376
1377            Ok(buffer)
1378        })
1379    }
1380
1381    pub fn open_remote_buffer(
1382        &mut self,
1383        envelope: TypedEnvelope<proto::OpenBuffer>,
1384        cx: &mut ModelContext<Worktree>,
1385    ) -> Task<Result<proto::OpenBufferResponse>> {
1386        cx.spawn(|this, mut cx| async move {
1387            let peer_id = envelope.original_sender_id();
1388            let path = Path::new(&envelope.payload.path);
1389            let buffer = this
1390                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
1391                .await?;
1392            this.update(&mut cx, |this, cx| {
1393                this.as_local_mut()
1394                    .unwrap()
1395                    .shared_buffers
1396                    .entry(peer_id?)
1397                    .or_default()
1398                    .insert(buffer.id() as u64, buffer.clone());
1399
1400                Ok(proto::OpenBufferResponse {
1401                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
1402                })
1403            })
1404        })
1405    }
1406
1407    pub fn close_remote_buffer(
1408        &mut self,
1409        envelope: TypedEnvelope<proto::CloseBuffer>,
1410        cx: &mut ModelContext<Worktree>,
1411    ) -> Result<()> {
1412        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1413            shared_buffers.remove(&envelope.payload.buffer_id);
1414            cx.notify();
1415        }
1416
1417        Ok(())
1418    }
1419
1420    pub fn remove_collaborator(
1421        &mut self,
1422        peer_id: PeerId,
1423        replica_id: ReplicaId,
1424        cx: &mut ModelContext<Worktree>,
1425    ) {
1426        self.shared_buffers.remove(&peer_id);
1427        for (_, buffer) in &self.open_buffers {
1428            if let Some(buffer) = buffer.upgrade(cx) {
1429                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1430            }
1431        }
1432        cx.notify();
1433    }
1434
1435    pub fn scan_complete(&self) -> impl Future<Output = ()> {
1436        let mut scan_state_rx = self.last_scan_state_rx.clone();
1437        async move {
1438            let mut scan_state = Some(scan_state_rx.borrow().clone());
1439            while let Some(ScanState::Scanning) = scan_state {
1440                scan_state = scan_state_rx.recv().await;
1441            }
1442        }
1443    }
1444
1445    fn is_scanning(&self) -> bool {
1446        matches!(*self.last_scan_state_rx.borrow(), ScanState::Scanning)
1447    }
1452
1453    pub fn snapshot(&self) -> Snapshot {
1454        self.snapshot.clone()
1455    }
1456
1457    pub fn abs_path(&self) -> &Arc<Path> {
1458        &self.snapshot.abs_path
1459    }
1460
1461    pub fn contains_abs_path(&self, path: &Path) -> bool {
1462        path.starts_with(&self.snapshot.abs_path)
1463    }
1464
1465    fn absolutize(&self, path: &Path) -> PathBuf {
1466        if path.file_name().is_some() {
1467            self.snapshot.abs_path.join(path)
1468        } else {
1469            self.snapshot.abs_path.to_path_buf()
1470        }
1471    }
1472
1473    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
1474        let handle = cx.handle();
1475        let path = Arc::from(path);
1476        let worktree_path = self.abs_path.clone();
1477        let abs_path = self.absolutize(&path);
1478        let background_snapshot = self.background_snapshot.clone();
1479        let fs = self.fs.clone();
1480        cx.spawn(|this, mut cx| async move {
1481            let text = fs.load(&abs_path).await?;
1482            // Eagerly populate the snapshot with an updated entry for the loaded file
1483            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
1484            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
1485            Ok((
1486                File {
1487                    entry_id: Some(entry.id),
1488                    worktree: handle,
1489                    worktree_path,
1490                    path: entry.path,
1491                    mtime: entry.mtime,
1492                    is_local: true,
1493                },
1494                text,
1495            ))
1496        })
1497    }
1498
1499    pub fn save_buffer_as(
1500        &self,
1501        buffer: ModelHandle<Buffer>,
1502        path: impl Into<Arc<Path>>,
1503        text: Rope,
1504        cx: &mut ModelContext<Worktree>,
1505    ) -> Task<Result<File>> {
1506        let save = self.save(path, text, cx);
1507        cx.spawn(|this, mut cx| async move {
1508            let entry = save.await?;
1509            this.update(&mut cx, |this, cx| {
1510                let this = this.as_local_mut().unwrap();
1511                this.open_buffers.insert(buffer.id(), buffer.downgrade());
1512                Ok(File {
1513                    entry_id: Some(entry.id),
1514                    worktree: cx.handle(),
1515                    worktree_path: this.abs_path.clone(),
1516                    path: entry.path,
1517                    mtime: entry.mtime,
1518                    is_local: true,
1519                })
1520            })
1521        })
1522    }
1523
1524    fn save(
1525        &self,
1526        path: impl Into<Arc<Path>>,
1527        text: Rope,
1528        cx: &mut ModelContext<Worktree>,
1529    ) -> Task<Result<Entry>> {
1530        let path = path.into();
1531        let abs_path = self.absolutize(&path);
1532        let background_snapshot = self.background_snapshot.clone();
1533        let fs = self.fs.clone();
1534        let save = cx.background().spawn(async move {
1535            fs.save(&abs_path, &text).await?;
1536            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
1537        });
1538
1539        cx.spawn(|this, mut cx| async move {
1540            let entry = save.await?;
1541            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
1542            Ok(entry)
1543        })
1544    }
1545
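        // Start sharing this worktree as part of `project_id`. A background task receives
        // snapshots pushed onto `snapshots_tx` and streams diffs (built with
        // `Snapshot::build_update`) to the server; the initial state is sent as a single
        // `ShareWorktree` request.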
1546    pub fn share(
1547        &mut self,
1548        project_id: u64,
1549        cx: &mut ModelContext<Worktree>,
1550    ) -> Task<anyhow::Result<()>> {
1551        if self.share.is_some() {
1552            return Task::ready(Ok(()));
1553        }
1554
1555        let snapshot = self.snapshot();
1556        let rpc = self.client.clone();
1557        let worktree_id = cx.model_id() as u64;
1558        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
1559        let maintain_remote_snapshot = cx.background().spawn({
1560            let rpc = rpc.clone();
1561            let snapshot = snapshot.clone();
1562            async move {
1563                let mut prev_snapshot = snapshot;
1564                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
1565                    let message =
1566                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
1567                    match rpc.send(message).await {
1568                        Ok(()) => prev_snapshot = snapshot,
1569                        Err(err) => log::error!("error sending snapshot diff {}", err),
1570                    }
1571                }
1572            }
1573        });
1574        self.share = Some(ShareState {
1575            project_id,
1576            snapshots_tx: snapshots_to_send_tx,
1577            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
1578        });
1579
1580        let diagnostic_summaries = self.diagnostic_summaries.clone();
1581        let share_message = cx.background().spawn(async move {
1582            proto::ShareWorktree {
1583                project_id,
1584                worktree: Some(snapshot.to_proto(&diagnostic_summaries)),
1585            }
1586        });
1587
1588        cx.foreground().spawn(async move {
1589            rpc.request(share_message.await).await?;
1590            Ok(())
1591        })
1592    }
1593
1594    pub fn unshare(&mut self) {
1595        self.share.take();
1596    }
1597
1598    pub fn is_shared(&self) -> bool {
1599        self.share.is_some()
1600    }
1601}
1602
1603fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1604    let contents = smol::block_on(fs.load(&abs_path))?;
1605    let parent = abs_path.parent().unwrap_or(Path::new("/"));
1606    let mut builder = GitignoreBuilder::new(parent);
1607    for line in contents.lines() {
1608        builder.add_line(Some(abs_path.into()), line)?;
1609    }
1610    Ok(builder.build()?)
1611}
1612
1613impl Deref for Worktree {
1614    type Target = Snapshot;
1615
1616    fn deref(&self) -> &Self::Target {
1617        match self {
1618            Worktree::Local(worktree) => &worktree.snapshot,
1619            Worktree::Remote(worktree) => &worktree.snapshot,
1620        }
1621    }
1622}
1623
1624impl Deref for LocalWorktree {
1625    type Target = Snapshot;
1626
1627    fn deref(&self) -> &Self::Target {
1628        &self.snapshot
1629    }
1630}
1631
1632impl Deref for RemoteWorktree {
1633    type Target = Snapshot;
1634
1635    fn deref(&self) -> &Self::Target {
1636        &self.snapshot
1637    }
1638}
1639
1640impl fmt::Debug for LocalWorktree {
1641    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1642        self.snapshot.fmt(f)
1643    }
1644}
1645
1646impl RemoteWorktree {
1647    fn get_open_buffer(
1648        &mut self,
1649        path: &Path,
1650        cx: &mut ModelContext<Worktree>,
1651    ) -> Option<ModelHandle<Buffer>> {
1652        let handle = cx.handle();
1653        let mut existing_buffer = None;
1654        self.open_buffers.retain(|_buffer_id, buffer| {
1655            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
1656                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1657                    if file.worktree == handle && file.path().as_ref() == path {
1658                        existing_buffer = Some(buffer);
1659                    }
1660                }
1661                true
1662            } else {
1663                false
1664            }
1665        });
1666        existing_buffer
1667    }
1668
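        // Open a buffer hosted by the remote worktree: request its contents over RPC,
        // reconstruct a `File` from our copy of the entry, and apply any operations that
        // arrived while the buffer was still loading.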
1669    fn open_buffer(
1670        &mut self,
1671        path: &Path,
1672        cx: &mut ModelContext<Worktree>,
1673    ) -> Task<Result<ModelHandle<Buffer>>> {
1674        let rpc = self.client.clone();
1675        let replica_id = self.replica_id;
1676        let project_id = self.project_id;
1677        let remote_worktree_id = self.id();
1678        let root_path = self.snapshot.abs_path.clone();
1679        let path: Arc<Path> = Arc::from(path);
1680        let path_string = path.to_string_lossy().to_string();
1681        cx.spawn_weak(move |this, mut cx| async move {
1682            let entry = this
1683                .upgrade(&cx)
1684                .ok_or_else(|| anyhow!("worktree was closed"))?
1685                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
1686                .ok_or_else(|| anyhow!("file does not exist"))?;
1687            let response = rpc
1688                .request(proto::OpenBuffer {
1689                    project_id,
1690                    worktree_id: remote_worktree_id.to_proto(),
1691                    path: path_string,
1692                })
1693                .await?;
1694
1695            let this = this
1696                .upgrade(&cx)
1697                .ok_or_else(|| anyhow!("worktree was closed"))?;
1698            let file = File {
1699                entry_id: Some(entry.id),
1700                worktree: this.clone(),
1701                worktree_path: root_path,
1702                path: entry.path,
1703                mtime: entry.mtime,
1704                is_local: false,
1705            };
1706            let language = this.read_with(&cx, |this, _| {
1707                use language::File;
1708                this.languages().select_language(file.full_path()).cloned()
1709            });
1710            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
1711            let buffer_id = remote_buffer.id as usize;
1712            let buffer = cx.add_model(|cx| {
1713                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
1714                    .unwrap()
1715                    .with_language(language, None, cx)
1716            });
1717            this.update(&mut cx, move |this, cx| {
1718                let this = this.as_remote_mut().unwrap();
1719                if let Some(RemoteBuffer::Operations(pending_ops)) = this
1720                    .open_buffers
1721                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
1722                {
1723                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
1724                }
1725                Result::<_, anyhow::Error>::Ok(buffer)
1726            })
1727        })
1728    }
1729
1730    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1731        for (_, buffer) in self.open_buffers.drain() {
1732            if let RemoteBuffer::Loaded(buffer) = buffer {
1733                if let Some(buffer) = buffer.upgrade(cx) {
1734                    buffer.update(cx, |buffer, cx| buffer.close(cx))
1735                }
1736            }
1737        }
1738    }
1739
1740    fn snapshot(&self) -> Snapshot {
1741        self.snapshot.clone()
1742    }
1743
1744    pub fn update_from_remote(
1745        &mut self,
1746        envelope: TypedEnvelope<proto::UpdateWorktree>,
1747        cx: &mut ModelContext<Worktree>,
1748    ) -> Result<()> {
1749        let mut tx = self.updates_tx.clone();
1750        let payload = envelope.payload.clone();
1751        cx.background()
1752            .spawn(async move {
1753                tx.send(payload).await.expect("receiver runs to completion");
1754            })
1755            .detach();
1756
1757        Ok(())
1758    }
1759
1760    pub fn update_diagnostic_summary(
1761        &mut self,
1762        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
1763        cx: &mut ModelContext<Worktree>,
1764    ) {
1765        if let Some(summary) = envelope.payload.summary {
1766            let path: Arc<Path> = Path::new(&summary.path).into();
1767            self.diagnostic_summaries.insert(
1768                PathKey(path.clone()),
1769                DiagnosticSummary {
1770                    error_count: summary.error_count as usize,
1771                    warning_count: summary.warning_count as usize,
1772                    info_count: summary.info_count as usize,
1773                    hint_count: summary.hint_count as usize,
1774                },
1775            );
1776            cx.emit(Event::DiagnosticsUpdated(path));
1777        }
1778    }
1779
1780    pub fn disk_based_diagnostics_updating(&self, cx: &mut ModelContext<Worktree>) {
1781        cx.emit(Event::DiskBasedDiagnosticsUpdating);
1782    }
1783
1784    pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
1785        cx.emit(Event::DiskBasedDiagnosticsUpdated);
1786    }
1787
1788    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1789        for (_, buffer) in &self.open_buffers {
1790            if let Some(buffer) = buffer.upgrade(cx) {
1791                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1792            }
1793        }
1794        cx.notify();
1795    }
1796}
1797
1798enum RemoteBuffer {
1799    Operations(Vec<Operation>),
1800    Loaded(WeakModelHandle<Buffer>),
1801}
1802
1803impl RemoteBuffer {
1804    fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1805        match self {
1806            Self::Operations(_) => None,
1807            Self::Loaded(buffer) => buffer.upgrade(cx),
1808        }
1809    }
1810}
1811
1812impl Snapshot {
1813    pub fn id(&self) -> WorktreeId {
1814        self.id
1815    }
1816
1817    pub fn to_proto(
1818        &self,
1819        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
1820    ) -> proto::Worktree {
1821        let root_name = self.root_name.clone();
1822        proto::Worktree {
1823            id: self.id.0 as u64,
1824            root_name,
1825            entries: self
1826                .entries_by_path
1827                .iter()
1828                .filter(|e| !e.is_ignored)
1829                .map(Into::into)
1830                .collect(),
1831            diagnostic_summaries: diagnostic_summaries
1832                .iter()
1833                .map(|(path, summary)| summary.to_proto(path.0.clone()))
1834                .collect(),
1835        }
1836    }
1837
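    // Compute an `UpdateWorktree` message that transforms `other` into `self`. Both
    // `entries_by_id` trees are ordered by entry id, so this is a merge walk: ids present
    // only in `self` become updated entries, ids present only in `other` become removed
    // entries, and shared ids are re-sent only when their `scan_id` has changed.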
1838    pub fn build_update(
1839        &self,
1840        other: &Self,
1841        project_id: u64,
1842        worktree_id: u64,
1843        include_ignored: bool,
1844    ) -> proto::UpdateWorktree {
1845        let mut updated_entries = Vec::new();
1846        let mut removed_entries = Vec::new();
1847        let mut self_entries = self
1848            .entries_by_id
1849            .cursor::<()>()
1850            .filter(|e| include_ignored || !e.is_ignored)
1851            .peekable();
1852        let mut other_entries = other
1853            .entries_by_id
1854            .cursor::<()>()
1855            .filter(|e| include_ignored || !e.is_ignored)
1856            .peekable();
1857        loop {
1858            match (self_entries.peek(), other_entries.peek()) {
1859                (Some(self_entry), Some(other_entry)) => {
1860                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1861                        Ordering::Less => {
1862                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1863                            updated_entries.push(entry);
1864                            self_entries.next();
1865                        }
1866                        Ordering::Equal => {
1867                            if self_entry.scan_id != other_entry.scan_id {
1868                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1869                                updated_entries.push(entry);
1870                            }
1871
1872                            self_entries.next();
1873                            other_entries.next();
1874                        }
1875                        Ordering::Greater => {
1876                            removed_entries.push(other_entry.id as u64);
1877                            other_entries.next();
1878                        }
1879                    }
1880                }
1881                (Some(self_entry), None) => {
1882                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1883                    updated_entries.push(entry);
1884                    self_entries.next();
1885                }
1886                (None, Some(other_entry)) => {
1887                    removed_entries.push(other_entry.id as u64);
1888                    other_entries.next();
1889                }
1890                (None, None) => break,
1891            }
1892        }
1893
1894        proto::UpdateWorktree {
1895            project_id,
1896            worktree_id,
1897            root_name: self.root_name().to_string(),
1898            updated_entries,
1899            removed_entries,
1900        }
1901    }
1902
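    // Apply an `UpdateWorktree` diff received from a peer: delete the removed entries,
    // then insert or replace the updated ones in both the path-ordered and id-ordered
    // trees under a fresh `scan_id`.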
1903    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1904        self.scan_id += 1;
1905        let scan_id = self.scan_id;
1906
1907        let mut entries_by_path_edits = Vec::new();
1908        let mut entries_by_id_edits = Vec::new();
1909        for entry_id in update.removed_entries {
1910            let entry_id = entry_id as usize;
1911            let entry = self
1912                .entry_for_id(entry_id)
1913                .ok_or_else(|| anyhow!("unknown entry"))?;
1914            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1915            entries_by_id_edits.push(Edit::Remove(entry.id));
1916        }
1917
1918        for entry in update.updated_entries {
1919            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1920            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1921                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1922            }
1923            entries_by_id_edits.push(Edit::Insert(PathEntry {
1924                id: entry.id,
1925                path: entry.path.clone(),
1926                is_ignored: entry.is_ignored,
1927                scan_id,
1928            }));
1929            entries_by_path_edits.push(Edit::Insert(entry));
1930        }
1931
1932        self.entries_by_path.edit(entries_by_path_edits, &());
1933        self.entries_by_id.edit(entries_by_id_edits, &());
1934
1935        Ok(())
1936    }
1937
1938    pub fn file_count(&self) -> usize {
1939        self.entries_by_path.summary().file_count
1940    }
1941
1942    pub fn visible_file_count(&self) -> usize {
1943        self.entries_by_path.summary().visible_file_count
1944    }
1945
1946    fn traverse_from_offset(
1947        &self,
1948        include_dirs: bool,
1949        include_ignored: bool,
1950        start_offset: usize,
1951    ) -> Traversal {
1952        let mut cursor = self.entries_by_path.cursor();
1953        cursor.seek(
1954            &TraversalTarget::Count {
1955                count: start_offset,
1956                include_dirs,
1957                include_ignored,
1958            },
1959            Bias::Right,
1960            &(),
1961        );
1962        Traversal {
1963            cursor,
1964            include_dirs,
1965            include_ignored,
1966        }
1967    }
1968
1969    fn traverse_from_path(
1970        &self,
1971        include_dirs: bool,
1972        include_ignored: bool,
1973        path: &Path,
1974    ) -> Traversal {
1975        let mut cursor = self.entries_by_path.cursor();
1976        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1977        Traversal {
1978            cursor,
1979            include_dirs,
1980            include_ignored,
1981        }
1982    }
1983
1984    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1985        self.traverse_from_offset(false, include_ignored, start)
1986    }
1987
1988    pub fn entries(&self, include_ignored: bool) -> Traversal {
1989        self.traverse_from_offset(true, include_ignored, 0)
1990    }
1991
1992    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1993        let empty_path = Path::new("");
1994        self.entries_by_path
1995            .cursor::<()>()
1996            .filter(move |entry| entry.path.as_ref() != empty_path)
1997            .map(|entry| &entry.path)
1998    }
1999
2000    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
2001        let mut cursor = self.entries_by_path.cursor();
2002        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
2003        let traversal = Traversal {
2004            cursor,
2005            include_dirs: true,
2006            include_ignored: true,
2007        };
2008        ChildEntriesIter {
2009            traversal,
2010            parent_path,
2011        }
2012    }
2013
2014    pub fn root_entry(&self) -> Option<&Entry> {
2015        self.entry_for_path("")
2016    }
2017
2018    pub fn root_name(&self) -> &str {
2019        &self.root_name
2020    }
2021
2022    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
2023        let path = path.as_ref();
2024        self.traverse_from_path(true, true, path)
2025            .entry()
2026            .and_then(|entry| {
2027                if entry.path.as_ref() == path {
2028                    Some(entry)
2029                } else {
2030                    None
2031                }
2032            })
2033    }
2034
2035    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
2036        let entry = self.entries_by_id.get(&id, &())?;
2037        self.entry_for_path(&entry.path)
2038    }
2039
2040    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
2041        self.entry_for_path(path.as_ref()).map(|e| e.inode)
2042    }
2043
2044    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
2045        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
2046            let abs_path = self.abs_path.join(&entry.path);
2047            match build_gitignore(&abs_path, fs) {
2048                Ok(ignore) => {
2049                    let ignore_dir_path = entry.path.parent().unwrap();
2050                    self.ignores
2051                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
2052                }
2053                Err(error) => {
2054                    log::error!(
2055                        "error loading .gitignore file {:?} - {:?}",
2056                        &entry.path,
2057                        error
2058                    );
2059                }
2060            }
2061        }
2062
2063        self.reuse_entry_id(&mut entry);
2064        self.entries_by_path.insert_or_replace(entry.clone(), &());
2065        self.entries_by_id.insert_or_replace(
2066            PathEntry {
2067                id: entry.id,
2068                path: entry.path.clone(),
2069                is_ignored: entry.is_ignored,
2070                scan_id: self.scan_id,
2071            },
2072            &(),
2073        );
2074        entry
2075    }
2076
2077    fn populate_dir(
2078        &mut self,
2079        parent_path: Arc<Path>,
2080        entries: impl IntoIterator<Item = Entry>,
2081        ignore: Option<Arc<Gitignore>>,
2082    ) {
2083        let mut parent_entry = self
2084            .entries_by_path
2085            .get(&PathKey(parent_path.clone()), &())
2086            .unwrap()
2087            .clone();
2088        if let Some(ignore) = ignore {
2089            self.ignores.insert(parent_path, (ignore, self.scan_id));
2090        }
2091        if matches!(parent_entry.kind, EntryKind::PendingDir) {
2092            parent_entry.kind = EntryKind::Dir;
2093        } else {
2094            unreachable!();
2095        }
2096
2097        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2098        let mut entries_by_id_edits = Vec::new();
2099
2100        for mut entry in entries {
2101            self.reuse_entry_id(&mut entry);
2102            entries_by_id_edits.push(Edit::Insert(PathEntry {
2103                id: entry.id,
2104                path: entry.path.clone(),
2105                is_ignored: entry.is_ignored,
2106                scan_id: self.scan_id,
2107            }));
2108            entries_by_path_edits.push(Edit::Insert(entry));
2109        }
2110
2111        self.entries_by_path.edit(entries_by_path_edits, &());
2112        self.entries_by_id.edit(entries_by_id_edits, &());
2113    }
2114
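    // Keep entry ids stable across events: prefer an id recorded for this inode when its
    // old path was removed earlier in the batch (rename detection); otherwise reuse the id
    // of an existing entry at the same path.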
2115    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2116        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2117            entry.id = removed_entry_id;
2118        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2119            entry.id = existing_entry.id;
2120        }
2121    }
2122
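    // Splice out `path` and everything beneath it from the path-ordered tree, remembering
    // the removed ids by inode so a later re-insertion can reuse them, and bump the scan id
    // on the parent directory's ignore entry if the removed file was a `.gitignore`.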
2123    fn remove_path(&mut self, path: &Path) {
2124        let mut new_entries;
2125        let removed_entries;
2126        {
2127            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2128            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2129            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2130            new_entries.push_tree(cursor.suffix(&()), &());
2131        }
2132        self.entries_by_path = new_entries;
2133
2134        let mut entries_by_id_edits = Vec::new();
2135        for entry in removed_entries.cursor::<()>() {
2136            let removed_entry_id = self
2137                .removed_entry_ids
2138                .entry(entry.inode)
2139                .or_insert(entry.id);
2140            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2141            entries_by_id_edits.push(Edit::Remove(entry.id));
2142        }
2143        self.entries_by_id.edit(entries_by_id_edits, &());
2144
2145        if path.file_name() == Some(&GITIGNORE) {
2146            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
2147                *scan_id = self.scan_id;
2148            }
2149        }
2150    }
2151
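    // Rebuild the stack of `.gitignore` matchers that apply to `path`, walking its ancestors
    // from the worktree root downward. For example, "a/b/c.rs" consults the ignores
    // registered for "", "a", and "a/b", in that order; if any ancestor is itself ignored,
    // everything below it is ignored as well.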
2152    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2153        let mut new_ignores = Vec::new();
2154        for ancestor in path.ancestors().skip(1) {
2155            if let Some((ignore, _)) = self.ignores.get(ancestor) {
2156                new_ignores.push((ancestor, Some(ignore.clone())));
2157            } else {
2158                new_ignores.push((ancestor, None));
2159            }
2160        }
2161
2162        let mut ignore_stack = IgnoreStack::none();
2163        for (parent_path, ignore) in new_ignores.into_iter().rev() {
2164            if ignore_stack.is_path_ignored(&parent_path, true) {
2165                ignore_stack = IgnoreStack::all();
2166                break;
2167            } else if let Some(ignore) = ignore {
2168                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
2169            }
2170        }
2171
2172        if ignore_stack.is_path_ignored(path, is_dir) {
2173            ignore_stack = IgnoreStack::all();
2174        }
2175
2176        ignore_stack
2177    }
2178}
2179
2180impl fmt::Debug for Snapshot {
2181    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2182        for entry in self.entries_by_path.cursor::<()>() {
2183            for _ in entry.path.ancestors().skip(1) {
2184                write!(f, " ")?;
2185            }
2186            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2187        }
2188        Ok(())
2189    }
2190}
2191
2192#[derive(Clone, PartialEq)]
2193pub struct File {
2194    entry_id: Option<usize>,
2195    worktree: ModelHandle<Worktree>,
2196    worktree_path: Arc<Path>,
2197    pub path: Arc<Path>,
2198    pub mtime: SystemTime,
2199    is_local: bool,
2200}
2201
2202impl language::File for File {
2203    fn mtime(&self) -> SystemTime {
2204        self.mtime
2205    }
2206
2207    fn path(&self) -> &Arc<Path> {
2208        &self.path
2209    }
2210
2211    fn abs_path(&self) -> Option<PathBuf> {
2212        if self.is_local {
2213            Some(self.worktree_path.join(&self.path))
2214        } else {
2215            None
2216        }
2217    }
2218
2219    fn full_path(&self) -> PathBuf {
2220        let mut full_path = PathBuf::new();
2221        if let Some(worktree_name) = self.worktree_path.file_name() {
2222            full_path.push(worktree_name);
2223        }
2224        full_path.push(&self.path);
2225        full_path
2226    }
2227
2228    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2229    /// of its worktree, then this method will return the name of the worktree itself.
2230    fn file_name<'a>(&'a self) -> Option<OsString> {
2231        self.path
2232            .file_name()
2233            .or_else(|| self.worktree_path.file_name())
2234            .map(Into::into)
2235    }
2236
2237    fn is_deleted(&self) -> bool {
2238        self.entry_id.is_none()
2239    }
2240
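    // Saving goes through the worktree that owns the file: local worktrees write to disk
    // and, when shared, notify collaborators with a `BufferSaved` message; remote worktrees
    // forward a `SaveBuffer` request to the host and use the version/mtime it reports.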
2241    fn save(
2242        &self,
2243        buffer_id: u64,
2244        text: Rope,
2245        version: clock::Global,
2246        cx: &mut MutableAppContext,
2247    ) -> Task<Result<(clock::Global, SystemTime)>> {
2248        let worktree_id = self.worktree.read(cx).id().to_proto();
2249        self.worktree.update(cx, |worktree, cx| match worktree {
2250            Worktree::Local(worktree) => {
2251                let rpc = worktree.client.clone();
2252                let project_id = worktree.share.as_ref().map(|share| share.project_id);
2253                let save = worktree.save(self.path.clone(), text, cx);
2254                cx.background().spawn(async move {
2255                    let entry = save.await?;
2256                    if let Some(project_id) = project_id {
2257                        rpc.send(proto::BufferSaved {
2258                            project_id,
2259                            worktree_id,
2260                            buffer_id,
2261                            version: (&version).into(),
2262                            mtime: Some(entry.mtime.into()),
2263                        })
2264                        .await?;
2265                    }
2266                    Ok((version, entry.mtime))
2267                })
2268            }
2269            Worktree::Remote(worktree) => {
2270                let rpc = worktree.client.clone();
2271                let project_id = worktree.project_id;
2272                cx.foreground().spawn(async move {
2273                    let response = rpc
2274                        .request(proto::SaveBuffer {
2275                            project_id,
2276                            worktree_id,
2277                            buffer_id,
2278                        })
2279                        .await?;
2280                    let version = response.version.try_into()?;
2281                    let mtime = response
2282                        .mtime
2283                        .ok_or_else(|| anyhow!("missing mtime"))?
2284                        .into();
2285                    Ok((version, mtime))
2286                })
2287            }
2288        })
2289    }
2290
2291    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
2292        let worktree = self.worktree.read(cx).as_local()?;
2293        let abs_path = worktree.absolutize(&self.path);
2294        let fs = worktree.fs.clone();
2295        Some(
2296            cx.background()
2297                .spawn(async move { fs.load(&abs_path).await }),
2298        )
2299    }
2300
2301    fn format_remote(
2302        &self,
2303        buffer_id: u64,
2304        cx: &mut MutableAppContext,
2305    ) -> Option<Task<Result<()>>> {
2306        let worktree = self.worktree.read(cx);
2307        let worktree_id = worktree.id().to_proto();
2308        let worktree = worktree.as_remote()?;
2309        let rpc = worktree.client.clone();
2310        let project_id = worktree.project_id;
2311        Some(cx.foreground().spawn(async move {
2312            rpc.request(proto::FormatBuffer {
2313                project_id,
2314                worktree_id,
2315                buffer_id,
2316            })
2317            .await?;
2318            Ok(())
2319        }))
2320    }
2321
2322    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
2323        self.worktree.update(cx, |worktree, cx| {
2324            worktree.send_buffer_update(buffer_id, operation, cx);
2325        });
2326    }
2327
2328    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
2329        self.worktree.update(cx, |worktree, cx| {
2330            if let Worktree::Remote(worktree) = worktree {
2331                let project_id = worktree.project_id;
2332                let worktree_id = worktree.id().to_proto();
2333                let rpc = worktree.client.clone();
2334                cx.background()
2335                    .spawn(async move {
2336                        if let Err(error) = rpc
2337                            .send(proto::CloseBuffer {
2338                                project_id,
2339                                worktree_id,
2340                                buffer_id,
2341                            })
2342                            .await
2343                        {
2344                            log::error!("error closing remote buffer: {}", error);
2345                        }
2346                    })
2347                    .detach();
2348            }
2349        });
2350    }
2351
2352    fn as_any(&self) -> &dyn Any {
2353        self
2354    }
2355}
2356
2357impl File {
2358    pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2359        file.and_then(|f| f.as_any().downcast_ref())
2360    }
2361
2362    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2363        self.worktree.read(cx).id()
2364    }
2365
2366    pub fn project_entry(&self, cx: &AppContext) -> Option<ProjectEntry> {
2367        self.entry_id.map(|entry_id| ProjectEntry {
2368            worktree_id: self.worktree_id(cx),
2369            entry_id,
2370        })
2371    }
2372}
2373
2374#[derive(Clone, Debug)]
2375pub struct Entry {
2376    pub id: usize,
2377    pub kind: EntryKind,
2378    pub path: Arc<Path>,
2379    pub inode: u64,
2380    pub mtime: SystemTime,
2381    pub is_symlink: bool,
2382    pub is_ignored: bool,
2383}
2384
2385#[derive(Clone, Debug)]
2386pub enum EntryKind {
2387    PendingDir,
2388    Dir,
2389    File(CharBag),
2390}
2391
2392impl Entry {
2393    fn new(
2394        path: Arc<Path>,
2395        metadata: &fs::Metadata,
2396        next_entry_id: &AtomicUsize,
2397        root_char_bag: CharBag,
2398    ) -> Self {
2399        Self {
2400            id: next_entry_id.fetch_add(1, SeqCst),
2401            kind: if metadata.is_dir {
2402                EntryKind::PendingDir
2403            } else {
2404                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2405            },
2406            path,
2407            inode: metadata.inode,
2408            mtime: metadata.mtime,
2409            is_symlink: metadata.is_symlink,
2410            is_ignored: false,
2411        }
2412    }
2413
2414    pub fn is_dir(&self) -> bool {
2415        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2416    }
2417
2418    pub fn is_file(&self) -> bool {
2419        matches!(self.kind, EntryKind::File(_))
2420    }
2421}
2422
2423impl sum_tree::Item for Entry {
2424    type Summary = EntrySummary;
2425
2426    fn summary(&self) -> Self::Summary {
2427        let visible_count = if self.is_ignored { 0 } else { 1 };
2428        let file_count;
2429        let visible_file_count;
2430        if self.is_file() {
2431            file_count = 1;
2432            visible_file_count = visible_count;
2433        } else {
2434            file_count = 0;
2435            visible_file_count = 0;
2436        }
2437
2438        EntrySummary {
2439            max_path: self.path.clone(),
2440            count: 1,
2441            visible_count,
2442            file_count,
2443            visible_file_count,
2444        }
2445    }
2446}
2447
2448impl sum_tree::KeyedItem for Entry {
2449    type Key = PathKey;
2450
2451    fn key(&self) -> Self::Key {
2452        PathKey(self.path.clone())
2453    }
2454}
2455
2456#[derive(Clone, Debug)]
2457pub struct EntrySummary {
2458    max_path: Arc<Path>,
2459    count: usize,
2460    visible_count: usize,
2461    file_count: usize,
2462    visible_file_count: usize,
2463}
2464
2465impl Default for EntrySummary {
2466    fn default() -> Self {
2467        Self {
2468            max_path: Arc::from(Path::new("")),
2469            count: 0,
2470            visible_count: 0,
2471            file_count: 0,
2472            visible_file_count: 0,
2473        }
2474    }
2475}
2476
2477impl sum_tree::Summary for EntrySummary {
2478    type Context = ();
2479
2480    fn add_summary(&mut self, rhs: &Self, _: &()) {
2481        self.max_path = rhs.max_path.clone();
            self.count += rhs.count;
2482        self.visible_count += rhs.visible_count;
2483        self.file_count += rhs.file_count;
2484        self.visible_file_count += rhs.visible_file_count;
2485    }
2486}
2487
2488#[derive(Clone, Debug)]
2489struct PathEntry {
2490    id: usize,
2491    path: Arc<Path>,
2492    is_ignored: bool,
2493    scan_id: usize,
2494}
2495
2496impl sum_tree::Item for PathEntry {
2497    type Summary = PathEntrySummary;
2498
2499    fn summary(&self) -> Self::Summary {
2500        PathEntrySummary { max_id: self.id }
2501    }
2502}
2503
2504impl sum_tree::KeyedItem for PathEntry {
2505    type Key = usize;
2506
2507    fn key(&self) -> Self::Key {
2508        self.id
2509    }
2510}
2511
2512#[derive(Clone, Debug, Default)]
2513struct PathEntrySummary {
2514    max_id: usize,
2515}
2516
2517impl sum_tree::Summary for PathEntrySummary {
2518    type Context = ();
2519
2520    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2521        self.max_id = summary.max_id;
2522    }
2523}
2524
2525impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
2526    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2527        *self = summary.max_id;
2528    }
2529}
2530
2531#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2532pub struct PathKey(Arc<Path>);
2533
2534impl Default for PathKey {
2535    fn default() -> Self {
2536        Self(Path::new("").into())
2537    }
2538}
2539
2540impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2541    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2542        self.0 = summary.max_path.clone();
2543    }
2544}
2545
2546struct BackgroundScanner {
2547    fs: Arc<dyn Fs>,
2548    snapshot: Arc<Mutex<Snapshot>>,
2549    notify: Sender<ScanState>,
2550    executor: Arc<executor::Background>,
2551}
2552
2553impl BackgroundScanner {
2554    fn new(
2555        snapshot: Arc<Mutex<Snapshot>>,
2556        notify: Sender<ScanState>,
2557        fs: Arc<dyn Fs>,
2558        executor: Arc<executor::Background>,
2559    ) -> Self {
2560        Self {
2561            fs,
2562            snapshot,
2563            notify,
2564            executor,
2565        }
2566    }
2567
2568    fn abs_path(&self) -> Arc<Path> {
2569        self.snapshot.lock().abs_path.clone()
2570    }
2571
2572    fn snapshot(&self) -> Snapshot {
2573        self.snapshot.lock().clone()
2574    }
2575
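    // Main loop of the scanner: report `Scanning`, perform the initial recursive scan,
    // report `Idle` (or `Err`), then handle incoming batches of FS events, bracketing each
    // batch with `Scanning`/`Idle` notifications. The loop ends when the event stream or
    // the notify channel closes.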
2576    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2577        if self.notify.send(ScanState::Scanning).await.is_err() {
2578            return;
2579        }
2580
2581        if let Err(err) = self.scan_dirs().await {
2582            if self
2583                .notify
2584                .send(ScanState::Err(Arc::new(err)))
2585                .await
2586                .is_err()
2587            {
2588                return;
2589            }
2590        }
2591
2592        if self.notify.send(ScanState::Idle).await.is_err() {
2593            return;
2594        }
2595
2596        futures::pin_mut!(events_rx);
2597        while let Some(events) = events_rx.next().await {
2598            if self.notify.send(ScanState::Scanning).await.is_err() {
2599                break;
2600            }
2601
2602            if !self.process_events(events).await {
2603                break;
2604            }
2605
2606            if self.notify.send(ScanState::Idle).await.is_err() {
2607                break;
2608            }
2609        }
2610    }
2611
2612    async fn scan_dirs(&mut self) -> Result<()> {
2613        let root_char_bag;
2614        let next_entry_id;
2615        let is_dir;
2616        {
2617            let snapshot = self.snapshot.lock();
2618            root_char_bag = snapshot.root_char_bag;
2619            next_entry_id = snapshot.next_entry_id.clone();
2620            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
2621        };
2622
2623        if is_dir {
2624            let path: Arc<Path> = Arc::from(Path::new(""));
2625            let abs_path = self.abs_path();
2626            let (tx, rx) = channel::unbounded();
2627            tx.send(ScanJob {
2628                abs_path: abs_path.to_path_buf(),
2629                path,
2630                ignore_stack: IgnoreStack::none(),
2631                scan_queue: tx.clone(),
2632            })
2633            .await
2634            .unwrap();
2635            drop(tx);
2636
2637            self.executor
2638                .scoped(|scope| {
2639                    for _ in 0..self.executor.num_cpus() {
2640                        scope.spawn(async {
2641                            while let Ok(job) = rx.recv().await {
2642                                if let Err(err) = self
2643                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2644                                    .await
2645                                {
2646                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
2647                                }
2648                            }
2649                        });
2650                    }
2651                })
2652                .await;
2653        }
2654
2655        Ok(())
2656    }
2657
2658    async fn scan_dir(
2659        &self,
2660        root_char_bag: CharBag,
2661        next_entry_id: Arc<AtomicUsize>,
2662        job: &ScanJob,
2663    ) -> Result<()> {
2664        let mut new_entries: Vec<Entry> = Vec::new();
2665        let mut new_jobs: Vec<ScanJob> = Vec::new();
2666        let mut ignore_stack = job.ignore_stack.clone();
2667        let mut new_ignore = None;
2668
2669        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2670        while let Some(child_abs_path) = child_paths.next().await {
2671            let child_abs_path = match child_abs_path {
2672                Ok(child_abs_path) => child_abs_path,
2673                Err(error) => {
2674                    log::error!("error processing entry {:?}", error);
2675                    continue;
2676                }
2677            };
2678            let child_name = child_abs_path.file_name().unwrap();
2679            let child_path: Arc<Path> = job.path.join(child_name).into();
2680            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
2681                Some(metadata) => metadata,
2682                None => continue,
2683            };
2684
2685            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2686            if child_name == *GITIGNORE {
2687                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
2688                    Ok(ignore) => {
2689                        let ignore = Arc::new(ignore);
2690                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2691                        new_ignore = Some(ignore);
2692                    }
2693                    Err(error) => {
2694                        log::error!(
2695                            "error loading .gitignore file {:?} - {:?}",
2696                            child_name,
2697                            error
2698                        );
2699                    }
2700                }
2701
2702                // Update the ignore status of any child entries we've already processed to
2703                // reflect the ignore file in the current directory. Because `.gitignore` starts
2704                // with a `.`, it is usually encountered early, so such entries should be rare.
2705                // Also update the ignore stack associated with any new jobs.
2706                let mut new_jobs = new_jobs.iter_mut();
2707                for entry in &mut new_entries {
2708                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2709                    if entry.is_dir() {
2710                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2711                            IgnoreStack::all()
2712                        } else {
2713                            ignore_stack.clone()
2714                        };
2715                    }
2716                }
2717            }
2718
2719            let mut child_entry = Entry::new(
2720                child_path.clone(),
2721                &child_metadata,
2722                &next_entry_id,
2723                root_char_bag,
2724            );
2725
2726            if child_metadata.is_dir {
2727                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
2728                child_entry.is_ignored = is_ignored;
2729                new_entries.push(child_entry);
2730                new_jobs.push(ScanJob {
2731                    abs_path: child_abs_path,
2732                    path: child_path,
2733                    ignore_stack: if is_ignored {
2734                        IgnoreStack::all()
2735                    } else {
2736                        ignore_stack.clone()
2737                    },
2738                    scan_queue: job.scan_queue.clone(),
2739                });
2740            } else {
2741                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
2742                new_entries.push(child_entry);
2743            };
2744        }
2745
2746        self.snapshot
2747            .lock()
2748            .populate_dir(job.path.clone(), new_entries, new_ignore);
2749        for new_job in new_jobs {
2750            job.scan_queue.send(new_job).await.unwrap();
2751        }
2752
2753        Ok(())
2754    }
2755
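    // Handle one batch of FS events: skip events outside the root, remove the affected
    // subtrees from a working copy of the snapshot, re-stat each event path and reinsert it,
    // queue any directories for a parallel rescan, publish the new snapshot, and finally
    // refresh ignore statuses. Returns false if the root can no longer be resolved.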
2756    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2757        let mut snapshot = self.snapshot();
2758        snapshot.scan_id += 1;
2759
2760        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
2761            abs_path
2762        } else {
2763            return false;
2764        };
2765        let root_char_bag = snapshot.root_char_bag;
2766        let next_entry_id = snapshot.next_entry_id.clone();
2767
2768        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2769        events.dedup_by(|a, b| a.path.starts_with(&b.path));
2770
2771        for event in &events {
2772            match event.path.strip_prefix(&root_abs_path) {
2773                Ok(path) => snapshot.remove_path(&path),
2774                Err(_) => {
2775                    log::error!(
2776                        "unexpected event {:?} for root path {:?}",
2777                        event.path,
2778                        root_abs_path
2779                    );
2780                    continue;
2781                }
2782            }
2783        }
2784
2785        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2786        for event in events {
2787            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
2788                Ok(path) => Arc::from(path.to_path_buf()),
2789                Err(_) => {
2790                    log::error!(
2791                        "unexpected event {:?} for root path {:?}",
2792                        event.path,
2793                        root_abs_path
2794                    );
2795                    continue;
2796                }
2797            };
2798
2799            match self.fs.metadata(&event.path).await {
2800                Ok(Some(metadata)) => {
2801                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
2802                    let mut fs_entry = Entry::new(
2803                        path.clone(),
2804                        &metadata,
2805                        snapshot.next_entry_id.as_ref(),
2806                        snapshot.root_char_bag,
2807                    );
2808                    fs_entry.is_ignored = ignore_stack.is_all();
2809                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
2810                    if metadata.is_dir {
2811                        scan_queue_tx
2812                            .send(ScanJob {
2813                                abs_path: event.path,
2814                                path,
2815                                ignore_stack,
2816                                scan_queue: scan_queue_tx.clone(),
2817                            })
2818                            .await
2819                            .unwrap();
2820                    }
2821                }
2822                Ok(None) => {}
2823                Err(err) => {
2824                    // TODO - create a special 'error' entry in the entries tree to mark this
2825                    log::error!("error reading file on event {:?}", err);
2826                }
2827            }
2828        }
2829
2830        *self.snapshot.lock() = snapshot;
2831
2832        // Scan any directories that were created as part of this event batch.
2833        drop(scan_queue_tx);
2834        self.executor
2835            .scoped(|scope| {
2836                for _ in 0..self.executor.num_cpus() {
2837                    scope.spawn(async {
2838                        while let Ok(job) = scan_queue_rx.recv().await {
2839                            if let Err(err) = self
2840                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2841                                .await
2842                            {
2843                                log::error!("error scanning {:?}: {}", job.abs_path, err);
2844                            }
2845                        }
2846                    });
2847                }
2848            })
2849            .await;
2850
2851        // Attempt to detect renames only over a single batch of file-system events.
2852        self.snapshot.lock().removed_entry_ids.clear();
2853
2854        self.update_ignore_statuses().await;
2855        true
2856    }
2857
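    // Re-evaluate ignore status for directories whose `.gitignore` changed during this scan,
    // dropping ignores whose files have disappeared. Nested paths are coalesced so each
    // affected subtree is traversed once, in parallel across worker tasks.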
2858    async fn update_ignore_statuses(&self) {
2859        let mut snapshot = self.snapshot();
2860
2861        let mut ignores_to_update = Vec::new();
2862        let mut ignores_to_delete = Vec::new();
2863        for (parent_path, (_, scan_id)) in &snapshot.ignores {
2864            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2865                ignores_to_update.push(parent_path.clone());
2866            }
2867
2868            let ignore_path = parent_path.join(&*GITIGNORE);
2869            if snapshot.entry_for_path(ignore_path).is_none() {
2870                ignores_to_delete.push(parent_path.clone());
2871            }
2872        }
2873
2874        for parent_path in ignores_to_delete {
2875            snapshot.ignores.remove(&parent_path);
2876            self.snapshot.lock().ignores.remove(&parent_path);
2877        }
2878
2879        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2880        ignores_to_update.sort_unstable();
2881        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2882        while let Some(parent_path) = ignores_to_update.next() {
2883            while ignores_to_update
2884                .peek()
2885                .map_or(false, |p| p.starts_with(&parent_path))
2886            {
2887                ignores_to_update.next().unwrap();
2888            }
2889
2890            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2891            ignore_queue_tx
2892                .send(UpdateIgnoreStatusJob {
2893                    path: parent_path,
2894                    ignore_stack,
2895                    ignore_queue: ignore_queue_tx.clone(),
2896                })
2897                .await
2898                .unwrap();
2899        }
2900        drop(ignore_queue_tx);
2901
2902        self.executor
2903            .scoped(|scope| {
2904                for _ in 0..self.executor.num_cpus() {
2905                    scope.spawn(async {
2906                        while let Ok(job) = ignore_queue_rx.recv().await {
2907                            self.update_ignore_status(job, &snapshot).await;
2908                        }
2909                    });
2910                }
2911            })
2912            .await;
2913    }
2914
2915    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
2916        let mut ignore_stack = job.ignore_stack;
2917        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2918            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2919        }
2920
2921        let mut entries_by_id_edits = Vec::new();
2922        let mut entries_by_path_edits = Vec::new();
2923        for mut entry in snapshot.child_entries(&job.path).cloned() {
2924            let was_ignored = entry.is_ignored;
2925            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2926            if entry.is_dir() {
2927                let child_ignore_stack = if entry.is_ignored {
2928                    IgnoreStack::all()
2929                } else {
2930                    ignore_stack.clone()
2931                };
2932                job.ignore_queue
2933                    .send(UpdateIgnoreStatusJob {
2934                        path: entry.path.clone(),
2935                        ignore_stack: child_ignore_stack,
2936                        ignore_queue: job.ignore_queue.clone(),
2937                    })
2938                    .await
2939                    .unwrap();
2940            }
2941
2942            if entry.is_ignored != was_ignored {
2943                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2944                path_entry.scan_id = snapshot.scan_id;
2945                path_entry.is_ignored = entry.is_ignored;
2946                entries_by_id_edits.push(Edit::Insert(path_entry));
2947                entries_by_path_edits.push(Edit::Insert(entry));
2948            }
2949        }
2950
2951        let mut snapshot = self.snapshot.lock();
2952        snapshot.entries_by_path.edit(entries_by_path_edits, &());
2953        snapshot.entries_by_id.edit(entries_by_id_edits, &());
2954    }
2955}
2956
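    /// Re-reads the on-disk metadata for a single path and inserts the resulting
    /// entry into the shared snapshot, returning the refreshed entry.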
2957async fn refresh_entry(
2958    fs: &dyn Fs,
2959    snapshot: &Mutex<Snapshot>,
2960    path: Arc<Path>,
2961    abs_path: &Path,
2962) -> Result<Entry> {
2963    let root_char_bag;
2964    let next_entry_id;
2965    {
2966        let snapshot = snapshot.lock();
2967        root_char_bag = snapshot.root_char_bag;
2968        next_entry_id = snapshot.next_entry_id.clone();
2969    }
2970    let entry = Entry::new(
2971        path,
2972        &fs.metadata(abs_path)
2973            .await?
2974            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2975        &next_entry_id,
2976        root_char_bag,
2977    );
2978    Ok(snapshot.lock().insert_entry(entry, fs))
2979}
2980
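    /// Extends the root's character bag with the lowercased characters of `path`,
    /// producing the bag used to fuzzy-match against this entry. For example, a
    /// path of "a/B.rs" contributes 'a', '/', 'b', '.', 'r', and 's'.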
2981fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2982    let mut result = root_char_bag;
2983    result.extend(
2984        path.to_string_lossy()
2985            .chars()
2986            .map(|c| c.to_ascii_lowercase()),
2987    );
2988    result
2989}
2990
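    /// A unit of work for the background directory scan: one directory to read,
    /// the ignore stack in effect at that directory, and the queue used to
    /// schedule its subdirectories.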
2991struct ScanJob {
2992    abs_path: PathBuf,
2993    path: Arc<Path>,
2994    ignore_stack: Arc<IgnoreStack>,
2995    scan_queue: Sender<ScanJob>,
2996}
2997
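    /// A unit of work for re-applying ignore rules: one directory whose children
    /// should be re-checked against `ignore_stack`, and the queue used to schedule
    /// its subdirectories.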
2998struct UpdateIgnoreStatusJob {
2999    path: Arc<Path>,
3000    ignore_stack: Arc<IgnoreStack>,
3001    ignore_queue: Sender<UpdateIgnoreStatusJob>,
3002}
3003
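    /// Test-only helpers on worktree model handles.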
3004pub trait WorktreeHandle {
3005    #[cfg(test)]
3006    fn flush_fs_events<'a>(
3007        &self,
3008        cx: &'a gpui::TestAppContext,
3009    ) -> futures::future::LocalBoxFuture<'a, ()>;
3010}
3011
3012impl WorktreeHandle for ModelHandle<Worktree> {
3013    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
3014    // occurred before the worktree was constructed. These events can cause the worktree to perform
3015    // extra directory scans and emit extra scan-state notifications.
3016    //
3017    // This function mutates the worktree's directory and waits for those mutations to be picked up,
3018    // to ensure that all redundant FS events have already been processed.
3019    #[cfg(test)]
3020    fn flush_fs_events<'a>(
3021        &self,
3022        cx: &'a gpui::TestAppContext,
3023    ) -> futures::future::LocalBoxFuture<'a, ()> {
3024        use smol::future::FutureExt;
3025
3026        let filename = "fs-event-sentinel";
3027        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
3028        let tree = self.clone();
3029        async move {
3030            std::fs::write(root_path.join(filename), "").unwrap();
3031            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
3032                .await;
3033
3034            std::fs::remove_file(root_path.join(filename)).unwrap();
3035            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
3036                .await;
3037
3038            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3039                .await;
3040        }
3041        .boxed_local()
3042    }
3043}
3044
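    /// The `sum_tree` dimension accumulated while traversing entries: the greatest
    /// path visited so far, plus running counts of all, visible, file, and
    /// visible-file entries. These counts back `Traversal::offset` and the
    /// `TraversalTarget::Count` seek target.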
3045#[derive(Clone, Debug)]
3046struct TraversalProgress<'a> {
3047    max_path: &'a Path,
3048    count: usize,
3049    visible_count: usize,
3050    file_count: usize,
3051    visible_file_count: usize,
3052}
3053
3054impl<'a> TraversalProgress<'a> {
3055    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3056        match (include_ignored, include_dirs) {
3057            (true, true) => self.count,
3058            (true, false) => self.file_count,
3059            (false, true) => self.visible_count,
3060            (false, false) => self.visible_file_count,
3061        }
3062    }
3063}
3064
3065impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3066    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3067        self.max_path = summary.max_path.as_ref();
3068        self.count += summary.count;
3069        self.visible_count += summary.visible_count;
3070        self.file_count += summary.file_count;
3071        self.visible_file_count += summary.visible_file_count;
3072    }
3073}
3074
3075impl<'a> Default for TraversalProgress<'a> {
3076    fn default() -> Self {
3077        Self {
3078            max_path: Path::new(""),
3079            count: 0,
3080            visible_count: 0,
3081            file_count: 0,
3082            visible_file_count: 0,
3083        }
3084    }
3085}
3086
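    /// A cursor-based iterator over worktree entries that can filter out
    /// directories and/or ignored entries; it backs the `entries` and `paths`
    /// iterators exercised in the tests below. An illustrative sketch (not
    /// compiled as a doc test):
    ///
    /// ```ignore
    /// // Walk every non-ignored entry in path order, printing its path.
    /// for entry in tree.entries(false) {
    ///     println!("{:?}", entry.path);
    /// }
    /// ```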
3087pub struct Traversal<'a> {
3088    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
3089    include_ignored: bool,
3090    include_dirs: bool,
3091}
3092
3093impl<'a> Traversal<'a> {
3094    pub fn advance(&mut self) -> bool {
3095        self.advance_to_offset(self.offset() + 1)
3096    }
3097
3098    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
3099        self.cursor.seek_forward(
3100            &TraversalTarget::Count {
3101                count: offset,
3102                include_dirs: self.include_dirs,
3103                include_ignored: self.include_ignored,
3104            },
3105            Bias::Right,
3106            &(),
3107        )
3108    }
3109
3110    pub fn advance_to_sibling(&mut self) -> bool {
3111        while let Some(entry) = self.cursor.item() {
3112            self.cursor.seek_forward(
3113                &TraversalTarget::PathSuccessor(&entry.path),
3114                Bias::Left,
3115                &(),
3116            );
3117            if let Some(entry) = self.cursor.item() {
3118                if (self.include_dirs || !entry.is_dir())
3119                    && (self.include_ignored || !entry.is_ignored)
3120                {
3121                    return true;
3122                }
3123            }
3124        }
3125        false
3126    }
3127
3128    pub fn entry(&self) -> Option<&'a Entry> {
3129        self.cursor.item()
3130    }
3131
3132    pub fn offset(&self) -> usize {
3133        self.cursor
3134            .start()
3135            .count(self.include_dirs, self.include_ignored)
3136    }
3137}
3138
3139impl<'a> Iterator for Traversal<'a> {
3140    type Item = &'a Entry;
3141
3142    fn next(&mut self) -> Option<Self::Item> {
3143        if let Some(item) = self.entry() {
3144            self.advance();
3145            Some(item)
3146        } else {
3147            None
3148        }
3149    }
3150}
3151
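    /// A seek target for `Traversal`'s cursor: an exact path, the first position
    /// outside a path's subtree (`PathSuccessor`), or the n-th entry counted
    /// according to the traversal's directory/ignored filters.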
3152#[derive(Debug)]
3153enum TraversalTarget<'a> {
3154    Path(&'a Path),
3155    PathSuccessor(&'a Path),
3156    Count {
3157        count: usize,
3158        include_ignored: bool,
3159        include_dirs: bool,
3160    },
3161}
3162
3163impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3164    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3165        match self {
3166            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3167            TraversalTarget::PathSuccessor(path) => {
3168                if !cursor_location.max_path.starts_with(path) {
3169                    Ordering::Equal
3170                } else {
3171                    Ordering::Greater
3172                }
3173            }
3174            TraversalTarget::Count {
3175                count,
3176                include_dirs,
3177                include_ignored,
3178            } => Ord::cmp(
3179                count,
3180                &cursor_location.count(*include_dirs, *include_ignored),
3181            ),
3182        }
3183    }
3184}
3185
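    /// Iterates over the immediate children of `parent_path`: each step yields the
    /// current entry while it is still inside the parent's subtree, then skips over
    /// that entry's own descendants via `advance_to_sibling`.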
3186struct ChildEntriesIter<'a> {
3187    parent_path: &'a Path,
3188    traversal: Traversal<'a>,
3189}
3190
3191impl<'a> Iterator for ChildEntriesIter<'a> {
3192    type Item = &'a Entry;
3193
3194    fn next(&mut self) -> Option<Self::Item> {
3195        if let Some(item) = self.traversal.entry() {
3196            if item.path.starts_with(&self.parent_path) {
3197                self.traversal.advance_to_sibling();
3198                return Some(item);
3199            }
3200        }
3201        None
3202    }
3203}
3204
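    /// Converts a local worktree entry into its protobuf representation for
    /// sharing with remote worktrees.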
3205impl<'a> From<&'a Entry> for proto::Entry {
3206    fn from(entry: &'a Entry) -> Self {
3207        Self {
3208            id: entry.id as u64,
3209            is_dir: entry.is_dir(),
3210            path: entry.path.to_string_lossy().to_string(),
3211            inode: entry.inode,
3212            mtime: Some(entry.mtime.into()),
3213            is_symlink: entry.is_symlink,
3214            is_ignored: entry.is_ignored,
3215        }
3216    }
3217}
3218
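    /// Reconstructs a worktree entry from its protobuf representation, rebuilding
    /// the fuzzy-matching character bag for files from the root's bag and the
    /// entry's path. Fails if the entry has no mtime.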
3219impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3220    type Error = anyhow::Error;
3221
3222    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3223        if let Some(mtime) = entry.mtime {
3224            let kind = if entry.is_dir {
3225                EntryKind::Dir
3226            } else {
3227                let mut char_bag = root_char_bag.clone();
3228                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3229                EntryKind::File(char_bag)
3230            };
3231            let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3232            Ok(Entry {
3233                id: entry.id as usize,
3234                kind,
3235                path: path.clone(),
3236                inode: entry.inode,
3237                mtime: mtime.into(),
3238                is_symlink: entry.is_symlink,
3239                is_ignored: entry.is_ignored,
3240            })
3241        } else {
3242            Err(anyhow!(
3243                "missing mtime in remote worktree entry {:?}",
3244                entry.path
3245            ))
3246        }
3247    }
3248}
3249
3250#[cfg(test)]
3251mod tests {
3252    use super::*;
3253    use crate::fs::FakeFs;
3254    use anyhow::Result;
3255    use client::test::{FakeHttpClient, FakeServer};
3256    use fs::RealFs;
3257    use gpui::test::subscribe;
3258    use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3259    use language::{Diagnostic, LanguageConfig};
3260    use lsp::Url;
3261    use rand::prelude::*;
3262    use serde_json::json;
3263    use std::{cell::RefCell, rc::Rc};
3264    use std::{
3265        env,
3266        fmt::Write,
3267        time::{SystemTime, UNIX_EPOCH},
3268    };
3269    use text::Point;
3270    use unindent::Unindent as _;
3271    use util::test::temp_tree;
3272
3273    #[gpui::test]
3274    async fn test_traversal(mut cx: gpui::TestAppContext) {
3275        let fs = FakeFs::new();
3276        fs.insert_tree(
3277            "/root",
3278            json!({
3279               ".gitignore": "a/b\n",
3280               "a": {
3281                   "b": "",
3282                   "c": "",
3283               }
3284            }),
3285        )
3286        .await;
3287
3288        let http_client = FakeHttpClient::with_404_response();
3289        let client = Client::new(http_client.clone());
3290        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3291
3292        let tree = Worktree::open_local(
3293            client,
3294            user_store,
3295            Arc::from(Path::new("/root")),
3296            Arc::new(fs),
3297            Default::default(),
3298            &mut cx.to_async(),
3299        )
3300        .await
3301        .unwrap();
3302        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3303            .await;
3304
3305        tree.read_with(&cx, |tree, _| {
3306            assert_eq!(
3307                tree.entries(false)
3308                    .map(|entry| entry.path.as_ref())
3309                    .collect::<Vec<_>>(),
3310                vec![
3311                    Path::new(""),
3312                    Path::new(".gitignore"),
3313                    Path::new("a"),
3314                    Path::new("a/c"),
3315                ]
3316            );
3317        })
3318    }
3319
3320    #[gpui::test]
3321    async fn test_save_file(mut cx: gpui::TestAppContext) {
3322        let dir = temp_tree(json!({
3323            "file1": "the old contents",
3324        }));
3325
3326        let http_client = FakeHttpClient::with_404_response();
3327        let client = Client::new(http_client.clone());
3328        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3329
3330        let tree = Worktree::open_local(
3331            client,
3332            user_store,
3333            dir.path(),
3334            Arc::new(RealFs),
3335            Default::default(),
3336            &mut cx.to_async(),
3337        )
3338        .await
3339        .unwrap();
3340        let buffer = tree
3341            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3342            .await
3343            .unwrap();
3344        let save = buffer.update(&mut cx, |buffer, cx| {
3345            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3346            buffer.save(cx).unwrap()
3347        });
3348        save.await.unwrap();
3349
3350        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
3351        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3352    }
3353
3354    #[gpui::test]
3355    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
3356        let dir = temp_tree(json!({
3357            "file1": "the old contents",
3358        }));
3359        let file_path = dir.path().join("file1");
3360
3361        let http_client = FakeHttpClient::with_404_response();
3362        let client = Client::new(http_client.clone());
3363        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3364
3365        let tree = Worktree::open_local(
3366            client,
3367            user_store,
3368            file_path.clone(),
3369            Arc::new(RealFs),
3370            Default::default(),
3371            &mut cx.to_async(),
3372        )
3373        .await
3374        .unwrap();
3375        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3376            .await;
3377        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));
3378
3379        let buffer = tree
3380            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
3381            .await
3382            .unwrap();
3383        let save = buffer.update(&mut cx, |buffer, cx| {
3384            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3385            buffer.save(cx).unwrap()
3386        });
3387        save.await.unwrap();
3388
3389        let new_text = std::fs::read_to_string(file_path).unwrap();
3390        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3391    }
3392
3393    #[gpui::test]
3394    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3395        let dir = temp_tree(json!({
3396            "a": {
3397                "file1": "",
3398                "file2": "",
3399                "file3": "",
3400            },
3401            "b": {
3402                "c": {
3403                    "file4": "",
3404                    "file5": "",
3405                }
3406            }
3407        }));
3408
3409        let user_id = 5;
3410        let http_client = FakeHttpClient::with_404_response();
3411        let mut client = Client::new(http_client.clone());
3412        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3413        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3414        let tree = Worktree::open_local(
3415            client,
3416            user_store.clone(),
3417            dir.path(),
3418            Arc::new(RealFs),
3419            Default::default(),
3420            &mut cx.to_async(),
3421        )
3422        .await
3423        .unwrap();
3424
3425        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3426            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3427            async move { buffer.await.unwrap() }
3428        };
3429        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3430            tree.read_with(cx, |tree, _| {
3431                tree.entry_for_path(path)
3432                    .expect(&format!("no entry for path {}", path))
3433                    .id
3434            })
3435        };
3436
3437        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3438        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3439        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3440        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3441
3442        let file2_id = id_for_path("a/file2", &cx);
3443        let file3_id = id_for_path("a/file3", &cx);
3444        let file4_id = id_for_path("b/c/file4", &cx);
3445
3446        // Wait for the initial scan.
3447        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3448            .await;
3449
3450        // Create a remote copy of this worktree.
3451        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3452        let remote = Worktree::remote(
3453            1,
3454            1,
3455            initial_snapshot.to_proto(&Default::default()),
3456            Client::new(http_client.clone()),
3457            user_store,
3458            Default::default(),
3459            &mut cx.to_async(),
3460        )
3461        .await
3462        .unwrap();
3463
3464        cx.read(|cx| {
3465            assert!(!buffer2.read(cx).is_dirty());
3466            assert!(!buffer3.read(cx).is_dirty());
3467            assert!(!buffer4.read(cx).is_dirty());
3468            assert!(!buffer5.read(cx).is_dirty());
3469        });
3470
3471        // Rename and delete files and directories.
3472        tree.flush_fs_events(&cx).await;
3473        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3474        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3475        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3476        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3477        tree.flush_fs_events(&cx).await;
3478
3479        let expected_paths = vec![
3480            "a",
3481            "a/file1",
3482            "a/file2.new",
3483            "b",
3484            "d",
3485            "d/file3",
3486            "d/file4",
3487        ];
3488
3489        cx.read(|app| {
3490            assert_eq!(
3491                tree.read(app)
3492                    .paths()
3493                    .map(|p| p.to_str().unwrap())
3494                    .collect::<Vec<_>>(),
3495                expected_paths
3496            );
3497
3498            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3499            assert_eq!(id_for_path("d/file3", &cx), file3_id);
3500            assert_eq!(id_for_path("d/file4", &cx), file4_id);
3501
3502            assert_eq!(
3503                buffer2.read(app).file().unwrap().path().as_ref(),
3504                Path::new("a/file2.new")
3505            );
3506            assert_eq!(
3507                buffer3.read(app).file().unwrap().path().as_ref(),
3508                Path::new("d/file3")
3509            );
3510            assert_eq!(
3511                buffer4.read(app).file().unwrap().path().as_ref(),
3512                Path::new("d/file4")
3513            );
3514            assert_eq!(
3515                buffer5.read(app).file().unwrap().path().as_ref(),
3516                Path::new("b/c/file5")
3517            );
3518
3519            assert!(!buffer2.read(app).file().unwrap().is_deleted());
3520            assert!(!buffer3.read(app).file().unwrap().is_deleted());
3521            assert!(!buffer4.read(app).file().unwrap().is_deleted());
3522            assert!(buffer5.read(app).file().unwrap().is_deleted());
3523        });
3524
3525        // Update the remote worktree. Check that it becomes consistent with the
3526        // local worktree.
3527        remote.update(&mut cx, |remote, cx| {
3528            let update_message =
3529                tree.read(cx)
3530                    .snapshot()
3531                    .build_update(&initial_snapshot, 1, 1, true);
3532            remote
3533                .as_remote_mut()
3534                .unwrap()
3535                .snapshot
3536                .apply_update(update_message)
3537                .unwrap();
3538
3539            assert_eq!(
3540                remote
3541                    .paths()
3542                    .map(|p| p.to_str().unwrap())
3543                    .collect::<Vec<_>>(),
3544                expected_paths
3545            );
3546        });
3547    }
3548
3549    #[gpui::test]
3550    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3551        let dir = temp_tree(json!({
3552            ".git": {},
3553            ".gitignore": "ignored-dir\n",
3554            "tracked-dir": {
3555                "tracked-file1": "tracked contents",
3556            },
3557            "ignored-dir": {
3558                "ignored-file1": "ignored contents",
3559            }
3560        }));
3561
3562        let http_client = FakeHttpClient::with_404_response();
3563        let client = Client::new(http_client.clone());
3564        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3565
3566        let tree = Worktree::open_local(
3567            client,
3568            user_store,
3569            dir.path(),
3570            Arc::new(RealFs),
3571            Default::default(),
3572            &mut cx.to_async(),
3573        )
3574        .await
3575        .unwrap();
3576        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3577            .await;
3578        tree.flush_fs_events(&cx).await;
3579        cx.read(|cx| {
3580            let tree = tree.read(cx);
3581            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3582            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3583            assert_eq!(tracked.is_ignored, false);
3584            assert_eq!(ignored.is_ignored, true);
3585        });
3586
3587        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3588        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3589        tree.flush_fs_events(&cx).await;
3590        cx.read(|cx| {
3591            let tree = tree.read(cx);
3592            let dot_git = tree.entry_for_path(".git").unwrap();
3593            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3594            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3595            assert_eq!(tracked.is_ignored, false);
3596            assert_eq!(ignored.is_ignored, true);
3597            assert_eq!(dot_git.is_ignored, true);
3598        });
3599    }
3600
3601    #[gpui::test]
3602    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3603        let user_id = 100;
3604        let http_client = FakeHttpClient::with_404_response();
3605        let mut client = Client::new(http_client);
3606        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3607        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3608
3609        let fs = Arc::new(FakeFs::new());
3610        fs.insert_tree(
3611            "/the-dir",
3612            json!({
3613                "a.txt": "a-contents",
3614                "b.txt": "b-contents",
3615            }),
3616        )
3617        .await;
3618
3619        let worktree = Worktree::open_local(
3620            client.clone(),
3621            user_store,
3622            "/the-dir".as_ref(),
3623            fs,
3624            Default::default(),
3625            &mut cx.to_async(),
3626        )
3627        .await
3628        .unwrap();
3629
3630        // Spawn multiple tasks to open paths, repeating some paths.
3631        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3632            (
3633                worktree.open_buffer("a.txt", cx),
3634                worktree.open_buffer("b.txt", cx),
3635                worktree.open_buffer("a.txt", cx),
3636            )
3637        });
3638
3639        let buffer_a_1 = buffer_a_1.await.unwrap();
3640        let buffer_a_2 = buffer_a_2.await.unwrap();
3641        let buffer_b = buffer_b.await.unwrap();
3642        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3643        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3644
3645        // There is only one buffer per path.
3646        let buffer_a_id = buffer_a_1.id();
3647        assert_eq!(buffer_a_2.id(), buffer_a_id);
3648
3649        // Open the same path again while it is still open.
3650        drop(buffer_a_1);
3651        let buffer_a_3 = worktree
3652            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3653            .await
3654            .unwrap();
3655
3656        // There's still only one buffer per path.
3657        assert_eq!(buffer_a_3.id(), buffer_a_id);
3658    }
3659
3660    #[gpui::test]
3661    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
3662        use std::fs;
3663
3664        let dir = temp_tree(json!({
3665            "file1": "abc",
3666            "file2": "def",
3667            "file3": "ghi",
3668        }));
3669        let http_client = FakeHttpClient::with_404_response();
3670        let client = Client::new(http_client.clone());
3671        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3672
3673        let tree = Worktree::open_local(
3674            client,
3675            user_store,
3676            dir.path(),
3677            Arc::new(RealFs),
3678            Default::default(),
3679            &mut cx.to_async(),
3680        )
3681        .await
3682        .unwrap();
3683        tree.flush_fs_events(&cx).await;
3684        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3685            .await;
3686
3687        let buffer1 = tree
3688            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3689            .await
3690            .unwrap();
3691        let events = Rc::new(RefCell::new(Vec::new()));
3692
3693        // initially, the buffer isn't dirty.
3694        buffer1.update(&mut cx, |buffer, cx| {
3695            cx.subscribe(&buffer1, {
3696                let events = events.clone();
3697                move |_, _, event, _| events.borrow_mut().push(event.clone())
3698            })
3699            .detach();
3700
3701            assert!(!buffer.is_dirty());
3702            assert!(events.borrow().is_empty());
3703
3704            buffer.edit(vec![1..2], "", cx);
3705        });
3706
3707        // after the first edit, the buffer is dirty, and emits a dirtied event.
3708        buffer1.update(&mut cx, |buffer, cx| {
3709            assert!(buffer.text() == "ac");
3710            assert!(buffer.is_dirty());
3711            assert_eq!(
3712                *events.borrow(),
3713                &[language::Event::Edited, language::Event::Dirtied]
3714            );
3715            events.borrow_mut().clear();
3716            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
3717        });
3718
3719        // after saving, the buffer is not dirty, and emits a saved event.
3720        buffer1.update(&mut cx, |buffer, cx| {
3721            assert!(!buffer.is_dirty());
3722            assert_eq!(*events.borrow(), &[language::Event::Saved]);
3723            events.borrow_mut().clear();
3724
3725            buffer.edit(vec![1..1], "B", cx);
3726            buffer.edit(vec![2..2], "D", cx);
3727        });
3728
3729        // after editing again, the buffer is dirty, and emits another dirty event.
3730        buffer1.update(&mut cx, |buffer, cx| {
3731            assert!(buffer.text() == "aBDc");
3732            assert!(buffer.is_dirty());
3733            assert_eq!(
3734                *events.borrow(),
3735                &[
3736                    language::Event::Edited,
3737                    language::Event::Dirtied,
3738                    language::Event::Edited,
3739                ],
3740            );
3741            events.borrow_mut().clear();
3742
3743            // TODO - currently, after restoring the buffer to its
3744            // previously-saved state, the buffer is still considered dirty.
3745            buffer.edit([1..3], "", cx);
3746            assert!(buffer.text() == "ac");
3747            assert!(buffer.is_dirty());
3748        });
3749
3750        assert_eq!(*events.borrow(), &[language::Event::Edited]);
3751
3752        // When a file is deleted, the buffer is considered dirty.
3753        let events = Rc::new(RefCell::new(Vec::new()));
3754        let buffer2 = tree
3755            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
3756            .await
3757            .unwrap();
3758        buffer2.update(&mut cx, |_, cx| {
3759            cx.subscribe(&buffer2, {
3760                let events = events.clone();
3761                move |_, _, event, _| events.borrow_mut().push(event.clone())
3762            })
3763            .detach();
3764        });
3765
3766        fs::remove_file(dir.path().join("file2")).unwrap();
3767        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
3768        assert_eq!(
3769            *events.borrow(),
3770            &[language::Event::Dirtied, language::Event::FileHandleChanged]
3771        );
3772
3773        // When a file is already dirty when deleted, we don't emit a Dirtied event.
3774        let events = Rc::new(RefCell::new(Vec::new()));
3775        let buffer3 = tree
3776            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
3777            .await
3778            .unwrap();
3779        buffer3.update(&mut cx, |_, cx| {
3780            cx.subscribe(&buffer3, {
3781                let events = events.clone();
3782                move |_, _, event, _| events.borrow_mut().push(event.clone())
3783            })
3784            .detach();
3785        });
3786
3787        tree.flush_fs_events(&cx).await;
3788        buffer3.update(&mut cx, |buffer, cx| {
3789            buffer.edit(Some(0..0), "x", cx);
3790        });
3791        events.borrow_mut().clear();
3792        fs::remove_file(dir.path().join("file3")).unwrap();
3793        buffer3
3794            .condition(&cx, |_, _| !events.borrow().is_empty())
3795            .await;
3796        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
3797        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
3798    }
3799
3800    #[gpui::test]
3801    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
3802        use std::fs;
3803
3804        let initial_contents = "aaa\nbbbbb\nc\n";
3805        let dir = temp_tree(json!({ "the-file": initial_contents }));
3806        let http_client = FakeHttpClient::with_404_response();
3807        let client = Client::new(http_client.clone());
3808        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3809
3810        let tree = Worktree::open_local(
3811            client,
3812            user_store,
3813            dir.path(),
3814            Arc::new(RealFs),
3815            Default::default(),
3816            &mut cx.to_async(),
3817        )
3818        .await
3819        .unwrap();
3820        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3821            .await;
3822
3823        let abs_path = dir.path().join("the-file");
3824        let buffer = tree
3825            .update(&mut cx, |tree, cx| {
3826                tree.open_buffer(Path::new("the-file"), cx)
3827            })
3828            .await
3829            .unwrap();
3830
3831        // TODO
3832        // Add a cursor on each row.
3833        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
3834        //     assert!(!buffer.is_dirty());
3835        //     buffer.add_selection_set(
3836        //         &(0..3)
3837        //             .map(|row| Selection {
3838        //                 id: row as usize,
3839        //                 start: Point::new(row, 1),
3840        //                 end: Point::new(row, 1),
3841        //                 reversed: false,
3842        //                 goal: SelectionGoal::None,
3843        //             })
3844        //             .collect::<Vec<_>>(),
3845        //         cx,
3846        //     )
3847        // });
3848
3849        // Change the file on disk, adding two new lines of text, and removing
3850        // one line.
3851        buffer.read_with(&cx, |buffer, _| {
3852            assert!(!buffer.is_dirty());
3853            assert!(!buffer.has_conflict());
3854        });
3855        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
3856        fs::write(&abs_path, new_contents).unwrap();
3857
3858        // Because the buffer was not modified, it is reloaded from disk. Its
3859        // contents are edited according to the diff between the old and new
3860        // file contents.
3861        buffer
3862            .condition(&cx, |buffer, _| buffer.text() == new_contents)
3863            .await;
3864
3865        buffer.update(&mut cx, |buffer, _| {
3866            assert_eq!(buffer.text(), new_contents);
3867            assert!(!buffer.is_dirty());
3868            assert!(!buffer.has_conflict());
3869
3870            // TODO
3871            // let cursor_positions = buffer
3872            //     .selection_set(selection_set_id)
3873            //     .unwrap()
3874            //     .selections::<Point>(&*buffer)
3875            //     .map(|selection| {
3876            //         assert_eq!(selection.start, selection.end);
3877            //         selection.start
3878            //     })
3879            //     .collect::<Vec<_>>();
3880            // assert_eq!(
3881            //     cursor_positions,
3882            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
3883            // );
3884        });
3885
3886        // Modify the buffer
3887        buffer.update(&mut cx, |buffer, cx| {
3888            buffer.edit(vec![0..0], " ", cx);
3889            assert!(buffer.is_dirty());
3890            assert!(!buffer.has_conflict());
3891        });
3892
3893        // Change the file on disk again, adding blank lines to the beginning.
3894        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();
3895
3896        // Because the buffer is modified, it doesn't reload from disk, but is
3897        // marked as having a conflict.
3898        buffer
3899            .condition(&cx, |buffer, _| buffer.has_conflict())
3900            .await;
3901    }
3902
3903    #[gpui::test]
3904    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
3905        let (language_server_config, mut fake_server) =
3906            LanguageServerConfig::fake(cx.background()).await;
3907        let progress_token = language_server_config
3908            .disk_based_diagnostics_progress_token
3909            .clone()
3910            .unwrap();
3911        let mut languages = LanguageRegistry::new();
3912        languages.add(Arc::new(Language::new(
3913            LanguageConfig {
3914                name: "Rust".to_string(),
3915                path_suffixes: vec!["rs".to_string()],
3916                language_server: Some(language_server_config),
3917                ..Default::default()
3918            },
3919            Some(tree_sitter_rust::language()),
3920        )));
3921
3922        let dir = temp_tree(json!({
3923            "a.rs": "fn a() { A }",
3924            "b.rs": "const y: i32 = 1",
3925        }));
3926
3927        let http_client = FakeHttpClient::with_404_response();
3928        let client = Client::new(http_client.clone());
3929        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3930
3931        let tree = Worktree::open_local(
3932            client,
3933            user_store,
3934            dir.path(),
3935            Arc::new(RealFs),
3936            Arc::new(languages),
3937            &mut cx.to_async(),
3938        )
3939        .await
3940        .unwrap();
3941        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3942            .await;
3943
3944        // Cause worktree to start the fake language server
3945        let _buffer = tree
3946            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
3947            .await
3948            .unwrap();
3949
3950        let mut events = subscribe(&tree, &mut cx);
3951
3952        fake_server.start_progress(&progress_token).await;
3953        assert_eq!(
3954            events.next().await.unwrap(),
3955            Event::DiskBasedDiagnosticsUpdating
3956        );
3957
3958        fake_server.start_progress(&progress_token).await;
3959        fake_server.end_progress(&progress_token).await;
3960        fake_server.start_progress(&progress_token).await;
3961
3962        fake_server
3963            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
3964                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
3965                version: None,
3966                diagnostics: vec![lsp::Diagnostic {
3967                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
3968                    severity: Some(lsp::DiagnosticSeverity::ERROR),
3969                    message: "undefined variable 'A'".to_string(),
3970                    ..Default::default()
3971                }],
3972            })
3973            .await;
3974        assert_eq!(
3975            events.next().await.unwrap(),
3976            Event::DiagnosticsUpdated(Arc::from(Path::new("a.rs")))
3977        );
3978
3979        fake_server.end_progress(&progress_token).await;
3980        fake_server.end_progress(&progress_token).await;
3981        assert_eq!(
3982            events.next().await.unwrap(),
3983            Event::DiskBasedDiagnosticsUpdated
3984        );
3985
3986        let buffer = tree
3987            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3988            .await
3989            .unwrap();
3990
3991        buffer.read_with(&cx, |buffer, _| {
3992            let snapshot = buffer.snapshot();
3993            let diagnostics = snapshot
3994                .diagnostics_in_range::<_, Point>(0..buffer.len())
3995                .collect::<Vec<_>>();
3996            assert_eq!(
3997                diagnostics,
3998                &[DiagnosticEntry {
3999                    range: Point::new(0, 9)..Point::new(0, 10),
4000                    diagnostic: Diagnostic {
4001                        severity: lsp::DiagnosticSeverity::ERROR,
4002                        message: "undefined variable 'A'".to_string(),
4003                        group_id: 0,
4004                        is_primary: true,
4005                        ..Default::default()
4006                    }
4007                }]
4008            )
4009        });
4010    }
4011
4012    #[gpui::test]
4013    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
4014        let fs = Arc::new(FakeFs::new());
4015        let http_client = FakeHttpClient::with_404_response();
4016        let client = Client::new(http_client.clone());
4017        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
4018
4019        fs.insert_tree(
4020            "/the-dir",
4021            json!({
4022                "a.rs": "
4023                    fn foo(mut v: Vec<usize>) {
4024                        for x in &v {
4025                            v.push(1);
4026                        }
4027                    }
4028                "
4029                .unindent(),
4030            }),
4031        )
4032        .await;
4033
4034        let worktree = Worktree::open_local(
4035            client.clone(),
4036            user_store,
4037            "/the-dir".as_ref(),
4038            fs,
4039            Default::default(),
4040            &mut cx.to_async(),
4041        )
4042        .await
4043        .unwrap();
4044
4045        let buffer = worktree
4046            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
4047            .await
4048            .unwrap();
4049
4050        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
4051        let message = lsp::PublishDiagnosticsParams {
4052            uri: buffer_uri.clone(),
4053            diagnostics: vec![
4054                lsp::Diagnostic {
4055                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
4056                    severity: Some(DiagnosticSeverity::WARNING),
4057                    message: "error 1".to_string(),
4058                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
4059                        location: lsp::Location {
4060                            uri: buffer_uri.clone(),
4061                            range: lsp::Range::new(
4062                                lsp::Position::new(1, 8),
4063                                lsp::Position::new(1, 9),
4064                            ),
4065                        },
4066                        message: "error 1 hint 1".to_string(),
4067                    }]),
4068                    ..Default::default()
4069                },
4070                lsp::Diagnostic {
4071                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
4072                    severity: Some(DiagnosticSeverity::HINT),
4073                    message: "error 1 hint 1".to_string(),
4074                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
4075                        location: lsp::Location {
4076                            uri: buffer_uri.clone(),
4077                            range: lsp::Range::new(
4078                                lsp::Position::new(1, 8),
4079                                lsp::Position::new(1, 9),
4080                            ),
4081                        },
4082                        message: "original diagnostic".to_string(),
4083                    }]),
4084                    ..Default::default()
4085                },
4086                lsp::Diagnostic {
4087                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
4088                    severity: Some(DiagnosticSeverity::ERROR),
4089                    message: "error 2".to_string(),
4090                    related_information: Some(vec![
4091                        lsp::DiagnosticRelatedInformation {
4092                            location: lsp::Location {
4093                                uri: buffer_uri.clone(),
4094                                range: lsp::Range::new(
4095                                    lsp::Position::new(1, 13),
4096                                    lsp::Position::new(1, 15),
4097                                ),
4098                            },
4099                            message: "error 2 hint 1".to_string(),
4100                        },
4101                        lsp::DiagnosticRelatedInformation {
4102                            location: lsp::Location {
4103                                uri: buffer_uri.clone(),
4104                                range: lsp::Range::new(
4105                                    lsp::Position::new(1, 13),
4106                                    lsp::Position::new(1, 15),
4107                                ),
4108                            },
4109                            message: "error 2 hint 2".to_string(),
4110                        },
4111                    ]),
4112                    ..Default::default()
4113                },
4114                lsp::Diagnostic {
4115                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
4116                    severity: Some(DiagnosticSeverity::HINT),
4117                    message: "error 2 hint 1".to_string(),
4118                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
4119                        location: lsp::Location {
4120                            uri: buffer_uri.clone(),
4121                            range: lsp::Range::new(
4122                                lsp::Position::new(2, 8),
4123                                lsp::Position::new(2, 17),
4124                            ),
4125                        },
4126                        message: "original diagnostic".to_string(),
4127                    }]),
4128                    ..Default::default()
4129                },
4130                lsp::Diagnostic {
4131                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
4132                    severity: Some(DiagnosticSeverity::HINT),
4133                    message: "error 2 hint 2".to_string(),
4134                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
4135                        location: lsp::Location {
4136                            uri: buffer_uri.clone(),
4137                            range: lsp::Range::new(
4138                                lsp::Position::new(2, 8),
4139                                lsp::Position::new(2, 17),
4140                            ),
4141                        },
4142                        message: "original diagnostic".to_string(),
4143                    }]),
4144                    ..Default::default()
4145                },
4146            ],
4147            version: None,
4148        };
4149
4150        worktree
4151            .update(&mut cx, |tree, cx| {
4152                tree.update_diagnostics(message, &Default::default(), cx)
4153            })
4154            .unwrap();
4155        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());
4156
4157        assert_eq!(
4158            buffer
4159                .diagnostics_in_range::<_, Point>(0..buffer.len())
4160                .collect::<Vec<_>>(),
4161            &[
4162                DiagnosticEntry {
4163                    range: Point::new(1, 8)..Point::new(1, 9),
4164                    diagnostic: Diagnostic {
4165                        severity: DiagnosticSeverity::WARNING,
4166                        message: "error 1".to_string(),
4167                        group_id: 0,
4168                        is_primary: true,
4169                        ..Default::default()
4170                    }
4171                },
4172                DiagnosticEntry {
4173                    range: Point::new(1, 8)..Point::new(1, 9),
4174                    diagnostic: Diagnostic {
4175                        severity: DiagnosticSeverity::HINT,
4176                        message: "error 1 hint 1".to_string(),
4177                        group_id: 0,
4178                        is_primary: false,
4179                        ..Default::default()
4180                    }
4181                },
4182                DiagnosticEntry {
4183                    range: Point::new(1, 13)..Point::new(1, 15),
4184                    diagnostic: Diagnostic {
4185                        severity: DiagnosticSeverity::HINT,
4186                        message: "error 2 hint 1".to_string(),
4187                        group_id: 1,
4188                        is_primary: false,
4189                        ..Default::default()
4190                    }
4191                },
4192                DiagnosticEntry {
4193                    range: Point::new(1, 13)..Point::new(1, 15),
4194                    diagnostic: Diagnostic {
4195                        severity: DiagnosticSeverity::HINT,
4196                        message: "error 2 hint 2".to_string(),
4197                        group_id: 1,
4198                        is_primary: false,
4199                        ..Default::default()
4200                    }
4201                },
4202                DiagnosticEntry {
4203                    range: Point::new(2, 8)..Point::new(2, 17),
4204                    diagnostic: Diagnostic {
4205                        severity: DiagnosticSeverity::ERROR,
4206                        message: "error 2".to_string(),
4207                        group_id: 1,
4208                        is_primary: true,
4209                        ..Default::default()
4210                    }
4211                }
4212            ]
4213        );
4214
4215        assert_eq!(
4216            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
4217            &[
4218                DiagnosticEntry {
4219                    range: Point::new(1, 8)..Point::new(1, 9),
4220                    diagnostic: Diagnostic {
4221                        severity: DiagnosticSeverity::WARNING,
4222                        message: "error 1".to_string(),
4223                        group_id: 0,
4224                        is_primary: true,
4225                        ..Default::default()
4226                    }
4227                },
4228                DiagnosticEntry {
4229                    range: Point::new(1, 8)..Point::new(1, 9),
4230                    diagnostic: Diagnostic {
4231                        severity: DiagnosticSeverity::HINT,
4232                        message: "error 1 hint 1".to_string(),
4233                        group_id: 0,
4234                        is_primary: false,
4235                        ..Default::default()
4236                    }
4237                },
4238            ]
4239        );
4240        assert_eq!(
4241            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
4242            &[
4243                DiagnosticEntry {
4244                    range: Point::new(1, 13)..Point::new(1, 15),
4245                    diagnostic: Diagnostic {
4246                        severity: DiagnosticSeverity::HINT,
4247                        message: "error 2 hint 1".to_string(),
4248                        group_id: 1,
4249                        is_primary: false,
4250                        ..Default::default()
4251                    }
4252                },
4253                DiagnosticEntry {
4254                    range: Point::new(1, 13)..Point::new(1, 15),
4255                    diagnostic: Diagnostic {
4256                        severity: DiagnosticSeverity::HINT,
4257                        message: "error 2 hint 2".to_string(),
4258                        group_id: 1,
4259                        is_primary: false,
4260                        ..Default::default()
4261                    }
4262                },
4263                DiagnosticEntry {
4264                    range: Point::new(2, 8)..Point::new(2, 17),
4265                    diagnostic: Diagnostic {
4266                        severity: DiagnosticSeverity::ERROR,
4267                        message: "error 2".to_string(),
4268                        group_id: 1,
4269                        is_primary: true,
4270                        ..Default::default()
4271                    }
4272                }
4273            ]
4274        );
4275    }
4276
4277    #[gpui::test(iterations = 100)]
4278    fn test_random(mut rng: StdRng) {
4279        let operations = env::var("OPERATIONS")
4280            .map(|o| o.parse().unwrap())
4281            .unwrap_or(40);
4282        let initial_entries = env::var("INITIAL_ENTRIES")
4283            .map(|o| o.parse().unwrap())
4284            .unwrap_or(20);
4285
4286        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
4287        for _ in 0..initial_entries {
4288            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
4289        }
4290        log::info!("Generated initial tree");
4291
4292        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4293        let fs = Arc::new(RealFs);
4294        let next_entry_id = Arc::new(AtomicUsize::new(0));
4295        let mut initial_snapshot = Snapshot {
4296            id: WorktreeId::from_usize(0),
4297            scan_id: 0,
4298            abs_path: root_dir.path().into(),
4299            entries_by_path: Default::default(),
4300            entries_by_id: Default::default(),
4301            removed_entry_ids: Default::default(),
4302            ignores: Default::default(),
4303            root_name: Default::default(),
4304            root_char_bag: Default::default(),
4305            next_entry_id: next_entry_id.clone(),
4306        };
4307        initial_snapshot.insert_entry(
4308            Entry::new(
4309                Path::new("").into(),
4310                &smol::block_on(fs.metadata(root_dir.path()))
4311                    .unwrap()
4312                    .unwrap(),
4313                &next_entry_id,
4314                Default::default(),
4315            ),
4316            fs.as_ref(),
4317        );
4318        let mut scanner = BackgroundScanner::new(
4319            Arc::new(Mutex::new(initial_snapshot.clone())),
4320            notify_tx,
4321            fs.clone(),
4322            Arc::new(gpui::executor::Background::new()),
4323        );
4324        smol::block_on(scanner.scan_dirs()).unwrap();
4325        scanner.snapshot().check_invariants();
4326
4327        let mut events = Vec::new();
4328        let mut snapshots = Vec::new();
4329        let mut mutations_len = operations;
4330        while mutations_len > 1 {
4331            if !events.is_empty() && rng.gen_bool(0.4) {
4332                let len = rng.gen_range(0..=events.len());
4333                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
4334                log::info!("Delivering events: {:#?}", to_deliver);
4335                smol::block_on(scanner.process_events(to_deliver));
4336                scanner.snapshot().check_invariants();
4337            } else {
4338                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
4339                mutations_len -= 1;
4340            }
4341
4342            if rng.gen_bool(0.2) {
4343                snapshots.push(scanner.snapshot());
4344            }
4345        }
4346        log::info!("Quiescing: {:#?}", events);
4347        smol::block_on(scanner.process_events(events));
4348        scanner.snapshot().check_invariants();
4349
4350        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4351        let mut new_scanner = BackgroundScanner::new(
4352            Arc::new(Mutex::new(initial_snapshot)),
4353            notify_tx,
4354            scanner.fs.clone(),
4355            scanner.executor.clone(),
4356        );
4357        smol::block_on(new_scanner.scan_dirs()).unwrap();
4358        assert_eq!(
4359            scanner.snapshot().to_vec(true),
4360            new_scanner.snapshot().to_vec(true)
4361        );
4362
4363        for mut prev_snapshot in snapshots {
4364            let include_ignored = rng.gen::<bool>();
4365            if !include_ignored {
4366                let mut entries_by_path_edits = Vec::new();
4367                let mut entries_by_id_edits = Vec::new();
4368                for entry in prev_snapshot
4369                    .entries_by_id
4370                    .cursor::<()>()
4371                    .filter(|e| e.is_ignored)
4372                {
4373                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4374                    entries_by_id_edits.push(Edit::Remove(entry.id));
4375                }
4376
4377                prev_snapshot
4378                    .entries_by_path
4379                    .edit(entries_by_path_edits, &());
4380                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4381            }
4382
4383            let update = scanner
4384                .snapshot()
4385                .build_update(&prev_snapshot, 0, 0, include_ignored);
4386            prev_snapshot.apply_update(update).unwrap();
4387            assert_eq!(
4388                prev_snapshot.to_vec(true),
4389                scanner.snapshot().to_vec(include_ignored)
4390            );
4391        }
4392    }
4393
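    /// Randomly create, rename, or delete files and directories under `root_path`,
    /// returning synthetic fsevents describing the paths that were touched.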
4394    fn randomly_mutate_tree(
4395        root_path: &Path,
4396        insertion_probability: f64,
4397        rng: &mut impl Rng,
4398    ) -> Result<Vec<fsevent::Event>> {
4399        let root_path = root_path.canonicalize().unwrap();
4400        let (dirs, files) = read_dir_recursive(root_path.clone());
4401
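        // Collect a synthetic fsevent for each path we mutate; the test delivers
        // these to the scanner in batches.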
4402        let mut events = Vec::new();
4403        let mut record_event = |path: PathBuf| {
4404            events.push(fsevent::Event {
4405                event_id: SystemTime::now()
4406                    .duration_since(UNIX_EPOCH)
4407                    .unwrap()
4408                    .as_secs(),
4409                flags: fsevent::StreamFlags::empty(),
4410                path,
4411            });
4412        };
4413
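        // If the tree contains nothing but the root directory, or with the given
        // probability, create a new file or directory. Otherwise, occasionally
        // write a `.gitignore`, or else rename or delete an existing entry.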
4414        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4415            let path = dirs.choose(rng).unwrap();
4416            let new_path = path.join(gen_name(rng));
4417
4418            if rng.gen() {
4419                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4420                std::fs::create_dir(&new_path)?;
4421            } else {
4422                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4423                std::fs::write(&new_path, "")?;
4424            }
4425            record_event(new_path);
4426        } else if rng.gen_bool(0.05) {
4427            let ignore_dir_path = dirs.choose(rng).unwrap();
4428            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4429
4430            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4431            let files_to_ignore = {
4432                let len = rng.gen_range(0..=subfiles.len());
4433                subfiles.choose_multiple(rng, len)
4434            };
4435            let dirs_to_ignore = {
4436                let len = rng.gen_range(0..subdirs.len());
4437                subdirs.choose_multiple(rng, len)
4438            };
4439
4440            let mut ignore_contents = String::new();
4441            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4442                writeln!(
4443                    ignore_contents,
4444                    "{}",
4445                    path_to_ignore
4446                        .strip_prefix(&ignore_dir_path)?
4447                        .to_str()
4448                        .unwrap()
4449                )
4450                .unwrap();
4451            }
4452            log::info!(
4453                "Creating {:?} with contents:\n{}",
4454                ignore_path.strip_prefix(&root_path)?,
4455                ignore_contents
4456            );
4457            std::fs::write(&ignore_path, ignore_contents).unwrap();
4458            record_event(ignore_path);
4459        } else {
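            // Pick an existing file or non-root directory, then either rename it
            // into another directory or delete it.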
4460            let old_path = {
4461                let file_path = files.choose(rng);
4462                let dir_path = dirs[1..].choose(rng);
4463                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4464            };
4465
4466            let is_rename = rng.gen();
4467            if is_rename {
4468                let new_path_parent = dirs
4469                    .iter()
4470                    .filter(|d| !d.starts_with(old_path))
4471                    .choose(rng)
4472                    .unwrap();
4473
4474                let overwrite_existing_dir =
4475                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4476                let new_path = if overwrite_existing_dir {
4477                    std::fs::remove_dir_all(&new_path_parent).ok();
4478                    new_path_parent.to_path_buf()
4479                } else {
4480                    new_path_parent.join(gen_name(rng))
4481                };
4482
4483                log::info!(
4484                    "Renaming {:?} to {}{:?}",
4485                    old_path.strip_prefix(&root_path)?,
4486                    if overwrite_existing_dir {
4487                        "overwrite "
4488                    } else {
4489                        ""
4490                    },
4491                    new_path.strip_prefix(&root_path)?
4492                );
4493                std::fs::rename(&old_path, &new_path)?;
4494                record_event(old_path.clone());
4495                record_event(new_path);
4496            } else if old_path.is_dir() {
4497                let (dirs, files) = read_dir_recursive(old_path.clone());
4498
4499                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4500                std::fs::remove_dir_all(&old_path).unwrap();
4501                for file in files {
4502                    record_event(file);
4503                }
4504                for dir in dirs {
4505                    record_event(dir);
4506                }
4507            } else {
4508                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4509                std::fs::remove_file(old_path).unwrap();
4510                record_event(old_path.clone());
4511            }
4512        }
4513
4514        Ok(events)
4515    }
4516
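    /// Return all directories (including `path` itself) and all files beneath `path`.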
4517    fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4518        let child_entries = std::fs::read_dir(&path).unwrap();
4519        let mut dirs = vec![path];
4520        let mut files = Vec::new();
4521        for child_entry in child_entries {
4522            let child_path = child_entry.unwrap().path();
4523            if child_path.is_dir() {
4524                let (child_dirs, child_files) = read_dir_recursive(child_path);
4525                dirs.extend(child_dirs);
4526                files.extend(child_files);
4527            } else {
4528                files.push(child_path);
4529            }
4530        }
4531        (dirs, files)
4532    }
4533
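    /// Generate a random six-character alphanumeric name.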
4534    fn gen_name(rng: &mut impl Rng) -> String {
4535        (0..6)
4536            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4537            .map(char::from)
4538            .collect()
4539    }
4540
4541    impl Snapshot {
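        /// Verify the snapshot's internal consistency: file iteration, traversal
        /// order, and `.gitignore` bookkeeping.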
4542        fn check_invariants(&self) {
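            // Both file iterators must yield file entries in tree order;
            // `visible_files` must skip ignored entries.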
4543            let mut files = self.files(true, 0);
4544            let mut visible_files = self.files(false, 0);
4545            for entry in self.entries_by_path.cursor::<()>() {
4546                if entry.is_file() {
4547                    assert_eq!(files.next().unwrap().inode, entry.inode);
4548                    if !entry.is_ignored {
4549                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4550                    }
4551                }
4552            }
4553            assert!(files.next().is_none());
4554            assert!(visible_files.next().is_none());
4555
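            // Walking the tree via child_entries with a stack visits entries in
            // depth-first order, which must match the flat order of entries_by_path.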
4556            let mut dfs_paths_via_traversal = Vec::new();
4557            let mut stack = vec![Path::new("")];
4558            while let Some(path) = stack.pop() {
4559                dfs_paths_via_traversal.push(path);
4560                let ix = stack.len();
4561                for child_entry in self.child_entries(path) {
4562                    stack.insert(ix, &child_entry.path);
4563                }
4564            }
4565
4566            let dfs_paths = self
4567                .entries_by_path
4568                .cursor::<()>()
4569                .map(|e| e.path.as_ref())
4570                .collect::<Vec<_>>();
4571            assert_eq!(dfs_paths_via_traversal, dfs_paths);
4572
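            // Every directory with a tracked ignore must still exist in the
            // snapshot, along with its `.gitignore` file.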
4573            for (ignore_parent_path, _) in &self.ignores {
4574                assert!(self.entry_for_path(ignore_parent_path).is_some());
4575                assert!(self
4576                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4577                    .is_some());
4578            }
4579        }
4580
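        /// Flatten the snapshot into (path, inode, is_ignored) tuples, sorted by path.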
4581        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4582            let mut paths = Vec::new();
4583            for entry in self.entries_by_path.cursor::<()>() {
4584                if include_ignored || !entry.is_ignored {
4585                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4586                }
4587            }
4588            paths.sort_by(|a, b| a.0.cmp(&b.0));
4589            paths
4590        }
4591    }
4592}