worktree.rs

   1use super::{
   2    fs::{self, Fs},
   3    ignore::IgnoreStack,
   4    DiagnosticSummary,
   5};
   6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   7use anyhow::{anyhow, Context, Result};
   8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
   9use clock::ReplicaId;
  10use collections::{hash_map, HashMap, HashSet};
  11use futures::{Stream, StreamExt};
  12use fuzzy::CharBag;
  13use gpui::{
  14    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
  15    Task, UpgradeModelHandle, WeakModelHandle,
  16};
  17use language::{
  18    Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
  19    Operation, PointUtf16, Rope,
  20};
  21use lazy_static::lazy_static;
  22use lsp::LanguageServer;
  23use parking_lot::Mutex;
  24use postage::{
  25    prelude::{Sink as _, Stream as _},
  26    watch,
  27};
  28use serde::Deserialize;
  29use smol::channel::{self, Sender};
  30use std::{
  31    any::Any,
  32    cmp::{self, Ordering},
  33    convert::{TryFrom, TryInto},
  34    ffi::{OsStr, OsString},
  35    fmt,
  36    future::Future,
  37    ops::{Deref, Range},
  38    path::{Path, PathBuf},
  39    sync::{
  40        atomic::{AtomicUsize, Ordering::SeqCst},
  41        Arc,
  42    },
  43    time::{Duration, SystemTime},
  44};
  45use sum_tree::{Bias, TreeMap};
  46use sum_tree::{Edit, SeekTarget, SumTree};
  47use util::{post_inc, ResultExt, TryFutureExt};
  48
  49lazy_static! {
  50    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
  51}
  52
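/// Progress of the background directory scan: idle, still scanning, or failed
/// with an error.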
  53#[derive(Clone, Debug)]
  54enum ScanState {
  55    Idle,
  56    Scanning,
  57    Err(Arc<anyhow::Error>),
  58}
  59
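/// Identifies a worktree. For local worktrees this wraps the model's id; for
/// remote worktrees it is derived from the id sent over the wire.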
  60#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
  61pub struct WorktreeId(usize);
  62
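/// A worktree is either rooted in the local filesystem or is a replica of a
/// collaborator's worktree kept up to date over RPC.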
  63pub enum Worktree {
  64    Local(LocalWorktree),
  65    Remote(RemoteWorktree),
  66}
  67
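/// Events emitted by a worktree when diagnostics change.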
  68#[derive(Clone, Debug, Eq, PartialEq)]
  69pub enum Event {
  70    DiskBasedDiagnosticsUpdated,
  71    DiagnosticsUpdated(Arc<Path>),
  72}
  73
  74impl Entity for Worktree {
  75    type Event = Event;
  76
  77    fn app_will_quit(
  78        &mut self,
  79        _: &mut MutableAppContext,
  80    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
  81        use futures::FutureExt;
  82
  83        if let Self::Local(worktree) = self {
  84            let shutdown_futures = worktree
  85                .language_servers
  86                .drain()
  87                .filter_map(|(_, server)| server.shutdown())
  88                .collect::<Vec<_>>();
  89            Some(
  90                async move {
  91                    futures::future::join_all(shutdown_futures).await;
  92                }
  93                .boxed(),
  94            )
  95        } else {
  96            None
  97        }
  98    }
  99}
 100
 101impl Worktree {
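    /// Opens a worktree rooted at the given local path and spawns the background
    /// scanner task that watches the filesystem for changes.
    ///
    /// A rough call-site sketch (assuming `client`, `user_store`, `fs`, and
    /// `languages` are already constructed):
    ///
    /// ```ignore
    /// let tree = Worktree::open_local(client, user_store, Path::new("/path"), fs, languages, &mut cx).await?;
    /// ```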
 102    pub async fn open_local(
 103        client: Arc<Client>,
 104        user_store: ModelHandle<UserStore>,
 105        path: impl Into<Arc<Path>>,
 106        fs: Arc<dyn Fs>,
 107        languages: Arc<LanguageRegistry>,
 108        cx: &mut AsyncAppContext,
 109    ) -> Result<ModelHandle<Self>> {
 110        let (tree, scan_states_tx) =
 111            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
 112        tree.update(cx, |tree, cx| {
 113            let tree = tree.as_local_mut().unwrap();
 114            let abs_path = tree.snapshot.abs_path.clone();
 115            let background_snapshot = tree.background_snapshot.clone();
 116            let background = cx.background().clone();
 117            tree._background_scanner_task = Some(cx.background().spawn(async move {
 118                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
 119                let scanner =
 120                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
 121                scanner.run(events).await;
 122            }));
 123        });
 124        Ok(tree)
 125    }
 126
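    /// Builds a remote worktree from its proto representation, constructing the
    /// initial snapshot on a background thread and then applying incoming
    /// `UpdateWorktree` messages to keep it current.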
 127    pub async fn remote(
 128        project_remote_id: u64,
 129        replica_id: ReplicaId,
 130        worktree: proto::Worktree,
 131        client: Arc<Client>,
 132        user_store: ModelHandle<UserStore>,
 133        languages: Arc<LanguageRegistry>,
 134        cx: &mut AsyncAppContext,
 135    ) -> Result<ModelHandle<Self>> {
 136        let remote_id = worktree.id;
 137        let root_char_bag: CharBag = worktree
 138            .root_name
 139            .chars()
 140            .map(|c| c.to_ascii_lowercase())
 141            .collect();
 142        let root_name = worktree.root_name.clone();
 143        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
 144            .background()
 145            .spawn(async move {
 146                let mut entries_by_path_edits = Vec::new();
 147                let mut entries_by_id_edits = Vec::new();
 148                for entry in worktree.entries {
 149                    match Entry::try_from((&root_char_bag, entry)) {
 150                        Ok(entry) => {
 151                            entries_by_id_edits.push(Edit::Insert(PathEntry {
 152                                id: entry.id,
 153                                path: entry.path.clone(),
 154                                is_ignored: entry.is_ignored,
 155                                scan_id: 0,
 156                            }));
 157                            entries_by_path_edits.push(Edit::Insert(entry));
 158                        }
 159                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
 160                    }
 161                }
 162
 163                let mut entries_by_path = SumTree::new();
 164                let mut entries_by_id = SumTree::new();
 165                entries_by_path.edit(entries_by_path_edits, &());
 166                entries_by_id.edit(entries_by_id_edits, &());
 167
 168                let diagnostic_summaries = TreeMap::from_ordered_entries(
 169                    worktree.diagnostic_summaries.into_iter().map(|summary| {
 170                        (
 171                            PathKey(PathBuf::from(summary.path).into()),
 172                            DiagnosticSummary {
 173                                error_count: summary.error_count as usize,
 174                                warning_count: summary.warning_count as usize,
 175                                info_count: summary.info_count as usize,
 176                                hint_count: summary.hint_count as usize,
 177                            },
 178                        )
 179                    }),
 180                );
 181
 182                (entries_by_path, entries_by_id, diagnostic_summaries)
 183            })
 184            .await;
 185
 186        let worktree = cx.update(|cx| {
 187            cx.add_model(|cx: &mut ModelContext<Worktree>| {
 188                let snapshot = Snapshot {
 189                    id: WorktreeId(remote_id as usize),
 190                    scan_id: 0,
 191                    abs_path: Path::new("").into(),
 192                    root_name,
 193                    root_char_bag,
 194                    ignores: Default::default(),
 195                    entries_by_path,
 196                    entries_by_id,
 197                    removed_entry_ids: Default::default(),
 198                    next_entry_id: Default::default(),
 199                };
 200
 201                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
 202                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());
 203
 204                cx.background()
 205                    .spawn(async move {
 206                        while let Some(update) = updates_rx.recv().await {
 207                            let mut snapshot = snapshot_tx.borrow().clone();
 208                            if let Err(error) = snapshot.apply_update(update) {
 209                                log::error!("error applying worktree update: {}", error);
 210                            }
 211                            *snapshot_tx.borrow_mut() = snapshot;
 212                        }
 213                    })
 214                    .detach();
 215
 216                {
 217                    let mut snapshot_rx = snapshot_rx.clone();
 218                    cx.spawn_weak(|this, mut cx| async move {
 219                        while let Some(_) = snapshot_rx.recv().await {
 220                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
 221                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
 222                            } else {
 223                                break;
 224                            }
 225                        }
 226                    })
 227                    .detach();
 228                }
 229
 230                Worktree::Remote(RemoteWorktree {
 231                    project_id: project_remote_id,
 232                    replica_id,
 233                    snapshot,
 234                    snapshot_rx,
 235                    updates_tx,
 236                    client: client.clone(),
 237                    loading_buffers: Default::default(),
 238                    open_buffers: Default::default(),
 239                    queued_operations: Default::default(),
 240                    languages,
 241                    user_store,
 242                    diagnostic_summaries,
 243                })
 244            })
 245        });
 246
 247        Ok(worktree)
 248    }
 249
 250    pub fn as_local(&self) -> Option<&LocalWorktree> {
 251        if let Worktree::Local(worktree) = self {
 252            Some(worktree)
 253        } else {
 254            None
 255        }
 256    }
 257
 258    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 259        if let Worktree::Remote(worktree) = self {
 260            Some(worktree)
 261        } else {
 262            None
 263        }
 264    }
 265
 266    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 267        if let Worktree::Local(worktree) = self {
 268            Some(worktree)
 269        } else {
 270            None
 271        }
 272    }
 273
 274    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 275        if let Worktree::Remote(worktree) = self {
 276            Some(worktree)
 277        } else {
 278            None
 279        }
 280    }
 281
 282    pub fn snapshot(&self) -> Snapshot {
 283        match self {
 284            Worktree::Local(worktree) => worktree.snapshot(),
 285            Worktree::Remote(worktree) => worktree.snapshot(),
 286        }
 287    }
 288
 289    pub fn replica_id(&self) -> ReplicaId {
 290        match self {
 291            Worktree::Local(_) => 0,
 292            Worktree::Remote(worktree) => worktree.replica_id,
 293        }
 294    }
 295
 296    pub fn remove_collaborator(
 297        &mut self,
 298        peer_id: PeerId,
 299        replica_id: ReplicaId,
 300        cx: &mut ModelContext<Self>,
 301    ) {
 302        match self {
 303            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
 304            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
 305        }
 306    }
 307
 308    pub fn languages(&self) -> &Arc<LanguageRegistry> {
 309        match self {
 310            Worktree::Local(worktree) => &worktree.language_registry,
 311            Worktree::Remote(worktree) => &worktree.languages,
 312        }
 313    }
 314
 315    pub fn user_store(&self) -> &ModelHandle<UserStore> {
 316        match self {
 317            Worktree::Local(worktree) => &worktree.user_store,
 318            Worktree::Remote(worktree) => &worktree.user_store,
 319        }
 320    }
 321
 322    pub fn handle_open_buffer(
 323        &mut self,
 324        envelope: TypedEnvelope<proto::OpenBuffer>,
 325        rpc: Arc<Client>,
 326        cx: &mut ModelContext<Self>,
 327    ) -> anyhow::Result<()> {
 328        let receipt = envelope.receipt();
 329
 330        let response = self
 331            .as_local_mut()
 332            .unwrap()
 333            .open_remote_buffer(envelope, cx);
 334
 335        cx.background()
 336            .spawn(
 337                async move {
 338                    rpc.respond(receipt, response.await?).await?;
 339                    Ok(())
 340                }
 341                .log_err(),
 342            )
 343            .detach();
 344
 345        Ok(())
 346    }
 347
 348    pub fn handle_close_buffer(
 349        &mut self,
 350        envelope: TypedEnvelope<proto::CloseBuffer>,
 351        _: Arc<Client>,
 352        cx: &mut ModelContext<Self>,
 353    ) -> anyhow::Result<()> {
 354        self.as_local_mut()
 355            .unwrap()
 356            .close_remote_buffer(envelope, cx)
 357    }
 358
 359    pub fn diagnostic_summaries<'a>(
 360        &'a self,
 361    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
 362        match self {
 363            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 364            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 365        }
 366        .iter()
 367        .map(|(path, summary)| (path.0.clone(), summary.clone()))
 368    }
 369
 370    pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
 371        match self {
 372            Worktree::Local(worktree) => &mut worktree.loading_buffers,
 373            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
 374        }
 375    }
 376
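    /// Opens a buffer for the given path, reusing an already-open buffer or an
    /// in-flight load of the same path when possible.
    ///
    /// A rough call-site sketch (hypothetical `worktree` handle, assuming an
    /// async context):
    ///
    /// ```ignore
    /// let buffer = worktree
    ///     .update(cx, |tree, cx| tree.open_buffer("src/main.rs", cx))
    ///     .await?;
    /// ```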
 377    pub fn open_buffer(
 378        &mut self,
 379        path: impl AsRef<Path>,
 380        cx: &mut ModelContext<Self>,
 381    ) -> Task<Result<ModelHandle<Buffer>>> {
 382        let path = path.as_ref();
 383
 384        // If there is already a buffer for the given path, then return it.
 385        let existing_buffer = match self {
 386            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
 387            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
 388        };
 389        if let Some(existing_buffer) = existing_buffer {
 390            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
 391        }
 392
 393        let path: Arc<Path> = Arc::from(path);
 394        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
 395            // If the given path is already being loaded, then wait for that existing
 396            // task to complete and return the same buffer.
 397            hash_map::Entry::Occupied(e) => e.get().clone(),
 398
 399            // Otherwise, record the fact that this path is now being loaded.
 400            hash_map::Entry::Vacant(entry) => {
 401                let (mut tx, rx) = postage::watch::channel();
 402                entry.insert(rx.clone());
 403
 404                let load_buffer = match self {
 405                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
 406                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
 407                };
 408                cx.spawn(move |this, mut cx| async move {
 409                    let result = load_buffer.await;
 410
 411                    // After the buffer loads, record the fact that it is no longer
 412                    // loading.
 413                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
  414                    *tx.borrow_mut() = Some(result.map_err(Arc::new));
 415                })
 416                .detach();
 417                rx
 418            }
 419        };
 420
 421        cx.spawn(|_, _| async move {
 422            loop {
 423                if let Some(result) = loading_watch.borrow().as_ref() {
 424                    return result.clone().map_err(|e| anyhow!("{}", e));
 425                }
 426                loading_watch.recv().await;
 427            }
 428        })
 429    }
 430
 431    #[cfg(feature = "test-support")]
 432    pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
 433        let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
 434            Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
 435            Worktree::Remote(worktree) => {
 436                Box::new(worktree.open_buffers.values().filter_map(|buf| {
 437                    if let RemoteBuffer::Loaded(buf) = buf {
 438                        Some(buf)
 439                    } else {
 440                        None
 441                    }
 442                }))
 443            }
 444        };
 445
 446        let path = path.as_ref();
 447        open_buffers
 448            .find(|buffer| {
 449                if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
 450                    file.path().as_ref() == path
 451                } else {
 452                    false
 453                }
 454            })
 455            .is_some()
 456    }
 457
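    /// Applies buffer operations received from a collaborator. On a remote
    /// worktree, operations for buffers that aren't loaded yet are queued and
    /// applied once the buffer becomes available.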
 458    pub fn handle_update_buffer(
 459        &mut self,
 460        envelope: TypedEnvelope<proto::UpdateBuffer>,
 461        cx: &mut ModelContext<Self>,
 462    ) -> Result<()> {
 463        let payload = envelope.payload.clone();
 464        let buffer_id = payload.buffer_id as usize;
 465        let ops = payload
 466            .operations
 467            .into_iter()
  468            .map(language::proto::deserialize_operation)
 469            .collect::<Result<Vec<_>, _>>()?;
 470
 471        match self {
 472            Worktree::Local(worktree) => {
 473                let buffer = worktree
 474                    .open_buffers
 475                    .get(&buffer_id)
 476                    .and_then(|buf| buf.upgrade(cx))
 477                    .ok_or_else(|| {
 478                        anyhow!("invalid buffer {} in update buffer message", buffer_id)
 479                    })?;
 480                buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
 481            }
 482            Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
 483                Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
 484                Some(RemoteBuffer::Loaded(buffer)) => {
 485                    if let Some(buffer) = buffer.upgrade(cx) {
 486                        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
 487                    } else {
 488                        worktree
 489                            .open_buffers
 490                            .insert(buffer_id, RemoteBuffer::Operations(ops));
 491                    }
 492                }
 493                None => {
 494                    worktree
 495                        .open_buffers
 496                        .insert(buffer_id, RemoteBuffer::Operations(ops));
 497                }
 498            },
 499        }
 500
 501        Ok(())
 502    }
 503
 504    pub fn handle_save_buffer(
 505        &mut self,
 506        envelope: TypedEnvelope<proto::SaveBuffer>,
 507        rpc: Arc<Client>,
 508        cx: &mut ModelContext<Self>,
 509    ) -> Result<()> {
 510        let sender_id = envelope.original_sender_id()?;
 511        let this = self.as_local().unwrap();
 512        let project_id = this
 513            .share
 514            .as_ref()
 515            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
 516            .project_id;
 517
 518        let buffer = this
 519            .shared_buffers
 520            .get(&sender_id)
 521            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
 522            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;
 523
 524        let receipt = envelope.receipt();
 525        let worktree_id = envelope.payload.worktree_id;
 526        let buffer_id = envelope.payload.buffer_id;
 527        let save = cx.spawn(|_, mut cx| async move {
 528            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
 529        });
 530
 531        cx.background()
 532            .spawn(
 533                async move {
 534                    let (version, mtime) = save.await?;
 535
 536                    rpc.respond(
 537                        receipt,
 538                        proto::BufferSaved {
 539                            project_id,
 540                            worktree_id,
 541                            buffer_id,
 542                            version: (&version).into(),
 543                            mtime: Some(mtime.into()),
 544                        },
 545                    )
 546                    .await?;
 547
 548                    Ok(())
 549                }
 550                .log_err(),
 551            )
 552            .detach();
 553
 554        Ok(())
 555    }
 556
 557    pub fn handle_buffer_saved(
 558        &mut self,
 559        envelope: TypedEnvelope<proto::BufferSaved>,
 560        cx: &mut ModelContext<Self>,
 561    ) -> Result<()> {
 562        let payload = envelope.payload.clone();
 563        let worktree = self.as_remote_mut().unwrap();
 564        if let Some(buffer) = worktree
 565            .open_buffers
 566            .get(&(payload.buffer_id as usize))
 567            .and_then(|buf| buf.upgrade(cx))
 568        {
 569            buffer.update(cx, |buffer, cx| {
 570                let version = payload.version.try_into()?;
 571                let mtime = payload
 572                    .mtime
 573                    .ok_or_else(|| anyhow!("missing mtime"))?
 574                    .into();
 575                buffer.did_save(version, mtime, None, cx);
 576                Result::<_, anyhow::Error>::Ok(())
 577            })?;
 578        }
 579        Ok(())
 580    }
 581
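    /// Pulls the latest snapshot from the background scanner (or from the remote
    /// update stream), re-polling on a delay while a scan is still in progress,
    /// and refreshes the files of open buffers once the snapshot is up to date.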
 582    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
 583        match self {
 584            Self::Local(worktree) => {
 585                let is_fake_fs = worktree.fs.is_fake();
 586                worktree.snapshot = worktree.background_snapshot.lock().clone();
 587                if worktree.is_scanning() {
 588                    if worktree.poll_task.is_none() {
 589                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
 590                            if is_fake_fs {
 591                                smol::future::yield_now().await;
 592                            } else {
 593                                smol::Timer::after(Duration::from_millis(100)).await;
 594                            }
 595                            this.update(&mut cx, |this, cx| {
 596                                this.as_local_mut().unwrap().poll_task = None;
 597                                this.poll_snapshot(cx);
 598                            })
 599                        }));
 600                    }
 601                } else {
 602                    worktree.poll_task.take();
 603                    self.update_open_buffers(cx);
 604                }
 605            }
 606            Self::Remote(worktree) => {
 607                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
 608                self.update_open_buffers(cx);
 609            }
 610        };
 611
 612        cx.notify();
 613    }
 614
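    /// Reconciles each open buffer's `File` with the current snapshot, matching
    /// entries by id first and then by path, and prunes buffers whose handles
    /// have been dropped.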
 615    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
 616        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
 617            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
 618            Self::Remote(worktree) => {
 619                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
 620                    if let RemoteBuffer::Loaded(buf) = buf {
 621                        Some((id, buf))
 622                    } else {
 623                        None
 624                    }
 625                }))
 626            }
 627        };
 628
 629        let local = self.as_local().is_some();
 630        let worktree_path = self.abs_path.clone();
 631        let worktree_handle = cx.handle();
 632        let mut buffers_to_delete = Vec::new();
 633        for (buffer_id, buffer) in open_buffers {
 634            if let Some(buffer) = buffer.upgrade(cx) {
 635                buffer.update(cx, |buffer, cx| {
 636                    if let Some(old_file) = File::from_dyn(buffer.file()) {
 637                        let new_file = if let Some(entry) = old_file
 638                            .entry_id
 639                            .and_then(|entry_id| self.entry_for_id(entry_id))
 640                        {
 641                            File {
 642                                is_local: local,
 643                                worktree_path: worktree_path.clone(),
 644                                entry_id: Some(entry.id),
 645                                mtime: entry.mtime,
 646                                path: entry.path.clone(),
 647                                worktree: worktree_handle.clone(),
 648                            }
 649                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
 650                            File {
 651                                is_local: local,
 652                                worktree_path: worktree_path.clone(),
 653                                entry_id: Some(entry.id),
 654                                mtime: entry.mtime,
 655                                path: entry.path.clone(),
 656                                worktree: worktree_handle.clone(),
 657                            }
 658                        } else {
 659                            File {
 660                                is_local: local,
 661                                worktree_path: worktree_path.clone(),
 662                                entry_id: None,
 663                                path: old_file.path().clone(),
 664                                mtime: old_file.mtime(),
 665                                worktree: worktree_handle.clone(),
 666                            }
 667                        };
 668
 669                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
 670                            task.detach();
 671                        }
 672                    }
 673                });
 674            } else {
 675                buffers_to_delete.push(*buffer_id);
 676            }
 677        }
 678
 679        for buffer_id in buffers_to_delete {
 680            match self {
 681                Self::Local(worktree) => {
 682                    worktree.open_buffers.remove(&buffer_id);
 683                }
 684                Self::Remote(worktree) => {
 685                    worktree.open_buffers.remove(&buffer_id);
 686                }
 687            }
 688        }
 689    }
 690
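    /// Ingests a `textDocument/publishDiagnostics` notification on a local
    /// worktree, grouping each primary diagnostic with its related information
    /// by (source, code, range) before recording the entries for the affected
    /// path.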
 691    pub fn update_diagnostics(
 692        &mut self,
 693        params: lsp::PublishDiagnosticsParams,
 694        disk_based_sources: &HashSet<String>,
 695        cx: &mut ModelContext<Worktree>,
 696    ) -> Result<()> {
 697        let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
 698        let abs_path = params
 699            .uri
 700            .to_file_path()
 701            .map_err(|_| anyhow!("URI is not a file"))?;
 702        let worktree_path = Arc::from(
 703            abs_path
 704                .strip_prefix(&this.abs_path)
 705                .context("path is not within worktree")?,
 706        );
 707
 708        let mut next_group_id = 0;
 709        let mut diagnostics = Vec::default();
 710        let mut primary_diagnostic_group_ids = HashMap::default();
 711        let mut sources_by_group_id = HashMap::default();
 712        let mut supporting_diagnostic_severities = HashMap::default();
 713        for diagnostic in &params.diagnostics {
 714            let source = diagnostic.source.as_ref();
 715            let code = diagnostic.code.as_ref().map(|code| match code {
 716                lsp::NumberOrString::Number(code) => code.to_string(),
 717                lsp::NumberOrString::String(code) => code.clone(),
 718            });
 719            let range = range_from_lsp(diagnostic.range);
 720            let is_supporting = diagnostic
 721                .related_information
 722                .as_ref()
 723                .map_or(false, |infos| {
 724                    infos.iter().any(|info| {
 725                        primary_diagnostic_group_ids.contains_key(&(
 726                            source,
 727                            code.clone(),
 728                            range_from_lsp(info.location.range),
 729                        ))
 730                    })
 731                });
 732
 733            if is_supporting {
 734                if let Some(severity) = diagnostic.severity {
 735                    supporting_diagnostic_severities
 736                        .insert((source, code.clone(), range), severity);
 737                }
 738            } else {
 739                let group_id = post_inc(&mut next_group_id);
 740                let is_disk_based =
 741                    source.map_or(false, |source| disk_based_sources.contains(source));
 742
 743                sources_by_group_id.insert(group_id, source);
 744                primary_diagnostic_group_ids
 745                    .insert((source, code.clone(), range.clone()), group_id);
 746
 747                diagnostics.push(DiagnosticEntry {
 748                    range,
 749                    diagnostic: Diagnostic {
 750                        code: code.clone(),
 751                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
 752                        message: diagnostic.message.clone(),
 753                        group_id,
 754                        is_primary: true,
 755                        is_valid: true,
 756                        is_disk_based,
 757                    },
 758                });
 759                if let Some(infos) = &diagnostic.related_information {
 760                    for info in infos {
 761                        if info.location.uri == params.uri {
 762                            let range = range_from_lsp(info.location.range);
 763                            diagnostics.push(DiagnosticEntry {
 764                                range,
 765                                diagnostic: Diagnostic {
 766                                    code: code.clone(),
 767                                    severity: DiagnosticSeverity::INFORMATION,
 768                                    message: info.message.clone(),
 769                                    group_id,
 770                                    is_primary: false,
 771                                    is_valid: true,
 772                                    is_disk_based,
 773                                },
 774                            });
 775                        }
 776                    }
 777                }
 778            }
 779        }
 780
 781        for entry in &mut diagnostics {
 782            let diagnostic = &mut entry.diagnostic;
 783            if !diagnostic.is_primary {
 784                let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
 785                if let Some(&severity) = supporting_diagnostic_severities.get(&(
 786                    source,
 787                    diagnostic.code.clone(),
 788                    entry.range.clone(),
 789                )) {
 790                    diagnostic.severity = severity;
 791                }
 792            }
 793        }
 794
 795        self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
 796        Ok(())
 797    }
 798
 799    pub fn update_diagnostic_entries(
 800        &mut self,
 801        worktree_path: Arc<Path>,
 802        version: Option<i32>,
 803        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
 804        cx: &mut ModelContext<Self>,
 805    ) -> Result<()> {
 806        let this = self.as_local_mut().unwrap();
 807        for buffer in this.open_buffers.values() {
 808            if let Some(buffer) = buffer.upgrade(cx) {
 809                if buffer
 810                    .read(cx)
 811                    .file()
 812                    .map_or(false, |file| *file.path() == worktree_path)
 813                {
 814                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
 815                        (
 816                            buffer.remote_id(),
 817                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
 818                        )
 819                    });
 820                    self.send_buffer_update(remote_id, operation?, cx);
 821                    break;
 822                }
 823            }
 824        }
 825
 826        let this = self.as_local_mut().unwrap();
 827        let summary = DiagnosticSummary::new(&diagnostics);
 828        this.diagnostic_summaries
 829            .insert(PathKey(worktree_path.clone()), summary.clone());
 830        this.diagnostics.insert(worktree_path.clone(), diagnostics);
 831
 832        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));
 833
 834        if let Some(share) = this.share.as_ref() {
 835            cx.foreground()
 836                .spawn({
 837                    let client = this.client.clone();
 838                    let project_id = share.project_id;
 839                    let worktree_id = this.id().to_proto();
 840                    let path = worktree_path.to_string_lossy().to_string();
 841                    async move {
 842                        client
 843                            .send(proto::UpdateDiagnosticSummary {
 844                                project_id,
 845                                worktree_id,
 846                                summary: Some(proto::DiagnosticSummary {
 847                                    path,
 848                                    error_count: summary.error_count as u32,
 849                                    warning_count: summary.warning_count as u32,
 850                                    info_count: summary.info_count as u32,
 851                                    hint_count: summary.hint_count as u32,
 852                                }),
 853                            })
 854                            .await
 855                            .log_err()
 856                    }
 857                })
 858                .detach();
 859        }
 860
 861        Ok(())
 862    }
 863
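    /// Sends a buffer operation over RPC when the worktree is shared or remote;
    /// if the request fails, the operation is queued.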
 864    fn send_buffer_update(
 865        &mut self,
 866        buffer_id: u64,
 867        operation: Operation,
 868        cx: &mut ModelContext<Self>,
 869    ) {
 870        if let Some((project_id, worktree_id, rpc)) = match self {
 871            Worktree::Local(worktree) => worktree
 872                .share
 873                .as_ref()
 874                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
 875            Worktree::Remote(worktree) => Some((
 876                worktree.project_id,
 877                worktree.snapshot.id(),
 878                worktree.client.clone(),
 879            )),
 880        } {
 881            cx.spawn(|worktree, mut cx| async move {
 882                if let Err(error) = rpc
 883                    .request(proto::UpdateBuffer {
 884                        project_id,
 885                        worktree_id: worktree_id.0 as u64,
 886                        buffer_id,
 887                        operations: vec![language::proto::serialize_operation(&operation)],
 888                    })
 889                    .await
 890                {
 891                    worktree.update(&mut cx, |worktree, _| {
 892                        log::error!("error sending buffer operation: {}", error);
 893                        match worktree {
 894                            Worktree::Local(t) => &mut t.queued_operations,
 895                            Worktree::Remote(t) => &mut t.queued_operations,
 896                        }
 897                        .push((buffer_id, operation));
 898                    });
 899                }
 900            })
 901            .detach();
 902        }
 903    }
 904}
 905
 906impl WorktreeId {
 907    pub fn from_usize(handle_id: usize) -> Self {
 908        Self(handle_id)
 909    }
 910
 911    pub(crate) fn from_proto(id: u64) -> Self {
 912        Self(id as usize)
 913    }
 914
 915    pub fn to_proto(&self) -> u64 {
 916        self.0 as u64
 917    }
 918
 919    pub fn to_usize(&self) -> usize {
 920        self.0
 921    }
 922}
 923
 924impl fmt::Display for WorktreeId {
 925    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 926        self.0.fmt(f)
 927    }
 928}
 929
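/// A point-in-time view of a worktree's entries and ignore rules. The
/// foreground worktree keeps its own copy, while the background scanner updates
/// a shared copy behind a mutex.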
 930#[derive(Clone)]
 931pub struct Snapshot {
 932    id: WorktreeId,
 933    scan_id: usize,
 934    abs_path: Arc<Path>,
 935    root_name: String,
 936    root_char_bag: CharBag,
 937    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
 938    entries_by_path: SumTree<Entry>,
 939    entries_by_id: SumTree<PathEntry>,
 940    removed_entry_ids: HashMap<u64, usize>,
 941    next_entry_id: Arc<AtomicUsize>,
 942}
 943
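/// A worktree backed by the local filesystem, owning the background scanner
/// task, open buffers, diagnostics, and any language servers started for it.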
 944pub struct LocalWorktree {
 945    snapshot: Snapshot,
 946    config: WorktreeConfig,
 947    background_snapshot: Arc<Mutex<Snapshot>>,
 948    last_scan_state_rx: watch::Receiver<ScanState>,
 949    _background_scanner_task: Option<Task<()>>,
 950    poll_task: Option<Task<()>>,
 951    share: Option<ShareState>,
 952    loading_buffers: LoadingBuffers,
 953    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
 954    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
 955    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
 956    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
 957    queued_operations: Vec<(u64, Operation)>,
 958    language_registry: Arc<LanguageRegistry>,
 959    client: Arc<Client>,
 960    user_store: ModelHandle<UserStore>,
 961    fs: Arc<dyn Fs>,
 962    languages: Vec<Arc<Language>>,
 963    language_servers: HashMap<String, Arc<LanguageServer>>,
 964}
 965
 966struct ShareState {
 967    project_id: u64,
 968    snapshots_tx: Sender<Snapshot>,
 969}
 970
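/// A replica of a collaborator's worktree whose snapshot is kept up to date via
/// `proto::UpdateWorktree` messages.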
 971pub struct RemoteWorktree {
 972    project_id: u64,
 973    snapshot: Snapshot,
 974    snapshot_rx: watch::Receiver<Snapshot>,
 975    client: Arc<Client>,
 976    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
 977    replica_id: ReplicaId,
 978    loading_buffers: LoadingBuffers,
 979    open_buffers: HashMap<usize, RemoteBuffer>,
 980    languages: Arc<LanguageRegistry>,
 981    user_store: ModelHandle<UserStore>,
 982    queued_operations: Vec<(u64, Operation)>,
 983    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
 984}
 985
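/// Buffers that are currently being loaded, keyed by path. Each entry holds a
/// watch channel that resolves once the load completes.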
 986type LoadingBuffers = HashMap<
 987    Arc<Path>,
 988    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
 989>;
 990
 991#[derive(Default, Deserialize)]
 992struct WorktreeConfig {
 993    collaborators: Vec<String>,
 994}
 995
 996impl LocalWorktree {
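    /// Creates the worktree model for a local path: loads the optional
    /// `.zed.toml` config, builds the initial snapshot, and spawns the task that
    /// forwards scan states from the background scanner.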
 997    async fn new(
 998        client: Arc<Client>,
 999        user_store: ModelHandle<UserStore>,
1000        path: impl Into<Arc<Path>>,
1001        fs: Arc<dyn Fs>,
1002        languages: Arc<LanguageRegistry>,
1003        cx: &mut AsyncAppContext,
1004    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
1005        let abs_path = path.into();
1006        let path: Arc<Path> = Arc::from(Path::new(""));
1007        let next_entry_id = AtomicUsize::new(0);
1008
 1009        // Populate the snapshot's "root name" (the file name of the worktree's root path),
 1010        // which is used for fuzzy matching.
1011        let root_name = abs_path
1012            .file_name()
1013            .map_or(String::new(), |f| f.to_string_lossy().to_string());
1014        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
1015        let metadata = fs.metadata(&abs_path).await?;
1016
1017        let mut config = WorktreeConfig::default();
1018        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
1019            if let Ok(parsed) = toml::from_str(&zed_toml) {
1020                config = parsed;
1021            }
1022        }
1023
1024        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
1025        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
1026        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
1027            let mut snapshot = Snapshot {
1028                id: WorktreeId::from_usize(cx.model_id()),
1029                scan_id: 0,
1030                abs_path,
1031                root_name: root_name.clone(),
1032                root_char_bag,
1033                ignores: Default::default(),
1034                entries_by_path: Default::default(),
1035                entries_by_id: Default::default(),
1036                removed_entry_ids: Default::default(),
1037                next_entry_id: Arc::new(next_entry_id),
1038            };
1039            if let Some(metadata) = metadata {
1040                snapshot.insert_entry(
1041                    Entry::new(
1042                        path.into(),
1043                        &metadata,
1044                        &snapshot.next_entry_id,
1045                        snapshot.root_char_bag,
1046                    ),
1047                    fs.as_ref(),
1048                );
1049            }
1050
1051            let tree = Self {
1052                snapshot: snapshot.clone(),
1053                config,
1054                background_snapshot: Arc::new(Mutex::new(snapshot)),
1055                last_scan_state_rx,
1056                _background_scanner_task: None,
1057                share: None,
1058                poll_task: None,
1059                loading_buffers: Default::default(),
1060                open_buffers: Default::default(),
1061                shared_buffers: Default::default(),
1062                diagnostics: Default::default(),
1063                diagnostic_summaries: Default::default(),
1064                queued_operations: Default::default(),
1065                language_registry: languages,
1066                client,
1067                user_store,
1068                fs,
1069                languages: Default::default(),
1070                language_servers: Default::default(),
1071            };
1072
1073            cx.spawn_weak(|this, mut cx| async move {
1074                while let Ok(scan_state) = scan_states_rx.recv().await {
1075                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1076                        let to_send = handle.update(&mut cx, |this, cx| {
1077                            last_scan_state_tx.blocking_send(scan_state).ok();
1078                            this.poll_snapshot(cx);
1079                            let tree = this.as_local_mut().unwrap();
1080                            if !tree.is_scanning() {
1081                                if let Some(share) = tree.share.as_ref() {
1082                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
1083                                }
1084                            }
1085                            None
1086                        });
1087
1088                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
1089                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
1090                                log::error!("error submitting snapshot to send {}", err);
1091                            }
1092                        }
1093                    } else {
1094                        break;
1095                    }
1096                }
1097            })
1098            .detach();
1099
1100            Worktree::Local(tree)
1101        });
1102
1103        Ok((tree, scan_states_tx))
1104    }
1105
1106    pub fn authorized_logins(&self) -> Vec<String> {
1107        self.config.collaborators.clone()
1108    }
1109
1110    pub fn language_registry(&self) -> &LanguageRegistry {
1111        &self.language_registry
1112    }
1113
1114    pub fn languages(&self) -> &[Arc<Language>] {
1115        &self.languages
1116    }
1117
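    /// Registers a language with this worktree, starting (or reusing) its
    /// language server and wiring up diagnostics and progress notifications from
    /// that server.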
1118    pub fn register_language(
1119        &mut self,
1120        language: &Arc<Language>,
1121        cx: &mut ModelContext<Worktree>,
1122    ) -> Option<Arc<LanguageServer>> {
1123        if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
1124            self.languages.push(language.clone());
1125        }
1126
1127        if let Some(server) = self.language_servers.get(language.name()) {
1128            return Some(server.clone());
1129        }
1130
1131        if let Some(language_server) = language
1132            .start_server(self.abs_path(), cx)
1133            .log_err()
1134            .flatten()
1135        {
1136            let disk_based_sources = language
1137                .disk_based_diagnostic_sources()
1138                .cloned()
1139                .unwrap_or_default();
1140            let disk_based_diagnostics_progress_token =
1141                language.disk_based_diagnostics_progress_token().cloned();
1142            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
1143            let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
1144                smol::channel::unbounded();
1145            language_server
1146                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
1147                    smol::block_on(diagnostics_tx.send(params)).ok();
1148                })
1149                .detach();
1150            cx.spawn_weak(|this, mut cx| {
1151                let has_disk_based_diagnostic_progress_token =
1152                    disk_based_diagnostics_progress_token.is_some();
1153                let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
1154                async move {
1155                    while let Ok(diagnostics) = diagnostics_rx.recv().await {
1156                        if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1157                            handle.update(&mut cx, |this, cx| {
1158                                this.update_diagnostics(diagnostics, &disk_based_sources, cx)
1159                                    .log_err();
1160                                if !has_disk_based_diagnostic_progress_token {
1161                                    smol::block_on(disk_based_diagnostics_done_tx.send(())).ok();
1162                                }
1163                            })
1164                        } else {
1165                            break;
1166                        }
1167                    }
1168                }
1169            })
1170            .detach();
1171
1172            let mut pending_disk_based_diagnostics: i32 = 0;
1173            language_server
1174                .on_notification::<lsp::notification::Progress, _>(move |params| {
1175                    let token = match params.token {
1176                        lsp::NumberOrString::Number(_) => None,
1177                        lsp::NumberOrString::String(token) => Some(token),
1178                    };
1179
1180                    if token == disk_based_diagnostics_progress_token {
1181                        match params.value {
1182                            lsp::ProgressParamsValue::WorkDone(progress) => match progress {
1183                                lsp::WorkDoneProgress::Begin(_) => {
1184                                    pending_disk_based_diagnostics += 1;
1185                                }
1186                                lsp::WorkDoneProgress::End(_) => {
1187                                    pending_disk_based_diagnostics -= 1;
1188                                    if pending_disk_based_diagnostics == 0 {
1189                                        smol::block_on(disk_based_diagnostics_done_tx.send(()))
1190                                            .ok();
1191                                    }
1192                                }
1193                                _ => {}
1194                            },
1195                        }
1196                    }
1197                })
1198                .detach();
1199            let rpc = self.client.clone();
1200            cx.spawn_weak(|this, mut cx| async move {
1201                while let Ok(()) = disk_based_diagnostics_done_rx.recv().await {
1202                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
1203                        let message = handle.update(&mut cx, |this, cx| {
1204                            cx.emit(Event::DiskBasedDiagnosticsUpdated);
1205                            let this = this.as_local().unwrap();
1206                            this.share
1207                                .as_ref()
1208                                .map(|share| proto::DiskBasedDiagnosticsUpdated {
1209                                    project_id: share.project_id,
1210                                    worktree_id: this.id().to_proto(),
1211                                })
1212                        });
1213
1214                        if let Some(message) = message {
1215                            rpc.send(message).await.log_err();
1216                        }
1217                    } else {
1218                        break;
1219                    }
1220                }
1221            })
1222            .detach();
1223
1224            self.language_servers
1225                .insert(language.name().to_string(), language_server.clone());
1226            Some(language_server.clone())
1227        } else {
1228            None
1229        }
1230    }
1231
1232    fn get_open_buffer(
1233        &mut self,
1234        path: &Path,
1235        cx: &mut ModelContext<Worktree>,
1236    ) -> Option<ModelHandle<Buffer>> {
1237        let handle = cx.handle();
1238        let mut result = None;
1239        self.open_buffers.retain(|_buffer_id, buffer| {
1240            if let Some(buffer) = buffer.upgrade(cx) {
1241                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1242                    if file.worktree == handle && file.path().as_ref() == path {
1243                        result = Some(buffer);
1244                    }
1245                }
1246                true
1247            } else {
1248                false
1249            }
1250        });
1251        result
1252    }
1253
1254    fn open_buffer(
1255        &mut self,
1256        path: &Path,
1257        cx: &mut ModelContext<Worktree>,
1258    ) -> Task<Result<ModelHandle<Buffer>>> {
1259        let path = Arc::from(path);
1260        cx.spawn(move |this, mut cx| async move {
1261            let (file, contents) = this
1262                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
1263                .await?;
1264
1265            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
1266                let this = this.as_local_mut().unwrap();
1267                let diagnostics = this.diagnostics.get(&path).cloned();
1268                let language = this
1269                    .language_registry
1270                    .select_language(file.full_path())
1271                    .cloned();
1272                let server = language
1273                    .as_ref()
1274                    .and_then(|language| this.register_language(language, cx));
1275                (diagnostics, language, server)
1276            });
1277
1278            let mut buffer_operations = Vec::new();
1279            let buffer = cx.add_model(|cx| {
1280                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
1281                buffer.set_language(language, language_server, cx);
1282                if let Some(diagnostics) = diagnostics {
1283                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
1284                    buffer_operations.push(op);
1285                }
1286                buffer
1287            });
1288
1289            this.update(&mut cx, |this, cx| {
1290                for op in buffer_operations {
1291                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
1292                }
1293                let this = this.as_local_mut().unwrap();
1294                this.open_buffers.insert(buffer.id(), buffer.downgrade());
1295            });
1296
1297            Ok(buffer)
1298        })
1299    }
1300
1301    pub fn open_remote_buffer(
1302        &mut self,
1303        envelope: TypedEnvelope<proto::OpenBuffer>,
1304        cx: &mut ModelContext<Worktree>,
1305    ) -> Task<Result<proto::OpenBufferResponse>> {
1306        cx.spawn(|this, mut cx| async move {
1307            let peer_id = envelope.original_sender_id();
1308            let path = Path::new(&envelope.payload.path);
1309            let buffer = this
1310                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
1311                .await?;
1312            this.update(&mut cx, |this, cx| {
1313                this.as_local_mut()
1314                    .unwrap()
1315                    .shared_buffers
1316                    .entry(peer_id?)
1317                    .or_default()
1318                    .insert(buffer.id() as u64, buffer.clone());
1319
1320                Ok(proto::OpenBufferResponse {
1321                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
1322                })
1323            })
1324        })
1325    }
1326
1327    pub fn close_remote_buffer(
1328        &mut self,
1329        envelope: TypedEnvelope<proto::CloseBuffer>,
1330        cx: &mut ModelContext<Worktree>,
1331    ) -> Result<()> {
1332        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1333            shared_buffers.remove(&envelope.payload.buffer_id);
1334            cx.notify();
1335        }
1336
1337        Ok(())
1338    }
1339
1340    pub fn remove_collaborator(
1341        &mut self,
1342        peer_id: PeerId,
1343        replica_id: ReplicaId,
1344        cx: &mut ModelContext<Worktree>,
1345    ) {
1346        self.shared_buffers.remove(&peer_id);
1347        for (_, buffer) in &self.open_buffers {
1348            if let Some(buffer) = buffer.upgrade(cx) {
1349                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1350            }
1351        }
1352        cx.notify();
1353    }
1354
1355    pub fn scan_complete(&self) -> impl Future<Output = ()> {
1356        let mut scan_state_rx = self.last_scan_state_rx.clone();
1357        async move {
1358            let mut scan_state = Some(scan_state_rx.borrow().clone());
1359            while let Some(ScanState::Scanning) = scan_state {
1360                scan_state = scan_state_rx.recv().await;
1361            }
1362        }
1363    }
1364
1365    fn is_scanning(&self) -> bool {
1366        if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1367            true
1368        } else {
1369            false
1370        }
1371    }
1372
1373    pub fn snapshot(&self) -> Snapshot {
1374        self.snapshot.clone()
1375    }
1376
1377    pub fn abs_path(&self) -> &Arc<Path> {
1378        &self.snapshot.abs_path
1379    }
1380
1381    pub fn contains_abs_path(&self, path: &Path) -> bool {
1382        path.starts_with(&self.snapshot.abs_path)
1383    }
1384
1385    fn absolutize(&self, path: &Path) -> PathBuf {
1386        if path.file_name().is_some() {
1387            self.snapshot.abs_path.join(path)
1388        } else {
1389            self.snapshot.abs_path.to_path_buf()
1390        }
1391    }
1392
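    /// Loads the file at `path` from disk, refreshing its entry in the snapshot
    /// so the returned `File` reflects the file's current metadata.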
1393    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
1394        let handle = cx.handle();
1395        let path = Arc::from(path);
1396        let worktree_path = self.abs_path.clone();
1397        let abs_path = self.absolutize(&path);
1398        let background_snapshot = self.background_snapshot.clone();
1399        let fs = self.fs.clone();
1400        cx.spawn(|this, mut cx| async move {
1401            let text = fs.load(&abs_path).await?;
1402            // Eagerly populate the snapshot with an updated entry for the loaded file
1403            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
1404            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
1405            Ok((
1406                File {
1407                    entry_id: Some(entry.id),
1408                    worktree: handle,
1409                    worktree_path,
1410                    path: entry.path,
1411                    mtime: entry.mtime,
1412                    is_local: true,
1413                },
1414                text,
1415            ))
1416        })
1417    }
1418
1419    pub fn save_buffer_as(
1420        &self,
1421        buffer: ModelHandle<Buffer>,
1422        path: impl Into<Arc<Path>>,
1423        text: Rope,
1424        cx: &mut ModelContext<Worktree>,
1425    ) -> Task<Result<File>> {
1426        let save = self.save(path, text, cx);
1427        cx.spawn(|this, mut cx| async move {
1428            let entry = save.await?;
1429            this.update(&mut cx, |this, cx| {
1430                let this = this.as_local_mut().unwrap();
1431                this.open_buffers.insert(buffer.id(), buffer.downgrade());
1432                Ok(File {
1433                    entry_id: Some(entry.id),
1434                    worktree: cx.handle(),
1435                    worktree_path: this.abs_path.clone(),
1436                    path: entry.path,
1437                    mtime: entry.mtime,
1438                    is_local: true,
1439                })
1440            })
1441        })
1442    }
1443
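        // Writes `text` to disk on the background executor and refreshes the saved file's
        // entry in the snapshot.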
1444    fn save(
1445        &self,
1446        path: impl Into<Arc<Path>>,
1447        text: Rope,
1448        cx: &mut ModelContext<Worktree>,
1449    ) -> Task<Result<Entry>> {
1450        let path = path.into();
1451        let abs_path = self.absolutize(&path);
1452        let background_snapshot = self.background_snapshot.clone();
1453        let fs = self.fs.clone();
1454        let save = cx.background().spawn(async move {
1455            fs.save(&abs_path, &text).await?;
1456            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
1457        });
1458
1459        cx.spawn(|this, mut cx| async move {
1460            let entry = save.await?;
1461            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
1462            Ok(entry)
1463        })
1464    }
1465
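        // Starts sharing this worktree for the given project: records a `ShareState`, spawns
        // a task that streams snapshot diffs to the server, and sends the initial
        // `ShareWorktree` request containing the full snapshot.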
1466    pub fn share(
1467        &mut self,
1468        project_id: u64,
1469        cx: &mut ModelContext<Worktree>,
1470    ) -> Task<anyhow::Result<()>> {
1471        if self.share.is_some() {
1472            return Task::ready(Ok(()));
1473        }
1474
1475        let snapshot = self.snapshot();
1476        let rpc = self.client.clone();
1477        let worktree_id = cx.model_id() as u64;
1478        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
1479        self.share = Some(ShareState {
1480            project_id,
1481            snapshots_tx: snapshots_to_send_tx,
1482        });
1483
1484        cx.background()
1485            .spawn({
1486                let rpc = rpc.clone();
1487                let snapshot = snapshot.clone();
1488                async move {
1489                    let mut prev_snapshot = snapshot;
1490                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
1491                        let message =
1492                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
1493                        match rpc.send(message).await {
1494                            Ok(()) => prev_snapshot = snapshot,
1495                            Err(err) => log::error!("error sending snapshot diff {}", err),
1496                        }
1497                    }
1498                }
1499            })
1500            .detach();
1501
1502        let diagnostic_summaries = self.diagnostic_summaries.clone();
1503        let share_message = cx.background().spawn(async move {
1504            proto::ShareWorktree {
1505                project_id,
1506                worktree: Some(snapshot.to_proto(&diagnostic_summaries)),
1507            }
1508        });
1509
1510        cx.foreground().spawn(async move {
1511            rpc.request(share_message.await).await?;
1512            Ok(())
1513        })
1514    }
1515}
1516
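    // Loads a `.gitignore` file through `fs` and parses its lines into a `Gitignore`
    // matcher rooted at the file's parent directory.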
1517fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1518    let contents = smol::block_on(fs.load(&abs_path))?;
1519    let parent = abs_path.parent().unwrap_or(Path::new("/"));
1520    let mut builder = GitignoreBuilder::new(parent);
1521    for line in contents.lines() {
1522        builder.add_line(Some(abs_path.into()), line)?;
1523    }
1524    Ok(builder.build()?)
1525}
1526
1527impl Deref for Worktree {
1528    type Target = Snapshot;
1529
1530    fn deref(&self) -> &Self::Target {
1531        match self {
1532            Worktree::Local(worktree) => &worktree.snapshot,
1533            Worktree::Remote(worktree) => &worktree.snapshot,
1534        }
1535    }
1536}
1537
1538impl Deref for LocalWorktree {
1539    type Target = Snapshot;
1540
1541    fn deref(&self) -> &Self::Target {
1542        &self.snapshot
1543    }
1544}
1545
1546impl Deref for RemoteWorktree {
1547    type Target = Snapshot;
1548
1549    fn deref(&self) -> &Self::Target {
1550        &self.snapshot
1551    }
1552}
1553
1554impl fmt::Debug for LocalWorktree {
1555    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1556        self.snapshot.fmt(f)
1557    }
1558}
1559
1560impl RemoteWorktree {
1561    fn get_open_buffer(
1562        &mut self,
1563        path: &Path,
1564        cx: &mut ModelContext<Worktree>,
1565    ) -> Option<ModelHandle<Buffer>> {
1566        let handle = cx.handle();
1567        let mut existing_buffer = None;
1568        self.open_buffers.retain(|_buffer_id, buffer| {
1569            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
1570                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1571                    if file.worktree == handle && file.path().as_ref() == path {
1572                        existing_buffer = Some(buffer);
1573                    }
1574                }
1575                true
1576            } else {
1577                false
1578            }
1579        });
1580        existing_buffer
1581    }
1582
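        // Opens a buffer over RPC: requests its contents from the host, constructs a local
        // replica, and applies any operations that arrived while the request was in flight.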
1583    fn open_buffer(
1584        &mut self,
1585        path: &Path,
1586        cx: &mut ModelContext<Worktree>,
1587    ) -> Task<Result<ModelHandle<Buffer>>> {
1588        let rpc = self.client.clone();
1589        let replica_id = self.replica_id;
1590        let project_id = self.project_id;
1591        let remote_worktree_id = self.id();
1592        let root_path = self.snapshot.abs_path.clone();
1593        let path: Arc<Path> = Arc::from(path);
1594        let path_string = path.to_string_lossy().to_string();
1595        cx.spawn_weak(move |this, mut cx| async move {
1596            let entry = this
1597                .upgrade(&cx)
1598                .ok_or_else(|| anyhow!("worktree was closed"))?
1599                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
1600                .ok_or_else(|| anyhow!("file does not exist"))?;
1601            let response = rpc
1602                .request(proto::OpenBuffer {
1603                    project_id,
1604                    worktree_id: remote_worktree_id.to_proto(),
1605                    path: path_string,
1606                })
1607                .await?;
1608
1609            let this = this
1610                .upgrade(&cx)
1611                .ok_or_else(|| anyhow!("worktree was closed"))?;
1612            let file = File {
1613                entry_id: Some(entry.id),
1614                worktree: this.clone(),
1615                worktree_path: root_path,
1616                path: entry.path,
1617                mtime: entry.mtime,
1618                is_local: false,
1619            };
1620            let language = this.read_with(&cx, |this, _| {
1621                use language::File;
1622                this.languages().select_language(file.full_path()).cloned()
1623            });
1624            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
1625            let buffer_id = remote_buffer.id as usize;
1626            let buffer = cx.add_model(|cx| {
1627                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
1628                    .unwrap()
1629                    .with_language(language, None, cx)
1630            });
1631            this.update(&mut cx, move |this, cx| {
1632                let this = this.as_remote_mut().unwrap();
1633                if let Some(RemoteBuffer::Operations(pending_ops)) = this
1634                    .open_buffers
1635                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
1636                {
1637                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
1638                }
1639                Result::<_, anyhow::Error>::Ok(buffer)
1640            })
1641        })
1642    }
1643
1644    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1645        for (_, buffer) in self.open_buffers.drain() {
1646            if let RemoteBuffer::Loaded(buffer) = buffer {
1647                if let Some(buffer) = buffer.upgrade(cx) {
1648                    buffer.update(cx, |buffer, cx| buffer.close(cx))
1649                }
1650            }
1651        }
1652    }
1653
1654    fn snapshot(&self) -> Snapshot {
1655        self.snapshot.clone()
1656    }
1657
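        // Forwards a remote `UpdateWorktree` message onto this worktree's update queue; the
        // payload is sent from a background task so the caller never blocks.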
1658    pub fn update_from_remote(
1659        &mut self,
1660        envelope: TypedEnvelope<proto::UpdateWorktree>,
1661        cx: &mut ModelContext<Worktree>,
1662    ) -> Result<()> {
1663        let mut tx = self.updates_tx.clone();
1664        let payload = envelope.payload.clone();
1665        cx.background()
1666            .spawn(async move {
1667                tx.send(payload).await.expect("receiver runs to completion");
1668            })
1669            .detach();
1670
1671        Ok(())
1672    }
1673
1674    pub fn update_diagnostic_summary(
1675        &mut self,
1676        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
1677        cx: &mut ModelContext<Worktree>,
1678    ) {
1679        if let Some(summary) = envelope.payload.summary {
1680            let path: Arc<Path> = Path::new(&summary.path).into();
1681            self.diagnostic_summaries.insert(
1682                PathKey(path.clone()),
1683                DiagnosticSummary {
1684                    error_count: summary.error_count as usize,
1685                    warning_count: summary.warning_count as usize,
1686                    info_count: summary.info_count as usize,
1687                    hint_count: summary.hint_count as usize,
1688                },
1689            );
1690            cx.emit(Event::DiagnosticsUpdated(path));
1691        }
1692    }
1693
1694    pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
1695        cx.emit(Event::DiskBasedDiagnosticsUpdated);
1696    }
1697
1698    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1699        for (_, buffer) in &self.open_buffers {
1700            if let Some(buffer) = buffer.upgrade(cx) {
1701                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1702            }
1703        }
1704        cx.notify();
1705    }
1706}
1707
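    // A buffer belonging to a remote worktree: either still loading, buffering operations
    // that arrive before the open completes, or loaded as a weak handle to the local replica.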
1708enum RemoteBuffer {
1709    Operations(Vec<Operation>),
1710    Loaded(WeakModelHandle<Buffer>),
1711}
1712
1713impl RemoteBuffer {
1714    fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1715        match self {
1716            Self::Operations(_) => None,
1717            Self::Loaded(buffer) => buffer.upgrade(cx),
1718        }
1719    }
1720}
1721
1722impl Snapshot {
1723    pub fn id(&self) -> WorktreeId {
1724        self.id
1725    }
1726
1727    pub fn to_proto(
1728        &self,
1729        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
1730    ) -> proto::Worktree {
1731        let root_name = self.root_name.clone();
1732        proto::Worktree {
1733            id: self.id.0 as u64,
1734            root_name,
1735            entries: self
1736                .entries_by_path
1737                .iter()
1738                .filter(|e| !e.is_ignored)
1739                .map(Into::into)
1740                .collect(),
1741            diagnostic_summaries: diagnostic_summaries
1742                .iter()
1743                .map(|(path, summary)| summary.to_proto(path.0.clone()))
1744                .collect(),
1745        }
1746    }
1747
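        // Computes the difference between this snapshot and `other` by walking both
        // id-ordered entry lists in lockstep, collecting updated and removed entries into a
        // `proto::UpdateWorktree` message.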
1748    pub fn build_update(
1749        &self,
1750        other: &Self,
1751        project_id: u64,
1752        worktree_id: u64,
1753        include_ignored: bool,
1754    ) -> proto::UpdateWorktree {
1755        let mut updated_entries = Vec::new();
1756        let mut removed_entries = Vec::new();
1757        let mut self_entries = self
1758            .entries_by_id
1759            .cursor::<()>()
1760            .filter(|e| include_ignored || !e.is_ignored)
1761            .peekable();
1762        let mut other_entries = other
1763            .entries_by_id
1764            .cursor::<()>()
1765            .filter(|e| include_ignored || !e.is_ignored)
1766            .peekable();
1767        loop {
1768            match (self_entries.peek(), other_entries.peek()) {
1769                (Some(self_entry), Some(other_entry)) => {
1770                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1771                        Ordering::Less => {
1772                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1773                            updated_entries.push(entry);
1774                            self_entries.next();
1775                        }
1776                        Ordering::Equal => {
1777                            if self_entry.scan_id != other_entry.scan_id {
1778                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1779                                updated_entries.push(entry);
1780                            }
1781
1782                            self_entries.next();
1783                            other_entries.next();
1784                        }
1785                        Ordering::Greater => {
1786                            removed_entries.push(other_entry.id as u64);
1787                            other_entries.next();
1788                        }
1789                    }
1790                }
1791                (Some(self_entry), None) => {
1792                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1793                    updated_entries.push(entry);
1794                    self_entries.next();
1795                }
1796                (None, Some(other_entry)) => {
1797                    removed_entries.push(other_entry.id as u64);
1798                    other_entries.next();
1799                }
1800                (None, None) => break,
1801            }
1802        }
1803
1804        proto::UpdateWorktree {
1805            project_id,
1806            worktree_id,
1807            root_name: self.root_name().to_string(),
1808            updated_entries,
1809            removed_entries,
1810        }
1811    }
1812
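        // Applies a remote update: removes deleted entries and upserts updated ones in both
        // the path-ordered and id-ordered trees, bumping the scan id.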
1813    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1814        self.scan_id += 1;
1815        let scan_id = self.scan_id;
1816
1817        let mut entries_by_path_edits = Vec::new();
1818        let mut entries_by_id_edits = Vec::new();
1819        for entry_id in update.removed_entries {
1820            let entry_id = entry_id as usize;
1821            let entry = self
1822                .entry_for_id(entry_id)
1823                .ok_or_else(|| anyhow!("unknown entry"))?;
1824            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1825            entries_by_id_edits.push(Edit::Remove(entry.id));
1826        }
1827
1828        for entry in update.updated_entries {
1829            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1830            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1831                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1832            }
1833            entries_by_id_edits.push(Edit::Insert(PathEntry {
1834                id: entry.id,
1835                path: entry.path.clone(),
1836                is_ignored: entry.is_ignored,
1837                scan_id,
1838            }));
1839            entries_by_path_edits.push(Edit::Insert(entry));
1840        }
1841
1842        self.entries_by_path.edit(entries_by_path_edits, &());
1843        self.entries_by_id.edit(entries_by_id_edits, &());
1844
1845        Ok(())
1846    }
1847
1848    pub fn file_count(&self) -> usize {
1849        self.entries_by_path.summary().file_count
1850    }
1851
1852    pub fn visible_file_count(&self) -> usize {
1853        self.entries_by_path.summary().visible_file_count
1854    }
1855
1856    fn traverse_from_offset(
1857        &self,
1858        include_dirs: bool,
1859        include_ignored: bool,
1860        start_offset: usize,
1861    ) -> Traversal {
1862        let mut cursor = self.entries_by_path.cursor();
1863        cursor.seek(
1864            &TraversalTarget::Count {
1865                count: start_offset,
1866                include_dirs,
1867                include_ignored,
1868            },
1869            Bias::Right,
1870            &(),
1871        );
1872        Traversal {
1873            cursor,
1874            include_dirs,
1875            include_ignored,
1876        }
1877    }
1878
1879    fn traverse_from_path(
1880        &self,
1881        include_dirs: bool,
1882        include_ignored: bool,
1883        path: &Path,
1884    ) -> Traversal {
1885        let mut cursor = self.entries_by_path.cursor();
1886        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1887        Traversal {
1888            cursor,
1889            include_dirs,
1890            include_ignored,
1891        }
1892    }
1893
1894    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1895        self.traverse_from_offset(false, include_ignored, start)
1896    }
1897
1898    pub fn entries(&self, include_ignored: bool) -> Traversal {
1899        self.traverse_from_offset(true, include_ignored, 0)
1900    }
1901
1902    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1903        let empty_path = Path::new("");
1904        self.entries_by_path
1905            .cursor::<()>()
1906            .filter(move |entry| entry.path.as_ref() != empty_path)
1907            .map(|entry| &entry.path)
1908    }
1909
1910    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1911        let mut cursor = self.entries_by_path.cursor();
1912        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1913        let traversal = Traversal {
1914            cursor,
1915            include_dirs: true,
1916            include_ignored: true,
1917        };
1918        ChildEntriesIter {
1919            traversal,
1920            parent_path,
1921        }
1922    }
1923
1924    pub fn root_entry(&self) -> Option<&Entry> {
1925        self.entry_for_path("")
1926    }
1927
1928    pub fn root_name(&self) -> &str {
1929        &self.root_name
1930    }
1931
1932    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1933        let path = path.as_ref();
1934        self.traverse_from_path(true, true, path)
1935            .entry()
1936            .and_then(|entry| {
1937                if entry.path.as_ref() == path {
1938                    Some(entry)
1939                } else {
1940                    None
1941                }
1942            })
1943    }
1944
1945    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
1946        let entry = self.entries_by_id.get(&id, &())?;
1947        self.entry_for_path(&entry.path)
1948    }
1949
1950    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1951        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1952    }
1953
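        // Inserts or replaces a single entry in both trees. If the entry is a `.gitignore`
        // file, its contents are reparsed and recorded in `ignores` first.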
1954    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1955        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1956            let abs_path = self.abs_path.join(&entry.path);
1957            match build_gitignore(&abs_path, fs) {
1958                Ok(ignore) => {
1959                    let ignore_dir_path = entry.path.parent().unwrap();
1960                    self.ignores
1961                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
1962                }
1963                Err(error) => {
1964                    log::error!(
1965                        "error loading .gitignore file {:?} - {:?}",
1966                        &entry.path,
1967                        error
1968                    );
1969                }
1970            }
1971        }
1972
1973        self.reuse_entry_id(&mut entry);
1974        self.entries_by_path.insert_or_replace(entry.clone(), &());
1975        self.entries_by_id.insert_or_replace(
1976            PathEntry {
1977                id: entry.id,
1978                path: entry.path.clone(),
1979                is_ignored: entry.is_ignored,
1980                scan_id: self.scan_id,
1981            },
1982            &(),
1983        );
1984        entry
1985    }
1986
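        // Marks `parent_path`'s pending directory entry as fully scanned and inserts its
        // children, recording the directory's `.gitignore` if one was found.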
1987    fn populate_dir(
1988        &mut self,
1989        parent_path: Arc<Path>,
1990        entries: impl IntoIterator<Item = Entry>,
1991        ignore: Option<Arc<Gitignore>>,
1992    ) {
1993        let mut parent_entry = self
1994            .entries_by_path
1995            .get(&PathKey(parent_path.clone()), &())
1996            .unwrap()
1997            .clone();
1998        if let Some(ignore) = ignore {
1999            self.ignores.insert(parent_path, (ignore, self.scan_id));
2000        }
2001        if matches!(parent_entry.kind, EntryKind::PendingDir) {
2002            parent_entry.kind = EntryKind::Dir;
2003        } else {
2004            unreachable!();
2005        }
2006
2007        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
2008        let mut entries_by_id_edits = Vec::new();
2009
2010        for mut entry in entries {
2011            self.reuse_entry_id(&mut entry);
2012            entries_by_id_edits.push(Edit::Insert(PathEntry {
2013                id: entry.id,
2014                path: entry.path.clone(),
2015                is_ignored: entry.is_ignored,
2016                scan_id: self.scan_id,
2017            }));
2018            entries_by_path_edits.push(Edit::Insert(entry));
2019        }
2020
2021        self.entries_by_path.edit(entries_by_path_edits, &());
2022        self.entries_by_id.edit(entries_by_id_edits, &());
2023    }
2024
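        // Preserves entry ids across renames (matched by inode) and rescans (matched by path).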
2025    fn reuse_entry_id(&mut self, entry: &mut Entry) {
2026        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
2027            entry.id = removed_entry_id;
2028        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
2029            entry.id = existing_entry.id;
2030        }
2031    }
2032
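        // Removes the entry at `path` along with all of its descendants, remembering each
        // removed id by inode so a re-insertion within the same batch can reuse it.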
2033    fn remove_path(&mut self, path: &Path) {
2034        let mut new_entries;
2035        let removed_entries;
2036        {
2037            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
2038            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
2039            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
2040            new_entries.push_tree(cursor.suffix(&()), &());
2041        }
2042        self.entries_by_path = new_entries;
2043
2044        let mut entries_by_id_edits = Vec::new();
2045        for entry in removed_entries.cursor::<()>() {
2046            let removed_entry_id = self
2047                .removed_entry_ids
2048                .entry(entry.inode)
2049                .or_insert(entry.id);
2050            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
2051            entries_by_id_edits.push(Edit::Remove(entry.id));
2052        }
2053        self.entries_by_id.edit(entries_by_id_edits, &());
2054
2055        if path.file_name() == Some(&GITIGNORE) {
2056            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
2057                *scan_id = self.scan_id;
2058            }
2059        }
2060    }
2061
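        // Rebuilds the stack of `.gitignore`s that apply to `path`, walking its ancestors
        // from the root downward and collapsing to `IgnoreStack::all()` once any ancestor is
        // itself ignored.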
2062    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
2063        let mut new_ignores = Vec::new();
2064        for ancestor in path.ancestors().skip(1) {
2065            if let Some((ignore, _)) = self.ignores.get(ancestor) {
2066                new_ignores.push((ancestor, Some(ignore.clone())));
2067            } else {
2068                new_ignores.push((ancestor, None));
2069            }
2070        }
2071
2072        let mut ignore_stack = IgnoreStack::none();
2073        for (parent_path, ignore) in new_ignores.into_iter().rev() {
2074            if ignore_stack.is_path_ignored(&parent_path, true) {
2075                ignore_stack = IgnoreStack::all();
2076                break;
2077            } else if let Some(ignore) = ignore {
2078                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
2079            }
2080        }
2081
2082        if ignore_stack.is_path_ignored(path, is_dir) {
2083            ignore_stack = IgnoreStack::all();
2084        }
2085
2086        ignore_stack
2087    }
2088}
2089
2090impl fmt::Debug for Snapshot {
2091    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2092        for entry in self.entries_by_path.cursor::<()>() {
2093            for _ in entry.path.ancestors().skip(1) {
2094                write!(f, " ")?;
2095            }
2096            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2097        }
2098        Ok(())
2099    }
2100}
2101
2102#[derive(Clone, PartialEq)]
2103pub struct File {
2104    entry_id: Option<usize>,
2105    worktree: ModelHandle<Worktree>,
2106    worktree_path: Arc<Path>,
2107    pub path: Arc<Path>,
2108    pub mtime: SystemTime,
2109    is_local: bool,
2110}
2111
2112impl language::File for File {
2113    fn mtime(&self) -> SystemTime {
2114        self.mtime
2115    }
2116
2117    fn path(&self) -> &Arc<Path> {
2118        &self.path
2119    }
2120
2121    fn abs_path(&self) -> Option<PathBuf> {
2122        if self.is_local {
2123            Some(self.worktree_path.join(&self.path))
2124        } else {
2125            None
2126        }
2127    }
2128
2129    fn full_path(&self) -> PathBuf {
2130        let mut full_path = PathBuf::new();
2131        if let Some(worktree_name) = self.worktree_path.file_name() {
2132            full_path.push(worktree_name);
2133        }
2134        full_path.push(&self.path);
2135        full_path
2136    }
2137
2138    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2139    /// of its worktree, then this method will return the name of the worktree itself.
2140    fn file_name<'a>(&'a self) -> Option<OsString> {
2141        self.path
2142            .file_name()
2143            .or_else(|| self.worktree_path.file_name())
2144            .map(Into::into)
2145    }
2146
2147    fn is_deleted(&self) -> bool {
2148        self.entry_id.is_none()
2149    }
2150
2151    fn save(
2152        &self,
2153        buffer_id: u64,
2154        text: Rope,
2155        version: clock::Global,
2156        cx: &mut MutableAppContext,
2157    ) -> Task<Result<(clock::Global, SystemTime)>> {
2158        let worktree_id = self.worktree.read(cx).id().to_proto();
2159        self.worktree.update(cx, |worktree, cx| match worktree {
2160            Worktree::Local(worktree) => {
2161                let rpc = worktree.client.clone();
2162                let project_id = worktree.share.as_ref().map(|share| share.project_id);
2163                let save = worktree.save(self.path.clone(), text, cx);
2164                cx.background().spawn(async move {
2165                    let entry = save.await?;
2166                    if let Some(project_id) = project_id {
2167                        rpc.send(proto::BufferSaved {
2168                            project_id,
2169                            worktree_id,
2170                            buffer_id,
2171                            version: (&version).into(),
2172                            mtime: Some(entry.mtime.into()),
2173                        })
2174                        .await?;
2175                    }
2176                    Ok((version, entry.mtime))
2177                })
2178            }
2179            Worktree::Remote(worktree) => {
2180                let rpc = worktree.client.clone();
2181                let project_id = worktree.project_id;
2182                cx.foreground().spawn(async move {
2183                    let response = rpc
2184                        .request(proto::SaveBuffer {
2185                            project_id,
2186                            worktree_id,
2187                            buffer_id,
2188                        })
2189                        .await?;
2190                    let version = response.version.try_into()?;
2191                    let mtime = response
2192                        .mtime
2193                        .ok_or_else(|| anyhow!("missing mtime"))?
2194                        .into();
2195                    Ok((version, mtime))
2196                })
2197            }
2198        })
2199    }
2200
2201    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
2202        let worktree = self.worktree.read(cx).as_local()?;
2203        let abs_path = worktree.absolutize(&self.path);
2204        let fs = worktree.fs.clone();
2205        Some(
2206            cx.background()
2207                .spawn(async move { fs.load(&abs_path).await }),
2208        )
2209    }
2210
2211    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
2212        self.worktree.update(cx, |worktree, cx| {
2213            worktree.send_buffer_update(buffer_id, operation, cx);
2214        });
2215    }
2216
2217    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
2218        self.worktree.update(cx, |worktree, cx| {
2219            if let Worktree::Remote(worktree) = worktree {
2220                let project_id = worktree.project_id;
2221                let worktree_id = worktree.id().to_proto();
2222                let rpc = worktree.client.clone();
2223                cx.background()
2224                    .spawn(async move {
2225                        if let Err(error) = rpc
2226                            .send(proto::CloseBuffer {
2227                                project_id,
2228                                worktree_id,
2229                                buffer_id,
2230                            })
2231                            .await
2232                        {
2233                            log::error!("error closing remote buffer: {}", error);
2234                        }
2235                    })
2236                    .detach();
2237            }
2238        });
2239    }
2240
2241    fn as_any(&self) -> &dyn Any {
2242        self
2243    }
2244}
2245
2246impl File {
2247    pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2248        file.and_then(|f| f.as_any().downcast_ref())
2249    }
2250
2251    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2252        self.worktree.read(cx).id()
2253    }
2254}
2255
2256#[derive(Clone, Debug)]
2257pub struct Entry {
2258    pub id: usize,
2259    pub kind: EntryKind,
2260    pub path: Arc<Path>,
2261    pub inode: u64,
2262    pub mtime: SystemTime,
2263    pub is_symlink: bool,
2264    pub is_ignored: bool,
2265}
2266
2267#[derive(Clone, Debug)]
2268pub enum EntryKind {
2269    PendingDir,
2270    Dir,
2271    File(CharBag),
2272}
2273
2274impl Entry {
2275    fn new(
2276        path: Arc<Path>,
2277        metadata: &fs::Metadata,
2278        next_entry_id: &AtomicUsize,
2279        root_char_bag: CharBag,
2280    ) -> Self {
2281        Self {
2282            id: next_entry_id.fetch_add(1, SeqCst),
2283            kind: if metadata.is_dir {
2284                EntryKind::PendingDir
2285            } else {
2286                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2287            },
2288            path,
2289            inode: metadata.inode,
2290            mtime: metadata.mtime,
2291            is_symlink: metadata.is_symlink,
2292            is_ignored: false,
2293        }
2294    }
2295
2296    pub fn is_dir(&self) -> bool {
2297        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2298    }
2299
2300    pub fn is_file(&self) -> bool {
2301        matches!(self.kind, EntryKind::File(_))
2302    }
2303}
2304
2305impl sum_tree::Item for Entry {
2306    type Summary = EntrySummary;
2307
2308    fn summary(&self) -> Self::Summary {
2309        let visible_count = if self.is_ignored { 0 } else { 1 };
2310        let file_count;
2311        let visible_file_count;
2312        if self.is_file() {
2313            file_count = 1;
2314            visible_file_count = visible_count;
2315        } else {
2316            file_count = 0;
2317            visible_file_count = 0;
2318        }
2319
2320        EntrySummary {
2321            max_path: self.path.clone(),
2322            count: 1,
2323            visible_count,
2324            file_count,
2325            visible_file_count,
2326        }
2327    }
2328}
2329
2330impl sum_tree::KeyedItem for Entry {
2331    type Key = PathKey;
2332
2333    fn key(&self) -> Self::Key {
2334        PathKey(self.path.clone())
2335    }
2336}
2337
2338#[derive(Clone, Debug)]
2339pub struct EntrySummary {
2340    max_path: Arc<Path>,
2341    count: usize,
2342    visible_count: usize,
2343    file_count: usize,
2344    visible_file_count: usize,
2345}
2346
2347impl Default for EntrySummary {
2348    fn default() -> Self {
2349        Self {
2350            max_path: Arc::from(Path::new("")),
2351            count: 0,
2352            visible_count: 0,
2353            file_count: 0,
2354            visible_file_count: 0,
2355        }
2356    }
2357}
2358
2359impl sum_tree::Summary for EntrySummary {
2360    type Context = ();
2361
2362    fn add_summary(&mut self, rhs: &Self, _: &()) {
2363        self.max_path = rhs.max_path.clone();
            self.count += rhs.count;
2364        self.visible_count += rhs.visible_count;
2365        self.file_count += rhs.file_count;
2366        self.visible_file_count += rhs.visible_file_count;
2367    }
2368}
2369
2370#[derive(Clone, Debug)]
2371struct PathEntry {
2372    id: usize,
2373    path: Arc<Path>,
2374    is_ignored: bool,
2375    scan_id: usize,
2376}
2377
2378impl sum_tree::Item for PathEntry {
2379    type Summary = PathEntrySummary;
2380
2381    fn summary(&self) -> Self::Summary {
2382        PathEntrySummary { max_id: self.id }
2383    }
2384}
2385
2386impl sum_tree::KeyedItem for PathEntry {
2387    type Key = usize;
2388
2389    fn key(&self) -> Self::Key {
2390        self.id
2391    }
2392}
2393
2394#[derive(Clone, Debug, Default)]
2395struct PathEntrySummary {
2396    max_id: usize,
2397}
2398
2399impl sum_tree::Summary for PathEntrySummary {
2400    type Context = ();
2401
2402    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2403        self.max_id = summary.max_id;
2404    }
2405}
2406
2407impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
2408    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2409        *self = summary.max_id;
2410    }
2411}
2412
2413#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2414pub struct PathKey(Arc<Path>);
2415
2416impl Default for PathKey {
2417    fn default() -> Self {
2418        Self(Path::new("").into())
2419    }
2420}
2421
2422impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2423    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2424        self.0 = summary.max_path.clone();
2425    }
2426}
2427
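    // Scans the worktree off the main thread: performs the initial recursive scan, then
    // incrementally reconciles the shared snapshot with batches of file-system events.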
2428struct BackgroundScanner {
2429    fs: Arc<dyn Fs>,
2430    snapshot: Arc<Mutex<Snapshot>>,
2431    notify: Sender<ScanState>,
2432    executor: Arc<executor::Background>,
2433}
2434
2435impl BackgroundScanner {
2436    fn new(
2437        snapshot: Arc<Mutex<Snapshot>>,
2438        notify: Sender<ScanState>,
2439        fs: Arc<dyn Fs>,
2440        executor: Arc<executor::Background>,
2441    ) -> Self {
2442        Self {
2443            fs,
2444            snapshot,
2445            notify,
2446            executor,
2447        }
2448    }
2449
2450    fn abs_path(&self) -> Arc<Path> {
2451        self.snapshot.lock().abs_path.clone()
2452    }
2453
2454    fn snapshot(&self) -> Snapshot {
2455        self.snapshot.lock().clone()
2456    }
2457
2458    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2459        if self.notify.send(ScanState::Scanning).await.is_err() {
2460            return;
2461        }
2462
2463        if let Err(err) = self.scan_dirs().await {
2464            if self
2465                .notify
2466                .send(ScanState::Err(Arc::new(err)))
2467                .await
2468                .is_err()
2469            {
2470                return;
2471            }
2472        }
2473
2474        if self.notify.send(ScanState::Idle).await.is_err() {
2475            return;
2476        }
2477
2478        futures::pin_mut!(events_rx);
2479        while let Some(events) = events_rx.next().await {
2480            if self.notify.send(ScanState::Scanning).await.is_err() {
2481                break;
2482            }
2483
2484            if !self.process_events(events).await {
2485                break;
2486            }
2487
2488            if self.notify.send(ScanState::Idle).await.is_err() {
2489                break;
2490            }
2491        }
2492    }
2493
2494    async fn scan_dirs(&mut self) -> Result<()> {
2495        let root_char_bag;
2496        let next_entry_id;
2497        let is_dir;
2498        {
2499            let snapshot = self.snapshot.lock();
2500            root_char_bag = snapshot.root_char_bag;
2501            next_entry_id = snapshot.next_entry_id.clone();
2502            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
2503        };
2504
2505        if is_dir {
2506            let path: Arc<Path> = Arc::from(Path::new(""));
2507            let abs_path = self.abs_path();
2508            let (tx, rx) = channel::unbounded();
2509            tx.send(ScanJob {
2510                abs_path: abs_path.to_path_buf(),
2511                path,
2512                ignore_stack: IgnoreStack::none(),
2513                scan_queue: tx.clone(),
2514            })
2515            .await
2516            .unwrap();
2517            drop(tx);
2518
2519            self.executor
2520                .scoped(|scope| {
2521                    for _ in 0..self.executor.num_cpus() {
2522                        scope.spawn(async {
2523                            while let Ok(job) = rx.recv().await {
2524                                if let Err(err) = self
2525                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2526                                    .await
2527                                {
2528                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
2529                                }
2530                            }
2531                        });
2532                    }
2533                })
2534                .await;
2535        }
2536
2537        Ok(())
2538    }
2539
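        // Scans a single directory: builds entries for its children and enqueues
        // sub-directories as further scan jobs on the shared queue.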
2540    async fn scan_dir(
2541        &self,
2542        root_char_bag: CharBag,
2543        next_entry_id: Arc<AtomicUsize>,
2544        job: &ScanJob,
2545    ) -> Result<()> {
2546        let mut new_entries: Vec<Entry> = Vec::new();
2547        let mut new_jobs: Vec<ScanJob> = Vec::new();
2548        let mut ignore_stack = job.ignore_stack.clone();
2549        let mut new_ignore = None;
2550
2551        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2552        while let Some(child_abs_path) = child_paths.next().await {
2553            let child_abs_path = match child_abs_path {
2554                Ok(child_abs_path) => child_abs_path,
2555                Err(error) => {
2556                    log::error!("error processing entry {:?}", error);
2557                    continue;
2558                }
2559            };
2560            let child_name = child_abs_path.file_name().unwrap();
2561            let child_path: Arc<Path> = job.path.join(child_name).into();
2562            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
2563                Some(metadata) => metadata,
2564                None => continue,
2565            };
2566
2567            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2568            if child_name == *GITIGNORE {
2569                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
2570                    Ok(ignore) => {
2571                        let ignore = Arc::new(ignore);
2572                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2573                        new_ignore = Some(ignore);
2574                    }
2575                    Err(error) => {
2576                        log::error!(
2577                            "error loading .gitignore file {:?} - {:?}",
2578                            child_name,
2579                            error
2580                        );
2581                    }
2582                }
2583
2584                // Update the ignore status of any child entries we've already processed to reflect
2585                // the ignore file in the current directory. Because `.gitignore` starts with a `.`, it
2586                // usually appears early in the listing, so such entries should rarely be numerous.
2587                // Update the ignore stack associated with any new jobs as well.
2588                let mut new_jobs = new_jobs.iter_mut();
2589                for entry in &mut new_entries {
2590                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2591                    if entry.is_dir() {
2592                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2593                            IgnoreStack::all()
2594                        } else {
2595                            ignore_stack.clone()
2596                        };
2597                    }
2598                }
2599            }
2600
2601            let mut child_entry = Entry::new(
2602                child_path.clone(),
2603                &child_metadata,
2604                &next_entry_id,
2605                root_char_bag,
2606            );
2607
2608            if child_metadata.is_dir {
2609                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
2610                child_entry.is_ignored = is_ignored;
2611                new_entries.push(child_entry);
2612                new_jobs.push(ScanJob {
2613                    abs_path: child_abs_path,
2614                    path: child_path,
2615                    ignore_stack: if is_ignored {
2616                        IgnoreStack::all()
2617                    } else {
2618                        ignore_stack.clone()
2619                    },
2620                    scan_queue: job.scan_queue.clone(),
2621                });
2622            } else {
2623                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
2624                new_entries.push(child_entry);
2625            };
2626        }
2627
2628        self.snapshot
2629            .lock()
2630            .populate_dir(job.path.clone(), new_entries, new_ignore);
2631        for new_job in new_jobs {
2632            job.scan_queue.send(new_job).await.unwrap();
2633        }
2634
2635        Ok(())
2636    }
2637
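        // Reconciles one batch of file-system events: removes stale entries, re-stats each
        // reported path, queues newly discovered directories for scanning, and finally
        // refreshes ignore statuses. Returns `false` if the worktree root can no longer be
        // canonicalized.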
2638    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2639        let mut snapshot = self.snapshot();
2640        snapshot.scan_id += 1;
2641
2642        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
2643            abs_path
2644        } else {
2645            return false;
2646        };
2647        let root_char_bag = snapshot.root_char_bag;
2648        let next_entry_id = snapshot.next_entry_id.clone();
2649
2650        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2651        events.dedup_by(|a, b| a.path.starts_with(&b.path));
2652
2653        for event in &events {
2654            match event.path.strip_prefix(&root_abs_path) {
2655                Ok(path) => snapshot.remove_path(&path),
2656                Err(_) => {
2657                    log::error!(
2658                        "unexpected event {:?} for root path {:?}",
2659                        event.path,
2660                        root_abs_path
2661                    );
2662                    continue;
2663                }
2664            }
2665        }
2666
2667        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2668        for event in events {
2669            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
2670                Ok(path) => Arc::from(path.to_path_buf()),
2671                Err(_) => {
2672                    log::error!(
2673                        "unexpected event {:?} for root path {:?}",
2674                        event.path,
2675                        root_abs_path
2676                    );
2677                    continue;
2678                }
2679            };
2680
2681            match self.fs.metadata(&event.path).await {
2682                Ok(Some(metadata)) => {
2683                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
2684                    let mut fs_entry = Entry::new(
2685                        path.clone(),
2686                        &metadata,
2687                        snapshot.next_entry_id.as_ref(),
2688                        snapshot.root_char_bag,
2689                    );
2690                    fs_entry.is_ignored = ignore_stack.is_all();
2691                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
2692                    if metadata.is_dir {
2693                        scan_queue_tx
2694                            .send(ScanJob {
2695                                abs_path: event.path,
2696                                path,
2697                                ignore_stack,
2698                                scan_queue: scan_queue_tx.clone(),
2699                            })
2700                            .await
2701                            .unwrap();
2702                    }
2703                }
2704                Ok(None) => {}
2705                Err(err) => {
2706                    // TODO - create a special 'error' entry in the entries tree to mark this
2707                    log::error!("error reading file on event {:?}", err);
2708                }
2709            }
2710        }
2711
2712        *self.snapshot.lock() = snapshot;
2713
2714        // Scan any directories that were created as part of this event batch.
2715        drop(scan_queue_tx);
2716        self.executor
2717            .scoped(|scope| {
2718                for _ in 0..self.executor.num_cpus() {
2719                    scope.spawn(async {
2720                        while let Ok(job) = scan_queue_rx.recv().await {
2721                            if let Err(err) = self
2722                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2723                                .await
2724                            {
2725                                log::error!("error scanning {:?}: {}", job.abs_path, err);
2726                            }
2727                        }
2728                    });
2729                }
2730            })
2731            .await;
2732
2733        // Attempt to detect renames only over a single batch of file-system events.
2734        self.snapshot.lock().removed_entry_ids.clear();
2735
2736        self.update_ignore_statuses().await;
2737        true
2738    }
2739
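        // Re-evaluates ignore status beneath directories whose `.gitignore` changed during
        // the latest scan, and drops ignores whose `.gitignore` files no longer appear in
        // the snapshot.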
2740    async fn update_ignore_statuses(&self) {
2741        let mut snapshot = self.snapshot();
2742
2743        let mut ignores_to_update = Vec::new();
2744        let mut ignores_to_delete = Vec::new();
2745        for (parent_path, (_, scan_id)) in &snapshot.ignores {
2746            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2747                ignores_to_update.push(parent_path.clone());
2748            }
2749
2750            let ignore_path = parent_path.join(&*GITIGNORE);
2751            if snapshot.entry_for_path(ignore_path).is_none() {
2752                ignores_to_delete.push(parent_path.clone());
2753            }
2754        }
2755
2756        for parent_path in ignores_to_delete {
2757            snapshot.ignores.remove(&parent_path);
2758            self.snapshot.lock().ignores.remove(&parent_path);
2759        }
2760
2761        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2762        ignores_to_update.sort_unstable();
2763        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2764        while let Some(parent_path) = ignores_to_update.next() {
2765            while ignores_to_update
2766                .peek()
2767                .map_or(false, |p| p.starts_with(&parent_path))
2768            {
2769                ignores_to_update.next().unwrap();
2770            }
2771
2772            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2773            ignore_queue_tx
2774                .send(UpdateIgnoreStatusJob {
2775                    path: parent_path,
2776                    ignore_stack,
2777                    ignore_queue: ignore_queue_tx.clone(),
2778                })
2779                .await
2780                .unwrap();
2781        }
2782        drop(ignore_queue_tx);
2783
2784        self.executor
2785            .scoped(|scope| {
2786                for _ in 0..self.executor.num_cpus() {
2787                    scope.spawn(async {
2788                        while let Ok(job) = ignore_queue_rx.recv().await {
2789                            self.update_ignore_status(job, &snapshot).await;
2790                        }
2791                    });
2792                }
2793            })
2794            .await;
2795    }
2796
2797    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
2798        let mut ignore_stack = job.ignore_stack;
2799        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2800            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2801        }
2802
2803        let mut entries_by_id_edits = Vec::new();
2804        let mut entries_by_path_edits = Vec::new();
2805        for mut entry in snapshot.child_entries(&job.path).cloned() {
2806            let was_ignored = entry.is_ignored;
2807            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2808            if entry.is_dir() {
2809                let child_ignore_stack = if entry.is_ignored {
2810                    IgnoreStack::all()
2811                } else {
2812                    ignore_stack.clone()
2813                };
2814                job.ignore_queue
2815                    .send(UpdateIgnoreStatusJob {
2816                        path: entry.path.clone(),
2817                        ignore_stack: child_ignore_stack,
2818                        ignore_queue: job.ignore_queue.clone(),
2819                    })
2820                    .await
2821                    .unwrap();
2822            }
2823
2824            if entry.is_ignored != was_ignored {
2825                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2826                path_entry.scan_id = snapshot.scan_id;
2827                path_entry.is_ignored = entry.is_ignored;
2828                entries_by_id_edits.push(Edit::Insert(path_entry));
2829                entries_by_path_edits.push(Edit::Insert(entry));
2830            }
2831        }
2832
2833        let mut snapshot = self.snapshot.lock();
2834        snapshot.entries_by_path.edit(entries_by_path_edits, &());
2835        snapshot.entries_by_id.edit(entries_by_id_edits, &());
2836    }
2837}
2838
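    // Re-stats `abs_path` and inserts a fresh entry for it into the shared snapshot.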
2839async fn refresh_entry(
2840    fs: &dyn Fs,
2841    snapshot: &Mutex<Snapshot>,
2842    path: Arc<Path>,
2843    abs_path: &Path,
2844) -> Result<Entry> {
2845    let root_char_bag;
2846    let next_entry_id;
2847    {
2848        let snapshot = snapshot.lock();
2849        root_char_bag = snapshot.root_char_bag;
2850        next_entry_id = snapshot.next_entry_id.clone();
2851    }
2852    let entry = Entry::new(
2853        path,
2854        &fs.metadata(abs_path)
2855            .await?
2856            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2857        &next_entry_id,
2858        root_char_bag,
2859    );
2860    Ok(snapshot.lock().insert_entry(entry, fs))
2861}
2862
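    // Extends the worktree root's character bag with the lowercased characters of `path`;
    // used by fuzzy path matching.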
2863fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2864    let mut result = root_char_bag;
2865    result.extend(
2866        path.to_string_lossy()
2867            .chars()
2868            .map(|c| c.to_ascii_lowercase()),
2869    );
2870    result
2871}
2872
2873struct ScanJob {
2874    abs_path: PathBuf,
2875    path: Arc<Path>,
2876    ignore_stack: Arc<IgnoreStack>,
2877    scan_queue: Sender<ScanJob>,
2878}
2879
2880struct UpdateIgnoreStatusJob {
2881    path: Arc<Path>,
2882    ignore_stack: Arc<IgnoreStack>,
2883    ignore_queue: Sender<UpdateIgnoreStatusJob>,
2884}
2885
2886pub trait WorktreeHandle {
2887    #[cfg(test)]
2888    fn flush_fs_events<'a>(
2889        &self,
2890        cx: &'a gpui::TestAppContext,
2891    ) -> futures::future::LocalBoxFuture<'a, ()>;
2892}
2893
2894impl WorktreeHandle for ModelHandle<Worktree> {
2895    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2896    // occurred before the worktree was constructed. These events can cause the worktree to perform
2897    // extra directory scans, and emit extra scan-state notifications.
2898    //
2899    // This function mutates the worktree's directory and waits for those mutations to be picked up,
2900    // to ensure that all redundant FS events have already been processed.
2901    #[cfg(test)]
2902    fn flush_fs_events<'a>(
2903        &self,
2904        cx: &'a gpui::TestAppContext,
2905    ) -> futures::future::LocalBoxFuture<'a, ()> {
2906        use smol::future::FutureExt;
2907
2908        let filename = "fs-event-sentinel";
2909        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
2910        let tree = self.clone();
2911        async move {
2912            std::fs::write(root_path.join(filename), "").unwrap();
2913            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
2914                .await;
2915
2916            std::fs::remove_file(root_path.join(filename)).unwrap();
2917            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
2918                .await;
2919
2920            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2921                .await;
2922        }
2923        .boxed_local()
2924    }
2925}
2926
2927#[derive(Clone, Debug)]
2928struct TraversalProgress<'a> {
2929    max_path: &'a Path,
2930    count: usize,
2931    visible_count: usize,
2932    file_count: usize,
2933    visible_file_count: usize,
2934}
2935
2936impl<'a> TraversalProgress<'a> {
2937    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2938        match (include_ignored, include_dirs) {
2939            (true, true) => self.count,
2940            (true, false) => self.file_count,
2941            (false, true) => self.visible_count,
2942            (false, false) => self.visible_file_count,
2943        }
2944    }
2945}
2946
2947impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2948    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2949        self.max_path = summary.max_path.as_ref();
2950        self.count += summary.count;
2951        self.visible_count += summary.visible_count;
2952        self.file_count += summary.file_count;
2953        self.visible_file_count += summary.visible_file_count;
2954    }
2955}
2956
2957impl<'a> Default for TraversalProgress<'a> {
2958    fn default() -> Self {
2959        Self {
2960            max_path: Path::new(""),
2961            count: 0,
2962            visible_count: 0,
2963            file_count: 0,
2964            visible_file_count: 0,
2965        }
2966    }
2967}
2968
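    /// A cursor over a snapshot's entries that can be configured to skip directories
    /// and/or ignored entries.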
2969pub struct Traversal<'a> {
2970    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2971    include_ignored: bool,
2972    include_dirs: bool,
2973}
2974
2975impl<'a> Traversal<'a> {
2976    pub fn advance(&mut self) -> bool {
2977        self.advance_to_offset(self.offset() + 1)
2978    }
2979
2980    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2981        self.cursor.seek_forward(
2982            &TraversalTarget::Count {
2983                count: offset,
2984                include_dirs: self.include_dirs,
2985                include_ignored: self.include_ignored,
2986            },
2987            Bias::Right,
2988            &(),
2989        )
2990    }
2991
2992    pub fn advance_to_sibling(&mut self) -> bool {
2993        while let Some(entry) = self.cursor.item() {
2994            self.cursor.seek_forward(
2995                &TraversalTarget::PathSuccessor(&entry.path),
2996                Bias::Left,
2997                &(),
2998            );
2999            if let Some(entry) = self.cursor.item() {
3000                if (self.include_dirs || !entry.is_dir())
3001                    && (self.include_ignored || !entry.is_ignored)
3002                {
3003                    return true;
3004                }
3005            }
3006        }
3007        false
3008    }
3009
3010    pub fn entry(&self) -> Option<&'a Entry> {
3011        self.cursor.item()
3012    }
3013
3014    pub fn offset(&self) -> usize {
3015        self.cursor
3016            .start()
3017            .count(self.include_dirs, self.include_ignored)
3018    }
3019}
3020
3021impl<'a> Iterator for Traversal<'a> {
3022    type Item = &'a Entry;
3023
3024    fn next(&mut self) -> Option<Self::Item> {
3025        if let Some(item) = self.entry() {
3026            self.advance();
3027            Some(item)
3028        } else {
3029            None
3030        }
3031    }
3032}
3033
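    /// A seek target for `Traversal`: an exact path, the first path that is not a
    /// descendant of a given path, or an absolute entry count.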
3034#[derive(Debug)]
3035enum TraversalTarget<'a> {
3036    Path(&'a Path),
3037    PathSuccessor(&'a Path),
3038    Count {
3039        count: usize,
3040        include_ignored: bool,
3041        include_dirs: bool,
3042    },
3043}
3044
3045impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3046    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3047        match self {
3048            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3049            TraversalTarget::PathSuccessor(path) => {
3050                if !cursor_location.max_path.starts_with(path) {
3051                    Ordering::Equal
3052                } else {
3053                    Ordering::Greater
3054                }
3055            }
3056            TraversalTarget::Count {
3057                count,
3058                include_dirs,
3059                include_ignored,
3060            } => Ord::cmp(
3061                count,
3062                &cursor_location.count(*include_dirs, *include_ignored),
3063            ),
3064        }
3065    }
3066}
3067
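    /// Yields the entries that are direct children of `parent_path`, skipping over each
    /// child's descendants.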
3068struct ChildEntriesIter<'a> {
3069    parent_path: &'a Path,
3070    traversal: Traversal<'a>,
3071}
3072
3073impl<'a> Iterator for ChildEntriesIter<'a> {
3074    type Item = &'a Entry;
3075
3076    fn next(&mut self) -> Option<Self::Item> {
3077        if let Some(item) = self.traversal.entry() {
3078            if item.path.starts_with(&self.parent_path) {
3079                self.traversal.advance_to_sibling();
3080                return Some(item);
3081            }
3082        }
3083        None
3084    }
3085}
3086
3087impl<'a> From<&'a Entry> for proto::Entry {
3088    fn from(entry: &'a Entry) -> Self {
3089        Self {
3090            id: entry.id as u64,
3091            is_dir: entry.is_dir(),
3092            path: entry.path.to_string_lossy().to_string(),
3093            inode: entry.inode,
3094            mtime: Some(entry.mtime.into()),
3095            is_symlink: entry.is_symlink,
3096            is_ignored: entry.is_ignored,
3097        }
3098    }
3099}
3100
3101impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3102    type Error = anyhow::Error;
3103
3104    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3105        if let Some(mtime) = entry.mtime {
3106            let kind = if entry.is_dir {
3107                EntryKind::Dir
3108            } else {
3109                let mut char_bag = root_char_bag.clone();
3110                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3111                EntryKind::File(char_bag)
3112            };
3113            let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3114            Ok(Entry {
3115                id: entry.id as usize,
3116                kind,
3117                path: path.clone(),
3118                inode: entry.inode,
3119                mtime: mtime.into(),
3120                is_symlink: entry.is_symlink,
3121                is_ignored: entry.is_ignored,
3122            })
3123        } else {
3124            Err(anyhow!(
3125                "missing mtime in remote worktree entry {:?}",
3126                entry.path
3127            ))
3128        }
3129    }
3130}
3131
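    /// Conversion from LSP's zero-based line/character positions to `PointUtf16`.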
3132trait ToPointUtf16 {
3133    fn to_point_utf16(self) -> PointUtf16;
3134}
3135
3136impl ToPointUtf16 for lsp::Position {
3137    fn to_point_utf16(self) -> PointUtf16 {
3138        PointUtf16::new(self.line, self.character)
3139    }
3140}
3141
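    /// Converts an LSP range (zero-based lines, UTF-16 character offsets) into a
    /// `PointUtf16` range.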
3142fn range_from_lsp(range: lsp::Range) -> Range<PointUtf16> {
3143    let start = PointUtf16::new(range.start.line, range.start.character);
3144    let end = PointUtf16::new(range.end.line, range.end.character);
3145    start..end
3146}
3147
3148#[cfg(test)]
3149mod tests {
3150    use super::*;
3151    use crate::fs::FakeFs;
3152    use anyhow::Result;
3153    use client::test::{FakeHttpClient, FakeServer};
3154    use fs::RealFs;
3155    use gpui::test::subscribe;
3156    use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3157    use language::{Diagnostic, LanguageConfig};
3158    use lsp::Url;
3159    use rand::prelude::*;
3160    use serde_json::json;
3161    use std::{cell::RefCell, rc::Rc};
3162    use std::{
3163        env,
3164        fmt::Write,
3165        time::{SystemTime, UNIX_EPOCH},
3166    };
3167    use text::Point;
3168    use unindent::Unindent as _;
3169    use util::test::temp_tree;
3170
3171    #[gpui::test]
3172    async fn test_traversal(mut cx: gpui::TestAppContext) {
3173        let fs = FakeFs::new();
3174        fs.insert_tree(
3175            "/root",
3176            json!({
3177               ".gitignore": "a/b\n",
3178               "a": {
3179                   "b": "",
3180                   "c": "",
3181               }
3182            }),
3183        )
3184        .await;
3185
3186        let http_client = FakeHttpClient::with_404_response();
3187        let client = Client::new(http_client.clone());
3188        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3189
3190        let tree = Worktree::open_local(
3191            client,
3192            user_store,
3193            Arc::from(Path::new("/root")),
3194            Arc::new(fs),
3195            Default::default(),
3196            &mut cx.to_async(),
3197        )
3198        .await
3199        .unwrap();
3200        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3201            .await;
3202
3203        tree.read_with(&cx, |tree, _| {
3204            assert_eq!(
3205                tree.entries(false)
3206                    .map(|entry| entry.path.as_ref())
3207                    .collect::<Vec<_>>(),
3208                vec![
3209                    Path::new(""),
3210                    Path::new(".gitignore"),
3211                    Path::new("a"),
3212                    Path::new("a/c"),
3213                ]
3214            );
3215        })
3216    }
3217
3218    #[gpui::test]
3219    async fn test_save_file(mut cx: gpui::TestAppContext) {
3220        let dir = temp_tree(json!({
3221            "file1": "the old contents",
3222        }));
3223
3224        let http_client = FakeHttpClient::with_404_response();
3225        let client = Client::new(http_client.clone());
3226        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3227
3228        let tree = Worktree::open_local(
3229            client,
3230            user_store,
3231            dir.path(),
3232            Arc::new(RealFs),
3233            Default::default(),
3234            &mut cx.to_async(),
3235        )
3236        .await
3237        .unwrap();
3238        let buffer = tree
3239            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3240            .await
3241            .unwrap();
3242        let save = buffer.update(&mut cx, |buffer, cx| {
3243            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3244            buffer.save(cx).unwrap()
3245        });
3246        save.await.unwrap();
3247
3248        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
3249        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3250    }
3251
3252    #[gpui::test]
3253    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
3254        let dir = temp_tree(json!({
3255            "file1": "the old contents",
3256        }));
3257        let file_path = dir.path().join("file1");
3258
3259        let http_client = FakeHttpClient::with_404_response();
3260        let client = Client::new(http_client.clone());
3261        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3262
3263        let tree = Worktree::open_local(
3264            client,
3265            user_store,
3266            file_path.clone(),
3267            Arc::new(RealFs),
3268            Default::default(),
3269            &mut cx.to_async(),
3270        )
3271        .await
3272        .unwrap();
3273        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3274            .await;
3275        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));
3276
3277        let buffer = tree
3278            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
3279            .await
3280            .unwrap();
3281        let save = buffer.update(&mut cx, |buffer, cx| {
3282            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3283            buffer.save(cx).unwrap()
3284        });
3285        save.await.unwrap();
3286
3287        let new_text = std::fs::read_to_string(file_path).unwrap();
3288        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3289    }
3290
3291    #[gpui::test]
3292    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3293        let dir = temp_tree(json!({
3294            "a": {
3295                "file1": "",
3296                "file2": "",
3297                "file3": "",
3298            },
3299            "b": {
3300                "c": {
3301                    "file4": "",
3302                    "file5": "",
3303                }
3304            }
3305        }));
3306
3307        let user_id = 5;
3308        let http_client = FakeHttpClient::with_404_response();
3309        let mut client = Client::new(http_client.clone());
3310        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3311        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3312        let tree = Worktree::open_local(
3313            client,
3314            user_store.clone(),
3315            dir.path(),
3316            Arc::new(RealFs),
3317            Default::default(),
3318            &mut cx.to_async(),
3319        )
3320        .await
3321        .unwrap();
3322
3323        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3324            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3325            async move { buffer.await.unwrap() }
3326        };
3327        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3328            tree.read_with(cx, |tree, _| {
3329                tree.entry_for_path(path)
3330                    .expect(&format!("no entry for path {}", path))
3331                    .id
3332            })
3333        };
3334
3335        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3336        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3337        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3338        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3339
3340        let file2_id = id_for_path("a/file2", &cx);
3341        let file3_id = id_for_path("a/file3", &cx);
3342        let file4_id = id_for_path("b/c/file4", &cx);
3343
3344        // Wait for the initial scan.
3345        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3346            .await;
3347
3348        // Create a remote copy of this worktree.
3349        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3350        let remote = Worktree::remote(
3351            1,
3352            1,
3353            initial_snapshot.to_proto(&Default::default()),
3354            Client::new(http_client.clone()),
3355            user_store,
3356            Default::default(),
3357            &mut cx.to_async(),
3358        )
3359        .await
3360        .unwrap();
3361
3362        cx.read(|cx| {
3363            assert!(!buffer2.read(cx).is_dirty());
3364            assert!(!buffer3.read(cx).is_dirty());
3365            assert!(!buffer4.read(cx).is_dirty());
3366            assert!(!buffer5.read(cx).is_dirty());
3367        });
3368
3369        // Rename and delete files and directories.
3370        tree.flush_fs_events(&cx).await;
3371        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3372        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3373        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3374        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3375        tree.flush_fs_events(&cx).await;
3376
3377        let expected_paths = vec![
3378            "a",
3379            "a/file1",
3380            "a/file2.new",
3381            "b",
3382            "d",
3383            "d/file3",
3384            "d/file4",
3385        ];
3386
3387        cx.read(|app| {
3388            assert_eq!(
3389                tree.read(app)
3390                    .paths()
3391                    .map(|p| p.to_str().unwrap())
3392                    .collect::<Vec<_>>(),
3393                expected_paths
3394            );
3395
3396            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3397            assert_eq!(id_for_path("d/file3", &cx), file3_id);
3398            assert_eq!(id_for_path("d/file4", &cx), file4_id);
3399
3400            assert_eq!(
3401                buffer2.read(app).file().unwrap().path().as_ref(),
3402                Path::new("a/file2.new")
3403            );
3404            assert_eq!(
3405                buffer3.read(app).file().unwrap().path().as_ref(),
3406                Path::new("d/file3")
3407            );
3408            assert_eq!(
3409                buffer4.read(app).file().unwrap().path().as_ref(),
3410                Path::new("d/file4")
3411            );
3412            assert_eq!(
3413                buffer5.read(app).file().unwrap().path().as_ref(),
3414                Path::new("b/c/file5")
3415            );
3416
3417            assert!(!buffer2.read(app).file().unwrap().is_deleted());
3418            assert!(!buffer3.read(app).file().unwrap().is_deleted());
3419            assert!(!buffer4.read(app).file().unwrap().is_deleted());
3420            assert!(buffer5.read(app).file().unwrap().is_deleted());
3421        });
3422
3423        // Update the remote worktree. Check that it becomes consistent with the
3424        // local worktree.
3425        remote.update(&mut cx, |remote, cx| {
3426            let update_message =
3427                tree.read(cx)
3428                    .snapshot()
3429                    .build_update(&initial_snapshot, 1, 1, true);
3430            remote
3431                .as_remote_mut()
3432                .unwrap()
3433                .snapshot
3434                .apply_update(update_message)
3435                .unwrap();
3436
3437            assert_eq!(
3438                remote
3439                    .paths()
3440                    .map(|p| p.to_str().unwrap())
3441                    .collect::<Vec<_>>(),
3442                expected_paths
3443            );
3444        });
3445    }
3446
3447    #[gpui::test]
3448    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3449        let dir = temp_tree(json!({
3450            ".git": {},
3451            ".gitignore": "ignored-dir\n",
3452            "tracked-dir": {
3453                "tracked-file1": "tracked contents",
3454            },
3455            "ignored-dir": {
3456                "ignored-file1": "ignored contents",
3457            }
3458        }));
3459
3460        let http_client = FakeHttpClient::with_404_response();
3461        let client = Client::new(http_client.clone());
3462        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3463
3464        let tree = Worktree::open_local(
3465            client,
3466            user_store,
3467            dir.path(),
3468            Arc::new(RealFs),
3469            Default::default(),
3470            &mut cx.to_async(),
3471        )
3472        .await
3473        .unwrap();
3474        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3475            .await;
3476        tree.flush_fs_events(&cx).await;
3477        cx.read(|cx| {
3478            let tree = tree.read(cx);
3479            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3480            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3481            assert_eq!(tracked.is_ignored, false);
3482            assert_eq!(ignored.is_ignored, true);
3483        });
3484
3485        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3486        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3487        tree.flush_fs_events(&cx).await;
3488        cx.read(|cx| {
3489            let tree = tree.read(cx);
3490            let dot_git = tree.entry_for_path(".git").unwrap();
3491            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3492            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3493            assert_eq!(tracked.is_ignored, false);
3494            assert_eq!(ignored.is_ignored, true);
3495            assert_eq!(dot_git.is_ignored, true);
3496        });
3497    }
3498
3499    #[gpui::test]
3500    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3501        let user_id = 100;
3502        let http_client = FakeHttpClient::with_404_response();
3503        let mut client = Client::new(http_client);
3504        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3505        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3506
3507        let fs = Arc::new(FakeFs::new());
3508        fs.insert_tree(
3509            "/the-dir",
3510            json!({
3511                "a.txt": "a-contents",
3512                "b.txt": "b-contents",
3513            }),
3514        )
3515        .await;
3516
3517        let worktree = Worktree::open_local(
3518            client.clone(),
3519            user_store,
3520            "/the-dir".as_ref(),
3521            fs,
3522            Default::default(),
3523            &mut cx.to_async(),
3524        )
3525        .await
3526        .unwrap();
3527
3528        // Spawn multiple tasks to open paths, repeating some paths.
3529        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3530            (
3531                worktree.open_buffer("a.txt", cx),
3532                worktree.open_buffer("b.txt", cx),
3533                worktree.open_buffer("a.txt", cx),
3534            )
3535        });
3536
3537        let buffer_a_1 = buffer_a_1.await.unwrap();
3538        let buffer_a_2 = buffer_a_2.await.unwrap();
3539        let buffer_b = buffer_b.await.unwrap();
3540        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3541        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3542
3543        // There is only one buffer per path.
3544        let buffer_a_id = buffer_a_1.id();
3545        assert_eq!(buffer_a_2.id(), buffer_a_id);
3546
3547        // Open the same path again while it is still open.
3548        drop(buffer_a_1);
3549        let buffer_a_3 = worktree
3550            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3551            .await
3552            .unwrap();
3553
3554        // There's still only one buffer per path.
3555        assert_eq!(buffer_a_3.id(), buffer_a_id);
3556    }
3557
3558    #[gpui::test]
3559    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
3560        use std::fs;
3561
3562        let dir = temp_tree(json!({
3563            "file1": "abc",
3564            "file2": "def",
3565            "file3": "ghi",
3566        }));
3567        let http_client = FakeHttpClient::with_404_response();
3568        let client = Client::new(http_client.clone());
3569        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3570
3571        let tree = Worktree::open_local(
3572            client,
3573            user_store,
3574            dir.path(),
3575            Arc::new(RealFs),
3576            Default::default(),
3577            &mut cx.to_async(),
3578        )
3579        .await
3580        .unwrap();
3581        tree.flush_fs_events(&cx).await;
3582        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3583            .await;
3584
3585        let buffer1 = tree
3586            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3587            .await
3588            .unwrap();
3589        let events = Rc::new(RefCell::new(Vec::new()));
3590
3591        // initially, the buffer isn't dirty.
3592        buffer1.update(&mut cx, |buffer, cx| {
3593            cx.subscribe(&buffer1, {
3594                let events = events.clone();
3595                move |_, _, event, _| events.borrow_mut().push(event.clone())
3596            })
3597            .detach();
3598
3599            assert!(!buffer.is_dirty());
3600            assert!(events.borrow().is_empty());
3601
3602            buffer.edit(vec![1..2], "", cx);
3603        });
3604
3605        // after the first edit, the buffer is dirty, and emits a dirtied event.
3606        buffer1.update(&mut cx, |buffer, cx| {
3607            assert!(buffer.text() == "ac");
3608            assert!(buffer.is_dirty());
3609            assert_eq!(
3610                *events.borrow(),
3611                &[language::Event::Edited, language::Event::Dirtied]
3612            );
3613            events.borrow_mut().clear();
3614            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
3615        });
3616
3617        // after saving, the buffer is not dirty, and emits a saved event.
3618        buffer1.update(&mut cx, |buffer, cx| {
3619            assert!(!buffer.is_dirty());
3620            assert_eq!(*events.borrow(), &[language::Event::Saved]);
3621            events.borrow_mut().clear();
3622
3623            buffer.edit(vec![1..1], "B", cx);
3624            buffer.edit(vec![2..2], "D", cx);
3625        });
3626
3627        // after editing again, the buffer is dirty, and emits another dirty event.
3628        buffer1.update(&mut cx, |buffer, cx| {
3629            assert!(buffer.text() == "aBDc");
3630            assert!(buffer.is_dirty());
3631            assert_eq!(
3632                *events.borrow(),
3633                &[
3634                    language::Event::Edited,
3635                    language::Event::Dirtied,
3636                    language::Event::Edited,
3637                ],
3638            );
3639            events.borrow_mut().clear();
3640
3641            // TODO - currently, after restoring the buffer to its
3642            // previously-saved state, the buffer is still considered dirty.
3643            buffer.edit([1..3], "", cx);
3644            assert!(buffer.text() == "ac");
3645            assert!(buffer.is_dirty());
3646        });
3647
3648        assert_eq!(*events.borrow(), &[language::Event::Edited]);
3649
3650        // When a file is deleted, the buffer is considered dirty.
3651        let events = Rc::new(RefCell::new(Vec::new()));
3652        let buffer2 = tree
3653            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
3654            .await
3655            .unwrap();
3656        buffer2.update(&mut cx, |_, cx| {
3657            cx.subscribe(&buffer2, {
3658                let events = events.clone();
3659                move |_, _, event, _| events.borrow_mut().push(event.clone())
3660            })
3661            .detach();
3662        });
3663
3664        fs::remove_file(dir.path().join("file2")).unwrap();
3665        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
3666        assert_eq!(
3667            *events.borrow(),
3668            &[language::Event::Dirtied, language::Event::FileHandleChanged]
3669        );
3670
3671        // When a file is already dirty when deleted, we don't emit a Dirtied event.
3672        let events = Rc::new(RefCell::new(Vec::new()));
3673        let buffer3 = tree
3674            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
3675            .await
3676            .unwrap();
3677        buffer3.update(&mut cx, |_, cx| {
3678            cx.subscribe(&buffer3, {
3679                let events = events.clone();
3680                move |_, _, event, _| events.borrow_mut().push(event.clone())
3681            })
3682            .detach();
3683        });
3684
3685        tree.flush_fs_events(&cx).await;
3686        buffer3.update(&mut cx, |buffer, cx| {
3687            buffer.edit(Some(0..0), "x", cx);
3688        });
3689        events.borrow_mut().clear();
3690        fs::remove_file(dir.path().join("file3")).unwrap();
3691        buffer3
3692            .condition(&cx, |_, _| !events.borrow().is_empty())
3693            .await;
3694        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
3695        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
3696    }
3697
3698    #[gpui::test]
3699    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
3700        use std::fs;
3701
3702        let initial_contents = "aaa\nbbbbb\nc\n";
3703        let dir = temp_tree(json!({ "the-file": initial_contents }));
3704        let http_client = FakeHttpClient::with_404_response();
3705        let client = Client::new(http_client.clone());
3706        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3707
3708        let tree = Worktree::open_local(
3709            client,
3710            user_store,
3711            dir.path(),
3712            Arc::new(RealFs),
3713            Default::default(),
3714            &mut cx.to_async(),
3715        )
3716        .await
3717        .unwrap();
3718        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3719            .await;
3720
3721        let abs_path = dir.path().join("the-file");
3722        let buffer = tree
3723            .update(&mut cx, |tree, cx| {
3724                tree.open_buffer(Path::new("the-file"), cx)
3725            })
3726            .await
3727            .unwrap();
3728
3729        // TODO
3730        // Add a cursor on each row.
3731        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
3732        //     assert!(!buffer.is_dirty());
3733        //     buffer.add_selection_set(
3734        //         &(0..3)
3735        //             .map(|row| Selection {
3736        //                 id: row as usize,
3737        //                 start: Point::new(row, 1),
3738        //                 end: Point::new(row, 1),
3739        //                 reversed: false,
3740        //                 goal: SelectionGoal::None,
3741        //             })
3742        //             .collect::<Vec<_>>(),
3743        //         cx,
3744        //     )
3745        // });
3746
3747        // Change the file on disk, adding two new lines of text, and removing
3748        // one line.
3749        buffer.read_with(&cx, |buffer, _| {
3750            assert!(!buffer.is_dirty());
3751            assert!(!buffer.has_conflict());
3752        });
3753        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
3754        fs::write(&abs_path, new_contents).unwrap();
3755
3756        // Because the buffer was not modified, it is reloaded from disk. Its
3757        // contents are edited according to the diff between the old and new
3758        // file contents.
3759        buffer
3760            .condition(&cx, |buffer, _| buffer.text() == new_contents)
3761            .await;
3762
3763        buffer.update(&mut cx, |buffer, _| {
3764            assert_eq!(buffer.text(), new_contents);
3765            assert!(!buffer.is_dirty());
3766            assert!(!buffer.has_conflict());
3767
3768            // TODO
3769            // let cursor_positions = buffer
3770            //     .selection_set(selection_set_id)
3771            //     .unwrap()
3772            //     .selections::<Point>(&*buffer)
3773            //     .map(|selection| {
3774            //         assert_eq!(selection.start, selection.end);
3775            //         selection.start
3776            //     })
3777            //     .collect::<Vec<_>>();
3778            // assert_eq!(
3779            //     cursor_positions,
3780            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
3781            // );
3782        });
3783
3784        // Modify the buffer
3785        buffer.update(&mut cx, |buffer, cx| {
3786            buffer.edit(vec![0..0], " ", cx);
3787            assert!(buffer.is_dirty());
3788            assert!(!buffer.has_conflict());
3789        });
3790
3791        // Change the file on disk again, adding blank lines to the beginning.
3792        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();
3793
3794        // Because the buffer is modified, it doesn't reload from disk, but is
3795        // marked as having a conflict.
3796        buffer
3797            .condition(&cx, |buffer, _| buffer.has_conflict())
3798            .await;
3799    }
3800
3801    #[gpui::test]
3802    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
3803        let (language_server_config, mut fake_server) =
3804            LanguageServerConfig::fake(cx.background()).await;
3805        let progress_token = language_server_config
3806            .disk_based_diagnostics_progress_token
3807            .clone()
3808            .unwrap();
3809        let mut languages = LanguageRegistry::new();
3810        languages.add(Arc::new(Language::new(
3811            LanguageConfig {
3812                name: "Rust".to_string(),
3813                path_suffixes: vec!["rs".to_string()],
3814                language_server: Some(language_server_config),
3815                ..Default::default()
3816            },
3817            Some(tree_sitter_rust::language()),
3818        )));
3819
3820        let dir = temp_tree(json!({
3821            "a.rs": "fn a() { A }",
3822            "b.rs": "const y: i32 = 1",
3823        }));
3824
3825        let http_client = FakeHttpClient::with_404_response();
3826        let client = Client::new(http_client.clone());
3827        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3828
3829        let tree = Worktree::open_local(
3830            client,
3831            user_store,
3832            dir.path(),
3833            Arc::new(RealFs),
3834            Arc::new(languages),
3835            &mut cx.to_async(),
3836        )
3837        .await
3838        .unwrap();
3839        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3840            .await;
3841
3842        // Cause worktree to start the fake language server
3843        let _buffer = tree
3844            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
3845            .await
3846            .unwrap();
3847
3848        let mut events = subscribe(&tree, &mut cx);
3849
3850        fake_server.start_progress(&progress_token).await;
3851        fake_server.start_progress(&progress_token).await;
3852        fake_server.end_progress(&progress_token).await;
3853        fake_server.start_progress(&progress_token).await;
3854
3855        fake_server
3856            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
3857                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
3858                version: None,
3859                diagnostics: vec![lsp::Diagnostic {
3860                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
3861                    severity: Some(lsp::DiagnosticSeverity::ERROR),
3862                    message: "undefined variable 'A'".to_string(),
3863                    ..Default::default()
3864                }],
3865            })
3866            .await;
3867
3868        let event = events.next().await.unwrap();
3869        assert_eq!(
3870            event,
3871            Event::DiagnosticsUpdated(Arc::from(Path::new("a.rs")))
3872        );
3873
3874        fake_server.end_progress(&progress_token).await;
3875        fake_server.end_progress(&progress_token).await;
3876
3877        let event = events.next().await.unwrap();
3878        assert_eq!(event, Event::DiskBasedDiagnosticsUpdated);
3879
3880        let buffer = tree
3881            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3882            .await
3883            .unwrap();
3884
3885        buffer.read_with(&cx, |buffer, _| {
3886            let snapshot = buffer.snapshot();
3887            let diagnostics = snapshot
3888                .diagnostics_in_range::<_, Point>(0..buffer.len())
3889                .collect::<Vec<_>>();
3890            assert_eq!(
3891                diagnostics,
3892                &[DiagnosticEntry {
3893                    range: Point::new(0, 9)..Point::new(0, 10),
3894                    diagnostic: Diagnostic {
3895                        severity: lsp::DiagnosticSeverity::ERROR,
3896                        message: "undefined variable 'A'".to_string(),
3897                        group_id: 0,
3898                        is_primary: true,
3899                        ..Default::default()
3900                    }
3901                }]
3902            )
3903        });
3904    }
3905
3906    #[gpui::test]
3907    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
3908        let fs = Arc::new(FakeFs::new());
3909        let http_client = FakeHttpClient::with_404_response();
3910        let client = Client::new(http_client.clone());
3911        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3912
3913        fs.insert_tree(
3914            "/the-dir",
3915            json!({
3916                "a.rs": "
3917                    fn foo(mut v: Vec<usize>) {
3918                        for x in &v {
3919                            v.push(1);
3920                        }
3921                    }
3922                "
3923                .unindent(),
3924            }),
3925        )
3926        .await;
3927
3928        let worktree = Worktree::open_local(
3929            client.clone(),
3930            user_store,
3931            "/the-dir".as_ref(),
3932            fs,
3933            Default::default(),
3934            &mut cx.to_async(),
3935        )
3936        .await
3937        .unwrap();
3938
3939        let buffer = worktree
3940            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3941            .await
3942            .unwrap();
3943
3944        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
3945        let message = lsp::PublishDiagnosticsParams {
3946            uri: buffer_uri.clone(),
3947            diagnostics: vec![
3948                lsp::Diagnostic {
3949                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3950                    severity: Some(DiagnosticSeverity::WARNING),
3951                    message: "error 1".to_string(),
3952                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3953                        location: lsp::Location {
3954                            uri: buffer_uri.clone(),
3955                            range: lsp::Range::new(
3956                                lsp::Position::new(1, 8),
3957                                lsp::Position::new(1, 9),
3958                            ),
3959                        },
3960                        message: "error 1 hint 1".to_string(),
3961                    }]),
3962                    ..Default::default()
3963                },
3964                lsp::Diagnostic {
3965                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3966                    severity: Some(DiagnosticSeverity::HINT),
3967                    message: "error 1 hint 1".to_string(),
3968                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3969                        location: lsp::Location {
3970                            uri: buffer_uri.clone(),
3971                            range: lsp::Range::new(
3972                                lsp::Position::new(1, 8),
3973                                lsp::Position::new(1, 9),
3974                            ),
3975                        },
3976                        message: "original diagnostic".to_string(),
3977                    }]),
3978                    ..Default::default()
3979                },
3980                lsp::Diagnostic {
3981                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
3982                    severity: Some(DiagnosticSeverity::ERROR),
3983                    message: "error 2".to_string(),
3984                    related_information: Some(vec![
3985                        lsp::DiagnosticRelatedInformation {
3986                            location: lsp::Location {
3987                                uri: buffer_uri.clone(),
3988                                range: lsp::Range::new(
3989                                    lsp::Position::new(1, 13),
3990                                    lsp::Position::new(1, 15),
3991                                ),
3992                            },
3993                            message: "error 2 hint 1".to_string(),
3994                        },
3995                        lsp::DiagnosticRelatedInformation {
3996                            location: lsp::Location {
3997                                uri: buffer_uri.clone(),
3998                                range: lsp::Range::new(
3999                                    lsp::Position::new(1, 13),
4000                                    lsp::Position::new(1, 15),
4001                                ),
4002                            },
4003                            message: "error 2 hint 2".to_string(),
4004                        },
4005                    ]),
4006                    ..Default::default()
4007                },
4008                lsp::Diagnostic {
4009                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
4010                    severity: Some(DiagnosticSeverity::HINT),
4011                    message: "error 2 hint 1".to_string(),
4012                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
4013                        location: lsp::Location {
4014                            uri: buffer_uri.clone(),
4015                            range: lsp::Range::new(
4016                                lsp::Position::new(2, 8),
4017                                lsp::Position::new(2, 17),
4018                            ),
4019                        },
4020                        message: "original diagnostic".to_string(),
4021                    }]),
4022                    ..Default::default()
4023                },
4024                lsp::Diagnostic {
4025                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
4026                    severity: Some(DiagnosticSeverity::HINT),
4027                    message: "error 2 hint 2".to_string(),
4028                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
4029                        location: lsp::Location {
4030                            uri: buffer_uri.clone(),
4031                            range: lsp::Range::new(
4032                                lsp::Position::new(2, 8),
4033                                lsp::Position::new(2, 17),
4034                            ),
4035                        },
4036                        message: "original diagnostic".to_string(),
4037                    }]),
4038                    ..Default::default()
4039                },
4040            ],
4041            version: None,
4042        };
4043
4044        worktree
4045            .update(&mut cx, |tree, cx| {
4046                tree.update_diagnostics(message, &Default::default(), cx)
4047            })
4048            .unwrap();
4049        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());
4050
4051        assert_eq!(
4052            buffer
4053                .diagnostics_in_range::<_, Point>(0..buffer.len())
4054                .collect::<Vec<_>>(),
4055            &[
4056                DiagnosticEntry {
4057                    range: Point::new(1, 8)..Point::new(1, 9),
4058                    diagnostic: Diagnostic {
4059                        severity: DiagnosticSeverity::WARNING,
4060                        message: "error 1".to_string(),
4061                        group_id: 0,
4062                        is_primary: true,
4063                        ..Default::default()
4064                    }
4065                },
4066                DiagnosticEntry {
4067                    range: Point::new(1, 8)..Point::new(1, 9),
4068                    diagnostic: Diagnostic {
4069                        severity: DiagnosticSeverity::HINT,
4070                        message: "error 1 hint 1".to_string(),
4071                        group_id: 0,
4072                        is_primary: false,
4073                        ..Default::default()
4074                    }
4075                },
4076                DiagnosticEntry {
4077                    range: Point::new(1, 13)..Point::new(1, 15),
4078                    diagnostic: Diagnostic {
4079                        severity: DiagnosticSeverity::HINT,
4080                        message: "error 2 hint 1".to_string(),
4081                        group_id: 1,
4082                        is_primary: false,
4083                        ..Default::default()
4084                    }
4085                },
4086                DiagnosticEntry {
4087                    range: Point::new(1, 13)..Point::new(1, 15),
4088                    diagnostic: Diagnostic {
4089                        severity: DiagnosticSeverity::HINT,
4090                        message: "error 2 hint 2".to_string(),
4091                        group_id: 1,
4092                        is_primary: false,
4093                        ..Default::default()
4094                    }
4095                },
4096                DiagnosticEntry {
4097                    range: Point::new(2, 8)..Point::new(2, 17),
4098                    diagnostic: Diagnostic {
4099                        severity: DiagnosticSeverity::ERROR,
4100                        message: "error 2".to_string(),
4101                        group_id: 1,
4102                        is_primary: true,
4103                        ..Default::default()
4104                    }
4105                }
4106            ]
4107        );
4108
4109        assert_eq!(
4110            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
4111            &[
4112                DiagnosticEntry {
4113                    range: Point::new(1, 8)..Point::new(1, 9),
4114                    diagnostic: Diagnostic {
4115                        severity: DiagnosticSeverity::WARNING,
4116                        message: "error 1".to_string(),
4117                        group_id: 0,
4118                        is_primary: true,
4119                        ..Default::default()
4120                    }
4121                },
4122                DiagnosticEntry {
4123                    range: Point::new(1, 8)..Point::new(1, 9),
4124                    diagnostic: Diagnostic {
4125                        severity: DiagnosticSeverity::HINT,
4126                        message: "error 1 hint 1".to_string(),
4127                        group_id: 0,
4128                        is_primary: false,
4129                        ..Default::default()
4130                    }
4131                },
4132            ]
4133        );
4134        assert_eq!(
4135            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
4136            &[
4137                DiagnosticEntry {
4138                    range: Point::new(1, 13)..Point::new(1, 15),
4139                    diagnostic: Diagnostic {
4140                        severity: DiagnosticSeverity::HINT,
4141                        message: "error 2 hint 1".to_string(),
4142                        group_id: 1,
4143                        is_primary: false,
4144                        ..Default::default()
4145                    }
4146                },
4147                DiagnosticEntry {
4148                    range: Point::new(1, 13)..Point::new(1, 15),
4149                    diagnostic: Diagnostic {
4150                        severity: DiagnosticSeverity::HINT,
4151                        message: "error 2 hint 2".to_string(),
4152                        group_id: 1,
4153                        is_primary: false,
4154                        ..Default::default()
4155                    }
4156                },
4157                DiagnosticEntry {
4158                    range: Point::new(2, 8)..Point::new(2, 17),
4159                    diagnostic: Diagnostic {
4160                        severity: DiagnosticSeverity::ERROR,
4161                        message: "error 2".to_string(),
4162                        group_id: 1,
4163                        is_primary: true,
4164                        ..Default::default()
4165                    }
4166                }
4167            ]
4168        );
4169    }
4170
4171    #[gpui::test(iterations = 100)]
4172    fn test_random(mut rng: StdRng) {
4173        let operations = env::var("OPERATIONS")
4174            .map(|o| o.parse().unwrap())
4175            .unwrap_or(40);
4176        let initial_entries = env::var("INITIAL_ENTRIES")
4177            .map(|o| o.parse().unwrap())
4178            .unwrap_or(20);
4179
4180        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
4181        for _ in 0..initial_entries {
4182            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
4183        }
4184        log::info!("Generated initial tree");
4185
4186        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4187        let fs = Arc::new(RealFs);
4188        let next_entry_id = Arc::new(AtomicUsize::new(0));
4189        let mut initial_snapshot = Snapshot {
4190            id: WorktreeId::from_usize(0),
4191            scan_id: 0,
4192            abs_path: root_dir.path().into(),
4193            entries_by_path: Default::default(),
4194            entries_by_id: Default::default(),
4195            removed_entry_ids: Default::default(),
4196            ignores: Default::default(),
4197            root_name: Default::default(),
4198            root_char_bag: Default::default(),
4199            next_entry_id: next_entry_id.clone(),
4200        };
4201        initial_snapshot.insert_entry(
4202            Entry::new(
4203                Path::new("").into(),
4204                &smol::block_on(fs.metadata(root_dir.path()))
4205                    .unwrap()
4206                    .unwrap(),
4207                &next_entry_id,
4208                Default::default(),
4209            ),
4210            fs.as_ref(),
4211        );
4212        let mut scanner = BackgroundScanner::new(
4213            Arc::new(Mutex::new(initial_snapshot.clone())),
4214            notify_tx,
4215            fs.clone(),
4216            Arc::new(gpui::executor::Background::new()),
4217        );
4218        smol::block_on(scanner.scan_dirs()).unwrap();
4219        scanner.snapshot().check_invariants();
4220
4221        let mut events = Vec::new();
4222        let mut snapshots = Vec::new();
4223        let mut mutations_len = operations;
4224        while mutations_len > 1 {
4225            if !events.is_empty() && rng.gen_bool(0.4) {
4226                let len = rng.gen_range(0..=events.len());
4227                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
4228                log::info!("Delivering events: {:#?}", to_deliver);
4229                smol::block_on(scanner.process_events(to_deliver));
4230                scanner.snapshot().check_invariants();
4231            } else {
4232                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
4233                mutations_len -= 1;
4234            }
4235
4236            if rng.gen_bool(0.2) {
4237                snapshots.push(scanner.snapshot());
4238            }
4239        }
4240        log::info!("Quiescing: {:#?}", events);
4241        smol::block_on(scanner.process_events(events));
4242        scanner.snapshot().check_invariants();
4243
4244        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4245        let mut new_scanner = BackgroundScanner::new(
4246            Arc::new(Mutex::new(initial_snapshot)),
4247            notify_tx,
4248            scanner.fs.clone(),
4249            scanner.executor.clone(),
4250        );
4251        smol::block_on(new_scanner.scan_dirs()).unwrap();
4252        assert_eq!(
4253            scanner.snapshot().to_vec(true),
4254            new_scanner.snapshot().to_vec(true)
4255        );
4256
4257        for mut prev_snapshot in snapshots {
4258            let include_ignored = rng.gen::<bool>();
4259            if !include_ignored {
4260                let mut entries_by_path_edits = Vec::new();
4261                let mut entries_by_id_edits = Vec::new();
4262                for entry in prev_snapshot
4263                    .entries_by_id
4264                    .cursor::<()>()
4265                    .filter(|e| e.is_ignored)
4266                {
4267                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4268                    entries_by_id_edits.push(Edit::Remove(entry.id));
4269                }
4270
4271                prev_snapshot
4272                    .entries_by_path
4273                    .edit(entries_by_path_edits, &());
4274                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4275            }
4276
4277            let update = scanner
4278                .snapshot()
4279                .build_update(&prev_snapshot, 0, 0, include_ignored);
4280            prev_snapshot.apply_update(update).unwrap();
4281            assert_eq!(
4282                prev_snapshot.to_vec(true),
4283                scanner.snapshot().to_vec(include_ignored)
4284            );
4285        }
4286    }
4287
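        /// Randomly creates, renames, or deletes files and directories under `root_path`
        /// (occasionally writing `.gitignore` files), returning synthesized
        /// `fsevent::Event`s describing the mutations.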
4288    fn randomly_mutate_tree(
4289        root_path: &Path,
4290        insertion_probability: f64,
4291        rng: &mut impl Rng,
4292    ) -> Result<Vec<fsevent::Event>> {
4293        let root_path = root_path.canonicalize().unwrap();
4294        let (dirs, files) = read_dir_recursive(root_path.clone());
4295
4296        let mut events = Vec::new();
4297        let mut record_event = |path: PathBuf| {
4298            events.push(fsevent::Event {
4299                event_id: SystemTime::now()
4300                    .duration_since(UNIX_EPOCH)
4301                    .unwrap()
4302                    .as_secs(),
4303                flags: fsevent::StreamFlags::empty(),
4304                path,
4305            });
4306        };
4307
4308        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4309            let path = dirs.choose(rng).unwrap();
4310            let new_path = path.join(gen_name(rng));
4311
4312            if rng.gen() {
4313                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4314                std::fs::create_dir(&new_path)?;
4315            } else {
4316                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4317                std::fs::write(&new_path, "")?;
4318            }
4319            record_event(new_path);
4320        } else if rng.gen_bool(0.05) {
4321            let ignore_dir_path = dirs.choose(rng).unwrap();
4322            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4323
4324            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4325            let files_to_ignore = {
4326                let len = rng.gen_range(0..=subfiles.len());
4327                subfiles.choose_multiple(rng, len)
4328            };
4329            let dirs_to_ignore = {
4330                let len = rng.gen_range(0..subdirs.len());
4331                subdirs.choose_multiple(rng, len)
4332            };
4333
4334            let mut ignore_contents = String::new();
4335            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4336                write!(
4337                    ignore_contents,
4338                    "{}\n",
4339                    path_to_ignore
4340                        .strip_prefix(&ignore_dir_path)?
4341                        .to_str()
4342                        .unwrap()
4343                )
4344                .unwrap();
4345            }
4346            log::info!(
4347                "Creating {:?} with contents:\n{}",
4348                ignore_path.strip_prefix(&root_path)?,
4349                ignore_contents
4350            );
4351            std::fs::write(&ignore_path, ignore_contents).unwrap();
4352            record_event(ignore_path);
4353        } else {
4354            let old_path = {
4355                let file_path = files.choose(rng);
4356                let dir_path = dirs[1..].choose(rng);
4357                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4358            };
4359
4360            let is_rename = rng.gen();
4361            if is_rename {
4362                let new_path_parent = dirs
4363                    .iter()
4364                    .filter(|d| !d.starts_with(old_path))
4365                    .choose(rng)
4366                    .unwrap();
4367
4368                let overwrite_existing_dir =
4369                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4370                let new_path = if overwrite_existing_dir {
4371                    std::fs::remove_dir_all(&new_path_parent).ok();
4372                    new_path_parent.to_path_buf()
4373                } else {
4374                    new_path_parent.join(gen_name(rng))
4375                };
4376
4377                log::info!(
4378                    "Renaming {:?} to {}{:?}",
4379                    old_path.strip_prefix(&root_path)?,
4380                    if overwrite_existing_dir {
4381                        "overwrite "
4382                    } else {
4383                        ""
4384                    },
4385                    new_path.strip_prefix(&root_path)?
4386                );
4387                std::fs::rename(&old_path, &new_path)?;
4388                record_event(old_path.clone());
4389                record_event(new_path);
4390            } else if old_path.is_dir() {
4391                let (dirs, files) = read_dir_recursive(old_path.clone());
4392
4393                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4394                std::fs::remove_dir_all(&old_path).unwrap();
4395                for file in files {
4396                    record_event(file);
4397                }
4398                for dir in dirs {
4399                    record_event(dir);
4400                }
4401            } else {
4402                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4403                std::fs::remove_file(old_path).unwrap();
4404                record_event(old_path.clone());
4405            }
4406        }
4407
4408        Ok(events)
4409    }
4410
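        // Recursively collects every directory and file beneath `path`. Note that
        // `path` itself is included in the returned list of directories.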
4411    fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4412        let child_entries = std::fs::read_dir(&path).unwrap();
4413        let mut dirs = vec![path];
4414        let mut files = Vec::new();
4415        for child_entry in child_entries {
4416            let child_path = child_entry.unwrap().path();
4417            if child_path.is_dir() {
4418                let (child_dirs, child_files) = read_dir_recursive(child_path);
4419                dirs.extend(child_dirs);
4420                files.extend(child_files);
4421            } else {
4422                files.push(child_path);
4423            }
4424        }
4425        (dirs, files)
4426    }
4427
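        // Generates a random six-character alphanumeric name for a new file or directory.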
4428    fn gen_name(rng: &mut impl Rng) -> String {
4429        (0..6)
4430            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4431            .map(char::from)
4432            .collect()
4433    }
4434
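        // Test-only Snapshot helpers for validating the snapshot's internal consistency.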
4435    impl Snapshot {
4436        fn check_invariants(&self) {
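                // The file iterators must yield entries in the same order as
                // `entries_by_path`: every file entry appears in the full iterator,
                // and every non-ignored file entry in the visible-only iterator.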
4437            let mut files = self.files(true, 0);
4438            let mut visible_files = self.files(false, 0);
4439            for entry in self.entries_by_path.cursor::<()>() {
4440                if entry.is_file() {
4441                    assert_eq!(files.next().unwrap().inode, entry.inode);
4442                    if !entry.is_ignored {
4443                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4444                    }
4445                }
4446            }
4447            assert!(files.next().is_none());
4448            assert!(visible_files.next().is_none());
4449
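                // Walking the tree through `child_entries`, visiting children in order,
                // must produce the same depth-first path sequence as scanning
                // `entries_by_path` directly.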
4450            let mut dfs_paths_via_traversal = Vec::new();
4451            let mut stack = vec![Path::new("")];
4452            while let Some(path) = stack.pop() {
4453                dfs_paths_via_traversal.push(path);
4454                let ix = stack.len();
4455                for child_entry in self.child_entries(path) {
4456                    stack.insert(ix, &child_entry.path);
4457                }
4458            }
4459
4460            let dfs_paths_via_iter = self
4461                .entries_by_path
4462                .cursor::<()>()
4463                .map(|e| e.path.as_ref())
4464                .collect::<Vec<_>>();
4465            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4466
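                // Every directory with a cached ignore must still exist in the tree,
                // along with its `.gitignore` file.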
4467            for (ignore_parent_path, _) in &self.ignores {
4468                assert!(self.entry_for_path(ignore_parent_path).is_some());
4469                assert!(self
4470                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4471                    .is_some());
4472            }
4473        }
4474
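            // Flattens the snapshot into a sorted list of (path, inode, is_ignored)
            // tuples so tests can compare snapshots structurally.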
4475        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4476            let mut paths = Vec::new();
4477            for entry in self.entries_by_path.cursor::<()>() {
4478                if include_ignored || !entry.is_ignored {
4479                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4480                }
4481            }
4482            paths.sort_by(|a, b| a.0.cmp(&b.0));
4483            paths
4484        }
4485    }
4486}