worktree.rs

use super::{
    fs::{self, Fs},
    ignore::IgnoreStack,
    DiagnosticSummary,
};
use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context, Result};
use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
use clock::ReplicaId;
use collections::{hash_map, BTreeMap, HashMap, HashSet};
use futures::{Stream, StreamExt};
use fuzzy::CharBag;
use gpui::{
    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
    Task, UpgradeModelHandle, WeakModelHandle,
};
use language::{
    Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
    Operation, PointUtf16, Rope,
};
use lazy_static::lazy_static;
use lsp::LanguageServer;
use parking_lot::Mutex;
use postage::{
    prelude::{Sink as _, Stream as _},
    watch,
};
use serde::Deserialize;
use smol::channel::{self, Sender};
use std::{
    any::Any,
    cmp::{self, Ordering},
    convert::{TryFrom, TryInto},
    ffi::{OsStr, OsString},
    fmt,
    future::Future,
    mem,
    ops::{Deref, Range},
    path::{Path, PathBuf},
    sync::{
        atomic::{AtomicUsize, Ordering::SeqCst},
        Arc,
    },
    time::{Duration, SystemTime},
};
use sum_tree::{Bias, Edit, SeekTarget, SumTree};
use util::{post_inc, ResultExt, TryFutureExt};

lazy_static! {
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}

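/// Progress of the background filesystem scan, as reported by the `BackgroundScanner`.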
#[derive(Clone, Debug)]
enum ScanState {
    Idle,
    Scanning,
    Err(Arc<anyhow::Error>),
}

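/// Identifies a worktree. Locally this wraps the gpui model id; for shared worktrees
/// it round-trips through the `u64` id used in the wire protocol.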
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct WorktreeId(usize);

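/// A directory tracked by the editor, either rooted on the local filesystem or
/// mirrored from a remote collaborator over RPC.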
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}

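/// Events emitted by a worktree as diagnostics are updated.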
#[derive(Debug)]
pub enum Event {
    DiskBasedDiagnosticsUpdated,
    DiagnosticsUpdated(Arc<Path>),
}

impl Entity for Worktree {
    type Event = Event;

    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        if let Self::Local(worktree) = self {
            let shutdown_futures = worktree
                .language_servers
                .drain()
                .filter_map(|(_, server)| server.shutdown())
                .collect::<Vec<_>>();
            Some(
                async move {
                    futures::future::join_all(shutdown_futures).await;
                }
                .boxed(),
            )
        } else {
            None
        }
    }
}

impl Worktree {
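    /// Creates a worktree for a local path and spawns the background task that
    /// watches the filesystem and keeps the snapshot up to date.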
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }

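    /// Builds a replica of a shared worktree from its serialized entries and keeps
    /// it current by applying incoming `UpdateWorktree` messages.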
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    diagnostic_summaries: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                })
            })
        });

        Ok(worktree)
    }

    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }

    pub fn languages(&self) -> &Arc<LanguageRegistry> {
        match self {
            Worktree::Local(worktree) => &worktree.language_registry,
            Worktree::Remote(worktree) => &worktree.languages,
        }
    }

    pub fn user_store(&self) -> &ModelHandle<UserStore> {
        match self {
            Worktree::Local(worktree) => &worktree.user_store,
            Worktree::Remote(worktree) => &worktree.user_store,
        }
    }

    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }

    pub fn handle_close_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        self.as_local_mut()
            .unwrap()
            .close_remote_buffer(envelope, cx)
    }

    pub fn diagnostic_summaries<'a>(
        &'a self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.clone(), summary.clone()))
    }

    pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
        match self {
            Worktree::Local(worktree) => &mut worktree.loading_buffers,
            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
        }
    }

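    /// Opens the buffer for `path`, reusing an already-open buffer or joining an
    /// in-flight load of the same path before starting a new one.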
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }

    #[cfg(feature = "test-support")]
    pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
        let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
            Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
            Worktree::Remote(worktree) => {
                Box::new(worktree.open_buffers.values().filter_map(|buf| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some(buf)
                    } else {
                        None
                    }
                }))
            }
        };

        let path = path.as_ref();
        open_buffers
            .find(|buffer| {
                if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
                    file.path().as_ref() == path
                } else {
                    false
                }
            })
            .is_some()
    }

    pub fn handle_update_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateBuffer>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let buffer_id = payload.buffer_id as usize;
        let ops = payload
            .operations
            .into_iter()
            .map(|op| language::proto::deserialize_operation(op))
            .collect::<Result<Vec<_>, _>>()?;

        match self {
            Worktree::Local(worktree) => {
                let buffer = worktree
                    .open_buffers
                    .get(&buffer_id)
                    .and_then(|buf| buf.upgrade(cx))
                    .ok_or_else(|| {
                        anyhow!("invalid buffer {} in update buffer message", buffer_id)
                    })?;
                buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
            }
            Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
                Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
                Some(RemoteBuffer::Loaded(buffer)) => {
                    if let Some(buffer) = buffer.upgrade(cx) {
                        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
                    } else {
                        worktree
                            .open_buffers
                            .insert(buffer_id, RemoteBuffer::Operations(ops));
                    }
                }
                None => {
                    worktree
                        .open_buffers
                        .insert(buffer_id, RemoteBuffer::Operations(ops));
                }
            },
        }

        Ok(())
    }

    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }

    pub fn handle_buffer_saved(
        &mut self,
        envelope: TypedEnvelope<proto::BufferSaved>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let worktree = self.as_remote_mut().unwrap();
        if let Some(buffer) = worktree
            .open_buffers
            .get(&(payload.buffer_id as usize))
            .and_then(|buf| buf.upgrade(cx))
        {
            buffer.update(cx, |buffer, cx| {
                let version = payload.version.try_into()?;
                let mtime = payload
                    .mtime
                    .ok_or_else(|| anyhow!("missing mtime"))?
                    .into();
                buffer.did_save(version, mtime, None, cx);
                Result::<_, anyhow::Error>::Ok(())
            })?;
        }
        Ok(())
    }

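    /// Copies the latest snapshot from the background scanner (or the remote update
    /// stream) into this handle and refreshes the files of open buffers. Local
    /// worktrees keep polling on a timer while a scan is still in progress.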
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }

    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }

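    /// Converts an LSP `PublishDiagnostics` notification into diagnostic entries
    /// grouped by matching source, code, and range, marks one primary entry per
    /// group, and records them against the file's path within the worktree.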
    pub fn update_diagnostics(
        &mut self,
        mut params: lsp::PublishDiagnosticsParams,
        disk_based_sources: &HashSet<String>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
        let abs_path = params
            .uri
            .to_file_path()
            .map_err(|_| anyhow!("URI is not a file"))?;
        let worktree_path = Arc::from(
            abs_path
                .strip_prefix(&this.abs_path)
                .context("path is not within worktree")?,
        );

        let mut group_ids_by_diagnostic_range = HashMap::default();
        let mut diagnostics_by_group_id = HashMap::default();
        let mut next_group_id = 0;
        for diagnostic in &mut params.diagnostics {
            let source = diagnostic.source.as_ref();
            let code = diagnostic.code.as_ref();
            let group_id = diagnostic_ranges(&diagnostic, &abs_path)
                .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
                .copied()
                .unwrap_or_else(|| {
                    let group_id = post_inc(&mut next_group_id);
                    for range in diagnostic_ranges(&diagnostic, &abs_path) {
                        group_ids_by_diagnostic_range.insert((source, code, range), group_id);
                    }
                    group_id
                });

            diagnostics_by_group_id
                .entry(group_id)
                .or_insert(Vec::new())
                .push(DiagnosticEntry {
                    range: diagnostic.range.start.to_point_utf16()
                        ..diagnostic.range.end.to_point_utf16(),
                    diagnostic: Diagnostic {
                        code: diagnostic.code.clone().map(|code| match code {
                            lsp::NumberOrString::Number(code) => code.to_string(),
                            lsp::NumberOrString::String(code) => code,
                        }),
                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
                        message: mem::take(&mut diagnostic.message),
                        group_id,
                        is_primary: false,
                        is_valid: true,
                        is_disk_based: diagnostic
                            .source
                            .as_ref()
                            .map_or(false, |source| disk_based_sources.contains(source)),
                    },
                });
        }

        let diagnostics = diagnostics_by_group_id
            .into_values()
            .flat_map(|mut diagnostics| {
                let primary = diagnostics
                    .iter_mut()
                    .min_by_key(|entry| entry.diagnostic.severity)
                    .unwrap();
                primary.diagnostic.is_primary = true;
                diagnostics
            })
            .collect::<Vec<_>>();

        self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
        Ok(())
    }

    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        let this = self.as_local_mut().unwrap();
        let summary = DiagnosticSummary::new(&diagnostics);
        this.diagnostic_summaries
            .insert(worktree_path.clone(), summary.clone());
        this.diagnostics.insert(worktree_path.clone(), diagnostics);

        cx.emit(Event::DiagnosticsUpdated(worktree_path.clone()));

        if let Some(share) = this.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = this.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = this.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                path,
                                error_count: summary.error_count as u32,
                                warning_count: summary.warning_count as u32,
                                info_count: summary.info_count as u32,
                                hint_count: summary.hint_count as u32,
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }

    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
}

impl WorktreeId {
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    pub(crate) fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    pub fn to_usize(&self) -> usize {
        self.0
    }
}

impl fmt::Display for WorktreeId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

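/// A point-in-time view of a worktree: its entries, gitignore state, and identity.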
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    scan_id: usize,
    abs_path: Arc<Path>,
    root_name: String,
    root_char_bag: CharBag,
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}

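/// Worktree state backed by the local filesystem, including the background scanner's
/// snapshot, open buffers, diagnostics, and any running language servers.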
pub struct LocalWorktree {
    snapshot: Snapshot,
    config: WorktreeConfig,
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    poll_task: Option<Task<()>>,
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    languages: Vec<Arc<Language>>,
    language_servers: HashMap<String, Arc<LanguageServer>>,
}

struct ShareState {
    project_id: u64,
    snapshots_tx: Sender<Snapshot>,
}

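/// Worktree state mirrored from another collaborator's machine over RPC.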
pub struct RemoteWorktree {
    project_id: u64,
    snapshot: Snapshot,
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, RemoteBuffer>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    queued_operations: Vec<(u64, Operation)>,
}

type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;

#[derive(Default, Deserialize)]
struct WorktreeConfig {
    collaborators: Vec<String>,
}

impl LocalWorktree {
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                languages: Default::default(),
                language_servers: Default::default(),
            };

            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }

    pub fn authorized_logins(&self) -> Vec<String> {
        self.config.collaborators.clone()
    }

    pub fn language_registry(&self) -> &LanguageRegistry {
        &self.language_registry
    }

    pub fn languages(&self) -> &[Arc<Language>] {
        &self.languages
    }

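    /// Ensures the given language is tracked by this worktree, starting its language
    /// server if one isn't already running and routing its diagnostics back into the
    /// worktree.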
    pub fn register_language(
        &mut self,
        language: &Arc<Language>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
            self.languages.push(language.clone());
        }

        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            let disk_based_sources = language
                .disk_based_diagnostic_sources()
                .cloned()
                .unwrap_or_default();
            let disk_based_diagnostics_progress_token =
                language.disk_based_diagnostics_progress_token().cloned();
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            let (disk_based_diagnostics_done_tx, disk_based_diagnostics_done_rx) =
                smol::channel::unbounded();
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| {
                let has_disk_based_diagnostic_progress_token =
                    disk_based_diagnostics_progress_token.is_some();
                let disk_based_diagnostics_done_tx = disk_based_diagnostics_done_tx.clone();
                async move {
                    while let Ok(diagnostics) = diagnostics_rx.recv().await {
                        if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                            handle.update(&mut cx, |this, cx| {
                                this.update_diagnostics(diagnostics, &disk_based_sources, cx)
                                    .log_err();
                                if !has_disk_based_diagnostic_progress_token {
                                    smol::block_on(disk_based_diagnostics_done_tx.send(())).ok();
                                }
                            })
                        } else {
                            break;
                        }
                    }
                }
            })
            .detach();

            language_server
                .on_notification::<lsp::notification::Progress, _>(move |params| {
                    let token = match params.token {
                        lsp::NumberOrString::Number(_) => None,
                        lsp::NumberOrString::String(token) => Some(token),
                    };

                    if token == disk_based_diagnostics_progress_token {
                        match params.value {
                            lsp::ProgressParamsValue::WorkDone(progress) => match progress {
                                lsp::WorkDoneProgress::End(_) => {
                                    smol::block_on(disk_based_diagnostics_done_tx.send(())).ok();
                                }
                                _ => {}
                            },
                        }
                    }
                })
                .detach();
            let rpc = self.client.clone();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(()) = disk_based_diagnostics_done_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let message = handle.update(&mut cx, |this, cx| {
                            cx.emit(Event::DiskBasedDiagnosticsUpdated);
                            let this = this.as_local().unwrap();
                            this.share
                                .as_ref()
                                .map(|share| proto::DiskBasedDiagnosticsUpdated {
                                    project_id: share.project_id,
                                    worktree_id: this.id().to_proto(),
                                })
                        });

                        if let Some(message) = message {
                            rpc.send(message).await.log_err();
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }

    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut result = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        result
    }

    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.remove(&path);
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }

    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }

    pub fn close_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
            shared_buffers.remove(&envelope.payload.buffer_id);
            cx.notify();
        }

        Ok(())
    }

    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Worktree>,
    ) {
        self.shared_buffers.remove(&peer_id);
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }

    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Scanning) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }

    fn is_scanning(&self) -> bool {
        if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
            true
        } else {
            false
        }
    }

    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }

    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }

    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.snapshot.abs_path.join(path)
        } else {
            self.snapshot.abs_path.to_path_buf()
        }
    }

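    /// Reads a file's contents from disk and eagerly refreshes its entry in the
    /// background snapshot before handing back the `File` and its text.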
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }

    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }

    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }

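    /// Shares this worktree under the given remote project id, streaming snapshot
    /// diffs to the server after each completed scan.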
1409    pub fn share(
1410        &mut self,
1411        project_id: u64,
1412        cx: &mut ModelContext<Worktree>,
1413    ) -> Task<anyhow::Result<()>> {
1414        if self.share.is_some() {
1415            return Task::ready(Ok(()));
1416        }
1417
1418        let snapshot = self.snapshot();
1419        let rpc = self.client.clone();
1420        let worktree_id = cx.model_id() as u64;
1421        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
1422        self.share = Some(ShareState {
1423            project_id,
1424            snapshots_tx: snapshots_to_send_tx,
1425        });
1426
1427        cx.background()
1428            .spawn({
1429                let rpc = rpc.clone();
1430                let snapshot = snapshot.clone();
1431                async move {
1432                    let mut prev_snapshot = snapshot;
1433                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
1434                        let message =
1435                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
1436                        match rpc.send(message).await {
1437                            Ok(()) => prev_snapshot = snapshot,
1438                            Err(err) => log::error!("error sending snapshot diff {}", err),
1439                        }
1440                    }
1441                }
1442            })
1443            .detach();
1444
1445        let share_message = cx.background().spawn(async move {
1446            proto::ShareWorktree {
1447                project_id,
1448                worktree: Some(snapshot.to_proto()),
1449            }
1450        });
1451
1452        cx.foreground().spawn(async move {
1453            rpc.request(share_message.await).await?;
1454            Ok(())
1455        })
1456    }
1457}
1458
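    /// Parses the `.gitignore` file at `abs_path` into a `Gitignore` matcher whose
    /// patterns are rooted at the file's parent directory.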
1459fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1460    let contents = smol::block_on(fs.load(&abs_path))?;
1461    let parent = abs_path.parent().unwrap_or(Path::new("/"));
1462    let mut builder = GitignoreBuilder::new(parent);
1463    for line in contents.lines() {
1464        builder.add_line(Some(abs_path.into()), line)?;
1465    }
1466    Ok(builder.build()?)
1467}
1468
1469impl Deref for Worktree {
1470    type Target = Snapshot;
1471
1472    fn deref(&self) -> &Self::Target {
1473        match self {
1474            Worktree::Local(worktree) => &worktree.snapshot,
1475            Worktree::Remote(worktree) => &worktree.snapshot,
1476        }
1477    }
1478}
1479
1480impl Deref for LocalWorktree {
1481    type Target = Snapshot;
1482
1483    fn deref(&self) -> &Self::Target {
1484        &self.snapshot
1485    }
1486}
1487
1488impl Deref for RemoteWorktree {
1489    type Target = Snapshot;
1490
1491    fn deref(&self) -> &Self::Target {
1492        &self.snapshot
1493    }
1494}
1495
1496impl fmt::Debug for LocalWorktree {
1497    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1498        self.snapshot.fmt(f)
1499    }
1500}
1501
1502impl RemoteWorktree {
1503    fn get_open_buffer(
1504        &mut self,
1505        path: &Path,
1506        cx: &mut ModelContext<Worktree>,
1507    ) -> Option<ModelHandle<Buffer>> {
1508        let handle = cx.handle();
1509        let mut existing_buffer = None;
1510        self.open_buffers.retain(|_buffer_id, buffer| {
1511            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
1512                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1513                    if file.worktree == handle && file.path().as_ref() == path {
1514                        existing_buffer = Some(buffer);
1515                    }
1516                }
1517                true
1518            } else {
1519                false
1520            }
1521        });
1522        existing_buffer
1523    }
1524
1525    fn open_buffer(
1526        &mut self,
1527        path: &Path,
1528        cx: &mut ModelContext<Worktree>,
1529    ) -> Task<Result<ModelHandle<Buffer>>> {
1530        let rpc = self.client.clone();
1531        let replica_id = self.replica_id;
1532        let project_id = self.project_id;
1533        let remote_worktree_id = self.id();
1534        let root_path = self.snapshot.abs_path.clone();
1535        let path: Arc<Path> = Arc::from(path);
1536        let path_string = path.to_string_lossy().to_string();
1537        cx.spawn_weak(move |this, mut cx| async move {
1538            let entry = this
1539                .upgrade(&cx)
1540                .ok_or_else(|| anyhow!("worktree was closed"))?
1541                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
1542                .ok_or_else(|| anyhow!("file does not exist"))?;
1543            let response = rpc
1544                .request(proto::OpenBuffer {
1545                    project_id,
1546                    worktree_id: remote_worktree_id.to_proto(),
1547                    path: path_string,
1548                })
1549                .await?;
1550
1551            let this = this
1552                .upgrade(&cx)
1553                .ok_or_else(|| anyhow!("worktree was closed"))?;
1554            let file = File {
1555                entry_id: Some(entry.id),
1556                worktree: this.clone(),
1557                worktree_path: root_path,
1558                path: entry.path,
1559                mtime: entry.mtime,
1560                is_local: false,
1561            };
1562            let language = this.read_with(&cx, |this, _| {
1563                use language::File;
1564                this.languages().select_language(file.full_path()).cloned()
1565            });
1566            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
1567            let buffer_id = remote_buffer.id as usize;
1568            let buffer = cx.add_model(|cx| {
1569                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
1570                    .unwrap()
1571                    .with_language(language, None, cx)
1572            });
1573            this.update(&mut cx, move |this, cx| {
1574                let this = this.as_remote_mut().unwrap();
1575                if let Some(RemoteBuffer::Operations(pending_ops)) = this
1576                    .open_buffers
1577                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
1578                {
1579                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
1580                }
1581                Result::<_, anyhow::Error>::Ok(buffer)
1582            })
1583        })
1584    }
1585
1586    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1587        for (_, buffer) in self.open_buffers.drain() {
1588            if let RemoteBuffer::Loaded(buffer) = buffer {
1589                if let Some(buffer) = buffer.upgrade(cx) {
1590                    buffer.update(cx, |buffer, cx| buffer.close(cx))
1591                }
1592            }
1593        }
1594    }
1595
1596    fn snapshot(&self) -> Snapshot {
1597        self.snapshot.clone()
1598    }
1599
1600    pub fn update_from_remote(
1601        &mut self,
1602        envelope: TypedEnvelope<proto::UpdateWorktree>,
1603        cx: &mut ModelContext<Worktree>,
1604    ) -> Result<()> {
1605        let mut tx = self.updates_tx.clone();
1606        let payload = envelope.payload.clone();
1607        cx.background()
1608            .spawn(async move {
1609                tx.send(payload).await.expect("receiver runs to completion");
1610            })
1611            .detach();
1612
1613        Ok(())
1614    }
1615
1616    pub fn update_diagnostic_summary(
1617        &mut self,
1618        envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
1619        cx: &mut ModelContext<Worktree>,
1620    ) {
1621        let path: Arc<Path> = Path::new(&envelope.payload.path).into();
1622        self.diagnostic_summaries.insert(
1623            path.clone(),
1624            DiagnosticSummary {
1625                error_count: envelope.payload.error_count as usize,
1626                warning_count: envelope.payload.warning_count as usize,
1627                info_count: envelope.payload.info_count as usize,
1628                hint_count: envelope.payload.hint_count as usize,
1629            },
1630        );
1631        cx.emit(Event::DiagnosticsUpdated(path));
1632    }
1633
1634    pub fn disk_based_diagnostics_updated(&self, cx: &mut ModelContext<Worktree>) {
1635        cx.emit(Event::DiskBasedDiagnosticsUpdated);
1636    }
1637
1638    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1639        for (_, buffer) in &self.open_buffers {
1640            if let Some(buffer) = buffer.upgrade(cx) {
1641                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1642            }
1643        }
1644        cx.notify();
1645    }
1646}
1647
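    /// A buffer belonging to a remote worktree: either a loaded buffer handle, or a
    /// backlog of operations received before the buffer finished loading, which are
    /// applied once it does.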
1648enum RemoteBuffer {
1649    Operations(Vec<Operation>),
1650    Loaded(WeakModelHandle<Buffer>),
1651}
1652
1653impl RemoteBuffer {
1654    fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1655        match self {
1656            Self::Operations(_) => None,
1657            Self::Loaded(buffer) => buffer.upgrade(cx),
1658        }
1659    }
1660}
1661
1662impl Snapshot {
1663    pub fn id(&self) -> WorktreeId {
1664        self.id
1665    }
1666
1667    pub fn to_proto(&self) -> proto::Worktree {
1668        let root_name = self.root_name.clone();
1669        proto::Worktree {
1670            id: self.id.0 as u64,
1671            root_name,
1672            entries: self
1673                .entries_by_path
1674                .cursor::<()>()
1675                .filter(|e| !e.is_ignored)
1676                .map(Into::into)
1677                .collect(),
1678        }
1679    }
1680
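        /// Computes the difference between this snapshot and `other` as a
        /// `proto::UpdateWorktree` message by walking both id-ordered entry trees in
        /// lockstep: entries that are new or have a newer `scan_id` in `self` become
        /// `updated_entries`, while entries present only in `other` become
        /// `removed_entries`.
        ///
        /// Illustrative round trip (a sketch; error handling omitted): given snapshots
        /// `old` and `new`, `old.apply_update(new.build_update(&old, project_id,
        /// worktree_id, false))` brings `old` in line with `new`'s non-ignored entries.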
1681    pub fn build_update(
1682        &self,
1683        other: &Self,
1684        project_id: u64,
1685        worktree_id: u64,
1686        include_ignored: bool,
1687    ) -> proto::UpdateWorktree {
1688        let mut updated_entries = Vec::new();
1689        let mut removed_entries = Vec::new();
1690        let mut self_entries = self
1691            .entries_by_id
1692            .cursor::<()>()
1693            .filter(|e| include_ignored || !e.is_ignored)
1694            .peekable();
1695        let mut other_entries = other
1696            .entries_by_id
1697            .cursor::<()>()
1698            .filter(|e| include_ignored || !e.is_ignored)
1699            .peekable();
1700        loop {
1701            match (self_entries.peek(), other_entries.peek()) {
1702                (Some(self_entry), Some(other_entry)) => {
1703                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1704                        Ordering::Less => {
1705                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1706                            updated_entries.push(entry);
1707                            self_entries.next();
1708                        }
1709                        Ordering::Equal => {
1710                            if self_entry.scan_id != other_entry.scan_id {
1711                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1712                                updated_entries.push(entry);
1713                            }
1714
1715                            self_entries.next();
1716                            other_entries.next();
1717                        }
1718                        Ordering::Greater => {
1719                            removed_entries.push(other_entry.id as u64);
1720                            other_entries.next();
1721                        }
1722                    }
1723                }
1724                (Some(self_entry), None) => {
1725                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1726                    updated_entries.push(entry);
1727                    self_entries.next();
1728                }
1729                (None, Some(other_entry)) => {
1730                    removed_entries.push(other_entry.id as u64);
1731                    other_entries.next();
1732                }
1733                (None, None) => break,
1734            }
1735        }
1736
1737        proto::UpdateWorktree {
1738            project_id,
1739            worktree_id,
1740            root_name: self.root_name().to_string(),
1741            updated_entries,
1742            removed_entries,
1743        }
1744    }
1745
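        /// Applies an `UpdateWorktree` message received from a remote peer, bumping the
        /// local `scan_id` and editing both the path-ordered and id-ordered entry trees.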
1746    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1747        self.scan_id += 1;
1748        let scan_id = self.scan_id;
1749
1750        let mut entries_by_path_edits = Vec::new();
1751        let mut entries_by_id_edits = Vec::new();
1752        for entry_id in update.removed_entries {
1753            let entry_id = entry_id as usize;
1754            let entry = self
1755                .entry_for_id(entry_id)
1756                .ok_or_else(|| anyhow!("unknown entry"))?;
1757            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1758            entries_by_id_edits.push(Edit::Remove(entry.id));
1759        }
1760
1761        for entry in update.updated_entries {
1762            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1763            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1764                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1765            }
1766            entries_by_id_edits.push(Edit::Insert(PathEntry {
1767                id: entry.id,
1768                path: entry.path.clone(),
1769                is_ignored: entry.is_ignored,
1770                scan_id,
1771            }));
1772            entries_by_path_edits.push(Edit::Insert(entry));
1773        }
1774
1775        self.entries_by_path.edit(entries_by_path_edits, &());
1776        self.entries_by_id.edit(entries_by_id_edits, &());
1777
1778        Ok(())
1779    }
1780
1781    pub fn file_count(&self) -> usize {
1782        self.entries_by_path.summary().file_count
1783    }
1784
1785    pub fn visible_file_count(&self) -> usize {
1786        self.entries_by_path.summary().visible_file_count
1787    }
1788
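        /// Returns a traversal positioned at the `start_offset`-th entry, counting only
        /// the kinds of entries selected by `include_dirs` and `include_ignored`.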
1789    fn traverse_from_offset(
1790        &self,
1791        include_dirs: bool,
1792        include_ignored: bool,
1793        start_offset: usize,
1794    ) -> Traversal {
1795        let mut cursor = self.entries_by_path.cursor();
1796        cursor.seek(
1797            &TraversalTarget::Count {
1798                count: start_offset,
1799                include_dirs,
1800                include_ignored,
1801            },
1802            Bias::Right,
1803            &(),
1804        );
1805        Traversal {
1806            cursor,
1807            include_dirs,
1808            include_ignored,
1809        }
1810    }
1811
1812    fn traverse_from_path(
1813        &self,
1814        include_dirs: bool,
1815        include_ignored: bool,
1816        path: &Path,
1817    ) -> Traversal {
1818        let mut cursor = self.entries_by_path.cursor();
1819        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1820        Traversal {
1821            cursor,
1822            include_dirs,
1823            include_ignored,
1824        }
1825    }
1826
1827    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1828        self.traverse_from_offset(false, include_ignored, start)
1829    }
1830
1831    pub fn entries(&self, include_ignored: bool) -> Traversal {
1832        self.traverse_from_offset(true, include_ignored, 0)
1833    }
1834
1835    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1836        let empty_path = Path::new("");
1837        self.entries_by_path
1838            .cursor::<()>()
1839            .filter(move |entry| entry.path.as_ref() != empty_path)
1840            .map(|entry| &entry.path)
1841    }
1842
1843    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1844        let mut cursor = self.entries_by_path.cursor();
1845        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1846        let traversal = Traversal {
1847            cursor,
1848            include_dirs: true,
1849            include_ignored: true,
1850        };
1851        ChildEntriesIter {
1852            traversal,
1853            parent_path,
1854        }
1855    }
1856
1857    pub fn root_entry(&self) -> Option<&Entry> {
1858        self.entry_for_path("")
1859    }
1860
1861    pub fn root_name(&self) -> &str {
1862        &self.root_name
1863    }
1864
1865    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1866        let path = path.as_ref();
1867        self.traverse_from_path(true, true, path)
1868            .entry()
1869            .and_then(|entry| {
1870                if entry.path.as_ref() == path {
1871                    Some(entry)
1872                } else {
1873                    None
1874                }
1875            })
1876    }
1877
1878    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
1879        let entry = self.entries_by_id.get(&id, &())?;
1880        self.entry_for_path(&entry.path)
1881    }
1882
1883    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1884        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1885    }
1886
1887    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1888        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1889            let abs_path = self.abs_path.join(&entry.path);
1890            match build_gitignore(&abs_path, fs) {
1891                Ok(ignore) => {
1892                    let ignore_dir_path = entry.path.parent().unwrap();
1893                    self.ignores
1894                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
1895                }
1896                Err(error) => {
1897                    log::error!(
1898                        "error loading .gitignore file {:?} - {:?}",
1899                        &entry.path,
1900                        error
1901                    );
1902                }
1903            }
1904        }
1905
1906        self.reuse_entry_id(&mut entry);
1907        self.entries_by_path.insert_or_replace(entry.clone(), &());
1908        self.entries_by_id.insert_or_replace(
1909            PathEntry {
1910                id: entry.id,
1911                path: entry.path.clone(),
1912                is_ignored: entry.is_ignored,
1913                scan_id: self.scan_id,
1914            },
1915            &(),
1916        );
1917        entry
1918    }
1919
1920    fn populate_dir(
1921        &mut self,
1922        parent_path: Arc<Path>,
1923        entries: impl IntoIterator<Item = Entry>,
1924        ignore: Option<Arc<Gitignore>>,
1925    ) {
1926        let mut parent_entry = self
1927            .entries_by_path
1928            .get(&PathKey(parent_path.clone()), &())
1929            .unwrap()
1930            .clone();
1931        if let Some(ignore) = ignore {
1932            self.ignores.insert(parent_path, (ignore, self.scan_id));
1933        }
1934        if matches!(parent_entry.kind, EntryKind::PendingDir) {
1935            parent_entry.kind = EntryKind::Dir;
1936        } else {
1937            unreachable!();
1938        }
1939
1940        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1941        let mut entries_by_id_edits = Vec::new();
1942
1943        for mut entry in entries {
1944            self.reuse_entry_id(&mut entry);
1945            entries_by_id_edits.push(Edit::Insert(PathEntry {
1946                id: entry.id,
1947                path: entry.path.clone(),
1948                is_ignored: entry.is_ignored,
1949                scan_id: self.scan_id,
1950            }));
1951            entries_by_path_edits.push(Edit::Insert(entry));
1952        }
1953
1954        self.entries_by_path.edit(entries_by_path_edits, &());
1955        self.entries_by_id.edit(entries_by_id_edits, &());
1956    }
1957
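        /// Reuses an existing entry id where possible: either the id of an entry with
        /// the same inode that was removed earlier in this scan, or the id of the entry
        /// currently stored at the same path.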
1958    fn reuse_entry_id(&mut self, entry: &mut Entry) {
1959        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1960            entry.id = removed_entry_id;
1961        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1962            entry.id = existing_entry.id;
1963        }
1964    }
1965
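        /// Removes the entry at `path` and all of its descendants, remembering the
        /// removed ids by inode so they can be reused if the same files reappear later
        /// in the scan (e.g. when a rename is observed as a remove plus an add).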
1966    fn remove_path(&mut self, path: &Path) {
1967        let mut new_entries;
1968        let removed_entries;
1969        {
1970            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1971            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1972            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1973            new_entries.push_tree(cursor.suffix(&()), &());
1974        }
1975        self.entries_by_path = new_entries;
1976
1977        let mut entries_by_id_edits = Vec::new();
1978        for entry in removed_entries.cursor::<()>() {
1979            let removed_entry_id = self
1980                .removed_entry_ids
1981                .entry(entry.inode)
1982                .or_insert(entry.id);
1983            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1984            entries_by_id_edits.push(Edit::Remove(entry.id));
1985        }
1986        self.entries_by_id.edit(entries_by_id_edits, &());
1987
1988        if path.file_name() == Some(&GITIGNORE) {
1989            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
1990                *scan_id = self.scan_id;
1991            }
1992        }
1993    }
1994
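        /// Builds the stack of `.gitignore` matchers that applies to `path`, walking
        /// its ancestors from the root downward and short-circuiting to
        /// `IgnoreStack::all()` as soon as an ancestor (or the path itself) is ignored.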
1995    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1996        let mut new_ignores = Vec::new();
1997        for ancestor in path.ancestors().skip(1) {
1998            if let Some((ignore, _)) = self.ignores.get(ancestor) {
1999                new_ignores.push((ancestor, Some(ignore.clone())));
2000            } else {
2001                new_ignores.push((ancestor, None));
2002            }
2003        }
2004
2005        let mut ignore_stack = IgnoreStack::none();
2006        for (parent_path, ignore) in new_ignores.into_iter().rev() {
2007            if ignore_stack.is_path_ignored(&parent_path, true) {
2008                ignore_stack = IgnoreStack::all();
2009                break;
2010            } else if let Some(ignore) = ignore {
2011                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
2012            }
2013        }
2014
2015        if ignore_stack.is_path_ignored(path, is_dir) {
2016            ignore_stack = IgnoreStack::all();
2017        }
2018
2019        ignore_stack
2020    }
2021}
2022
2023impl fmt::Debug for Snapshot {
2024    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2025        for entry in self.entries_by_path.cursor::<()>() {
2026            for _ in entry.path.ancestors().skip(1) {
2027                write!(f, " ")?;
2028            }
2029            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2030        }
2031        Ok(())
2032    }
2033}
2034
2035#[derive(Clone, PartialEq)]
2036pub struct File {
2037    entry_id: Option<usize>,
2038    worktree: ModelHandle<Worktree>,
2039    worktree_path: Arc<Path>,
2040    pub path: Arc<Path>,
2041    pub mtime: SystemTime,
2042    is_local: bool,
2043}
2044
2045impl language::File for File {
2046    fn mtime(&self) -> SystemTime {
2047        self.mtime
2048    }
2049
2050    fn path(&self) -> &Arc<Path> {
2051        &self.path
2052    }
2053
2054    fn abs_path(&self) -> Option<PathBuf> {
2055        if self.is_local {
2056            Some(self.worktree_path.join(&self.path))
2057        } else {
2058            None
2059        }
2060    }
2061
2062    fn full_path(&self) -> PathBuf {
2063        let mut full_path = PathBuf::new();
2064        if let Some(worktree_name) = self.worktree_path.file_name() {
2065            full_path.push(worktree_name);
2066        }
2067        full_path.push(&self.path);
2068        full_path
2069    }
2070
2071    /// Returns the last component of this handle's absolute path. If this handle refers to the root
2072    /// of its worktree, then this method will return the name of the worktree itself.
2073    fn file_name<'a>(&'a self) -> Option<OsString> {
2074        self.path
2075            .file_name()
2076            .or_else(|| self.worktree_path.file_name())
2077            .map(Into::into)
2078    }
2079
2080    fn is_deleted(&self) -> bool {
2081        self.entry_id.is_none()
2082    }
2083
2084    fn save(
2085        &self,
2086        buffer_id: u64,
2087        text: Rope,
2088        version: clock::Global,
2089        cx: &mut MutableAppContext,
2090    ) -> Task<Result<(clock::Global, SystemTime)>> {
2091        let worktree_id = self.worktree.read(cx).id().to_proto();
2092        self.worktree.update(cx, |worktree, cx| match worktree {
2093            Worktree::Local(worktree) => {
2094                let rpc = worktree.client.clone();
2095                let project_id = worktree.share.as_ref().map(|share| share.project_id);
2096                let save = worktree.save(self.path.clone(), text, cx);
2097                cx.background().spawn(async move {
2098                    let entry = save.await?;
2099                    if let Some(project_id) = project_id {
2100                        rpc.send(proto::BufferSaved {
2101                            project_id,
2102                            worktree_id,
2103                            buffer_id,
2104                            version: (&version).into(),
2105                            mtime: Some(entry.mtime.into()),
2106                        })
2107                        .await?;
2108                    }
2109                    Ok((version, entry.mtime))
2110                })
2111            }
2112            Worktree::Remote(worktree) => {
2113                let rpc = worktree.client.clone();
2114                let project_id = worktree.project_id;
2115                cx.foreground().spawn(async move {
2116                    let response = rpc
2117                        .request(proto::SaveBuffer {
2118                            project_id,
2119                            worktree_id,
2120                            buffer_id,
2121                        })
2122                        .await?;
2123                    let version = response.version.try_into()?;
2124                    let mtime = response
2125                        .mtime
2126                        .ok_or_else(|| anyhow!("missing mtime"))?
2127                        .into();
2128                    Ok((version, mtime))
2129                })
2130            }
2131        })
2132    }
2133
2134    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
2135        let worktree = self.worktree.read(cx).as_local()?;
2136        let abs_path = worktree.absolutize(&self.path);
2137        let fs = worktree.fs.clone();
2138        Some(
2139            cx.background()
2140                .spawn(async move { fs.load(&abs_path).await }),
2141        )
2142    }
2143
2144    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
2145        self.worktree.update(cx, |worktree, cx| {
2146            worktree.send_buffer_update(buffer_id, operation, cx);
2147        });
2148    }
2149
2150    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
2151        self.worktree.update(cx, |worktree, cx| {
2152            if let Worktree::Remote(worktree) = worktree {
2153                let project_id = worktree.project_id;
2154                let worktree_id = worktree.id().to_proto();
2155                let rpc = worktree.client.clone();
2156                cx.background()
2157                    .spawn(async move {
2158                        if let Err(error) = rpc
2159                            .send(proto::CloseBuffer {
2160                                project_id,
2161                                worktree_id,
2162                                buffer_id,
2163                            })
2164                            .await
2165                        {
2166                            log::error!("error closing remote buffer: {}", error);
2167                        }
2168                    })
2169                    .detach();
2170            }
2171        });
2172    }
2173
2174    fn as_any(&self) -> &dyn Any {
2175        self
2176    }
2177}
2178
2179impl File {
2180    pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2181        file.and_then(|f| f.as_any().downcast_ref())
2182    }
2183
2184    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2185        self.worktree.read(cx).id()
2186    }
2187}
2188
2189#[derive(Clone, Debug)]
2190pub struct Entry {
2191    pub id: usize,
2192    pub kind: EntryKind,
2193    pub path: Arc<Path>,
2194    pub inode: u64,
2195    pub mtime: SystemTime,
2196    pub is_symlink: bool,
2197    pub is_ignored: bool,
2198}
2199
2200#[derive(Clone, Debug)]
2201pub enum EntryKind {
2202    PendingDir,
2203    Dir,
2204    File(CharBag),
2205}
2206
2207impl Entry {
2208    fn new(
2209        path: Arc<Path>,
2210        metadata: &fs::Metadata,
2211        next_entry_id: &AtomicUsize,
2212        root_char_bag: CharBag,
2213    ) -> Self {
2214        Self {
2215            id: next_entry_id.fetch_add(1, SeqCst),
2216            kind: if metadata.is_dir {
2217                EntryKind::PendingDir
2218            } else {
2219                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2220            },
2221            path,
2222            inode: metadata.inode,
2223            mtime: metadata.mtime,
2224            is_symlink: metadata.is_symlink,
2225            is_ignored: false,
2226        }
2227    }
2228
2229    pub fn is_dir(&self) -> bool {
2230        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2231    }
2232
2233    pub fn is_file(&self) -> bool {
2234        matches!(self.kind, EntryKind::File(_))
2235    }
2236}
2237
2238impl sum_tree::Item for Entry {
2239    type Summary = EntrySummary;
2240
2241    fn summary(&self) -> Self::Summary {
2242        let visible_count = if self.is_ignored { 0 } else { 1 };
2243        let file_count;
2244        let visible_file_count;
2245        if self.is_file() {
2246            file_count = 1;
2247            visible_file_count = visible_count;
2248        } else {
2249            file_count = 0;
2250            visible_file_count = 0;
2251        }
2252
2253        EntrySummary {
2254            max_path: self.path.clone(),
2255            count: 1,
2256            visible_count,
2257            file_count,
2258            visible_file_count,
2259        }
2260    }
2261}
2262
2263impl sum_tree::KeyedItem for Entry {
2264    type Key = PathKey;
2265
2266    fn key(&self) -> Self::Key {
2267        PathKey(self.path.clone())
2268    }
2269}
2270
2271#[derive(Clone, Debug)]
2272pub struct EntrySummary {
2273    max_path: Arc<Path>,
2274    count: usize,
2275    visible_count: usize,
2276    file_count: usize,
2277    visible_file_count: usize,
2278}
2279
2280impl Default for EntrySummary {
2281    fn default() -> Self {
2282        Self {
2283            max_path: Arc::from(Path::new("")),
2284            count: 0,
2285            visible_count: 0,
2286            file_count: 0,
2287            visible_file_count: 0,
2288        }
2289    }
2290}
2291
2292impl sum_tree::Summary for EntrySummary {
2293    type Context = ();
2294
2295    fn add_summary(&mut self, rhs: &Self, _: &()) {
2296        self.max_path = rhs.max_path.clone();
            self.count += rhs.count;
2297        self.visible_count += rhs.visible_count;
2298        self.file_count += rhs.file_count;
2299        self.visible_file_count += rhs.visible_file_count;
2300    }
2301}
2302
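/// A secondary index record mapping an entry id to its path, used to look entries
/// up by id when building snapshot diffs and applying remote updates.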
2303#[derive(Clone, Debug)]
2304struct PathEntry {
2305    id: usize,
2306    path: Arc<Path>,
2307    is_ignored: bool,
2308    scan_id: usize,
2309}
2310
2311impl sum_tree::Item for PathEntry {
2312    type Summary = PathEntrySummary;
2313
2314    fn summary(&self) -> Self::Summary {
2315        PathEntrySummary { max_id: self.id }
2316    }
2317}
2318
2319impl sum_tree::KeyedItem for PathEntry {
2320    type Key = usize;
2321
2322    fn key(&self) -> Self::Key {
2323        self.id
2324    }
2325}
2326
2327#[derive(Clone, Debug, Default)]
2328struct PathEntrySummary {
2329    max_id: usize,
2330}
2331
2332impl sum_tree::Summary for PathEntrySummary {
2333    type Context = ();
2334
2335    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2336        self.max_id = summary.max_id;
2337    }
2338}
2339
2340impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
2341    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2342        *self = summary.max_id;
2343    }
2344}
2345
2346#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2347pub struct PathKey(Arc<Path>);
2348
2349impl Default for PathKey {
2350    fn default() -> Self {
2351        Self(Path::new("").into())
2352    }
2353}
2354
2355impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2356    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2357        self.0 = summary.max_path.clone();
2358    }
2359}
2360
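/// Scans a worktree on the background executor: an initial recursive scan of the
/// root directory, followed by incremental updates driven by file-system events.
/// Progress is reported through `notify` as `ScanState` transitions.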
2361struct BackgroundScanner {
2362    fs: Arc<dyn Fs>,
2363    snapshot: Arc<Mutex<Snapshot>>,
2364    notify: Sender<ScanState>,
2365    executor: Arc<executor::Background>,
2366}
2367
2368impl BackgroundScanner {
2369    fn new(
2370        snapshot: Arc<Mutex<Snapshot>>,
2371        notify: Sender<ScanState>,
2372        fs: Arc<dyn Fs>,
2373        executor: Arc<executor::Background>,
2374    ) -> Self {
2375        Self {
2376            fs,
2377            snapshot,
2378            notify,
2379            executor,
2380        }
2381    }
2382
2383    fn abs_path(&self) -> Arc<Path> {
2384        self.snapshot.lock().abs_path.clone()
2385    }
2386
2387    fn snapshot(&self) -> Snapshot {
2388        self.snapshot.lock().clone()
2389    }
2390
2391    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2392        if self.notify.send(ScanState::Scanning).await.is_err() {
2393            return;
2394        }
2395
2396        if let Err(err) = self.scan_dirs().await {
2397            if self
2398                .notify
2399                .send(ScanState::Err(Arc::new(err)))
2400                .await
2401                .is_err()
2402            {
2403                return;
2404            }
2405        }
2406
2407        if self.notify.send(ScanState::Idle).await.is_err() {
2408            return;
2409        }
2410
2411        futures::pin_mut!(events_rx);
2412        while let Some(events) = events_rx.next().await {
2413            if self.notify.send(ScanState::Scanning).await.is_err() {
2414                break;
2415            }
2416
2417            if !self.process_events(events).await {
2418                break;
2419            }
2420
2421            if self.notify.send(ScanState::Idle).await.is_err() {
2422                break;
2423            }
2424        }
2425    }
2426
2427    async fn scan_dirs(&mut self) -> Result<()> {
2428        let root_char_bag;
2429        let next_entry_id;
2430        let is_dir;
2431        {
2432            let snapshot = self.snapshot.lock();
2433            root_char_bag = snapshot.root_char_bag;
2434            next_entry_id = snapshot.next_entry_id.clone();
2435            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2436        };
2437
2438        if is_dir {
2439            let path: Arc<Path> = Arc::from(Path::new(""));
2440            let abs_path = self.abs_path();
2441            let (tx, rx) = channel::unbounded();
2442            tx.send(ScanJob {
2443                abs_path: abs_path.to_path_buf(),
2444                path,
2445                ignore_stack: IgnoreStack::none(),
2446                scan_queue: tx.clone(),
2447            })
2448            .await
2449            .unwrap();
2450            drop(tx);
2451
2452            self.executor
2453                .scoped(|scope| {
2454                    for _ in 0..self.executor.num_cpus() {
2455                        scope.spawn(async {
2456                            while let Ok(job) = rx.recv().await {
2457                                if let Err(err) = self
2458                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2459                                    .await
2460                                {
2461                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
2462                                }
2463                            }
2464                        });
2465                    }
2466                })
2467                .await;
2468        }
2469
2470        Ok(())
2471    }
2472
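        /// Processes a single `ScanJob`: reads the directory at `job.abs_path`, builds
        /// entries for its children (loading any `.gitignore` found there), inserts them
        /// into the shared snapshot, and enqueues each child directory as a new job.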
2473    async fn scan_dir(
2474        &self,
2475        root_char_bag: CharBag,
2476        next_entry_id: Arc<AtomicUsize>,
2477        job: &ScanJob,
2478    ) -> Result<()> {
2479        let mut new_entries: Vec<Entry> = Vec::new();
2480        let mut new_jobs: Vec<ScanJob> = Vec::new();
2481        let mut ignore_stack = job.ignore_stack.clone();
2482        let mut new_ignore = None;
2483
2484        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2485        while let Some(child_abs_path) = child_paths.next().await {
2486            let child_abs_path = match child_abs_path {
2487                Ok(child_abs_path) => child_abs_path,
2488                Err(error) => {
2489                    log::error!("error processing entry {:?}", error);
2490                    continue;
2491                }
2492            };
2493            let child_name = child_abs_path.file_name().unwrap();
2494            let child_path: Arc<Path> = job.path.join(child_name).into();
2495            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
2496                Some(metadata) => metadata,
2497                None => continue,
2498            };
2499
2500            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2501            if child_name == *GITIGNORE {
2502                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
2503                    Ok(ignore) => {
2504                        let ignore = Arc::new(ignore);
2505                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2506                        new_ignore = Some(ignore);
2507                    }
2508                    Err(error) => {
2509                        log::error!(
2510                            "error loading .gitignore file {:?} - {:?}",
2511                            child_name,
2512                            error
2513                        );
2514                    }
2515                }
2516
2517                // Update the ignore status of any child entries we've already processed to reflect
2518                // the ignore file in the current directory. Because `.gitignore` starts with a `.`,
2519                // it tends to appear early in the directory listing, so such entries should rarely
2520                // be numerous. Update the ignore stack associated with any new jobs as well.
2521                let mut new_jobs = new_jobs.iter_mut();
2522                for entry in &mut new_entries {
2523                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2524                    if entry.is_dir() {
2525                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2526                            IgnoreStack::all()
2527                        } else {
2528                            ignore_stack.clone()
2529                        };
2530                    }
2531                }
2532            }
2533
2534            let mut child_entry = Entry::new(
2535                child_path.clone(),
2536                &child_metadata,
2537                &next_entry_id,
2538                root_char_bag,
2539            );
2540
2541            if child_metadata.is_dir {
2542                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
2543                child_entry.is_ignored = is_ignored;
2544                new_entries.push(child_entry);
2545                new_jobs.push(ScanJob {
2546                    abs_path: child_abs_path,
2547                    path: child_path,
2548                    ignore_stack: if is_ignored {
2549                        IgnoreStack::all()
2550                    } else {
2551                        ignore_stack.clone()
2552                    },
2553                    scan_queue: job.scan_queue.clone(),
2554                });
2555            } else {
2556                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
2557                new_entries.push(child_entry);
2558            };
2559        }
2560
2561        self.snapshot
2562            .lock()
2563            .populate_dir(job.path.clone(), new_entries, new_ignore);
2564        for new_job in new_jobs {
2565            job.scan_queue.send(new_job).await.unwrap();
2566        }
2567
2568        Ok(())
2569    }
2570
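        /// Applies a batch of file-system events to a copy of the snapshot: removes the
        /// affected paths, re-stats each reported path, rescans any directories that
        /// appeared, and then refreshes ignore statuses. Returns `false` only if the
        /// worktree root can no longer be canonicalized.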
2571    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2572        let mut snapshot = self.snapshot();
2573        snapshot.scan_id += 1;
2574
2575        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
2576            abs_path
2577        } else {
2578            return false;
2579        };
2580        let root_char_bag = snapshot.root_char_bag;
2581        let next_entry_id = snapshot.next_entry_id.clone();
2582
2583        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2584        events.dedup_by(|a, b| a.path.starts_with(&b.path));
2585
2586        for event in &events {
2587            match event.path.strip_prefix(&root_abs_path) {
2588                Ok(path) => snapshot.remove_path(&path),
2589                Err(_) => {
2590                    log::error!(
2591                        "unexpected event {:?} for root path {:?}",
2592                        event.path,
2593                        root_abs_path
2594                    );
2595                    continue;
2596                }
2597            }
2598        }
2599
2600        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2601        for event in events {
2602            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
2603                Ok(path) => Arc::from(path.to_path_buf()),
2604                Err(_) => {
2605                    log::error!(
2606                        "unexpected event {:?} for root path {:?}",
2607                        event.path,
2608                        root_abs_path
2609                    );
2610                    continue;
2611                }
2612            };
2613
2614            match self.fs.metadata(&event.path).await {
2615                Ok(Some(metadata)) => {
2616                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
2617                    let mut fs_entry = Entry::new(
2618                        path.clone(),
2619                        &metadata,
2620                        snapshot.next_entry_id.as_ref(),
2621                        snapshot.root_char_bag,
2622                    );
2623                    fs_entry.is_ignored = ignore_stack.is_all();
2624                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
2625                    if metadata.is_dir {
2626                        scan_queue_tx
2627                            .send(ScanJob {
2628                                abs_path: event.path,
2629                                path,
2630                                ignore_stack,
2631                                scan_queue: scan_queue_tx.clone(),
2632                            })
2633                            .await
2634                            .unwrap();
2635                    }
2636                }
2637                Ok(None) => {}
2638                Err(err) => {
2639                    // TODO - create a special 'error' entry in the entries tree to mark this
2640                    log::error!("error reading file on event {:?}", err);
2641                }
2642            }
2643        }
2644
2645        *self.snapshot.lock() = snapshot;
2646
2647        // Scan any directories that were created as part of this event batch.
2648        drop(scan_queue_tx);
2649        self.executor
2650            .scoped(|scope| {
2651                for _ in 0..self.executor.num_cpus() {
2652                    scope.spawn(async {
2653                        while let Ok(job) = scan_queue_rx.recv().await {
2654                            if let Err(err) = self
2655                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2656                                .await
2657                            {
2658                                log::error!("error scanning {:?}: {}", job.abs_path, err);
2659                            }
2660                        }
2661                    });
2662                }
2663            })
2664            .await;
2665
2666        // Attempt to detect renames only over a single batch of file-system events.
2667        self.snapshot.lock().removed_entry_ids.clear();
2668
2669        self.update_ignore_statuses().await;
2670        true
2671    }
2672
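        /// Re-evaluates ignore status for everything beneath any directory whose
        /// `.gitignore` changed during the current scan, and drops cached ignores whose
        /// `.gitignore` files no longer exist.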
2673    async fn update_ignore_statuses(&self) {
2674        let mut snapshot = self.snapshot();
2675
2676        let mut ignores_to_update = Vec::new();
2677        let mut ignores_to_delete = Vec::new();
2678        for (parent_path, (_, scan_id)) in &snapshot.ignores {
2679            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2680                ignores_to_update.push(parent_path.clone());
2681            }
2682
2683            let ignore_path = parent_path.join(&*GITIGNORE);
2684            if snapshot.entry_for_path(ignore_path).is_none() {
2685                ignores_to_delete.push(parent_path.clone());
2686            }
2687        }
2688
2689        for parent_path in ignores_to_delete {
2690            snapshot.ignores.remove(&parent_path);
2691            self.snapshot.lock().ignores.remove(&parent_path);
2692        }
2693
2694        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2695        ignores_to_update.sort_unstable();
2696        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2697        while let Some(parent_path) = ignores_to_update.next() {
2698            while ignores_to_update
2699                .peek()
2700                .map_or(false, |p| p.starts_with(&parent_path))
2701            {
2702                ignores_to_update.next().unwrap();
2703            }
2704
2705            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2706            ignore_queue_tx
2707                .send(UpdateIgnoreStatusJob {
2708                    path: parent_path,
2709                    ignore_stack,
2710                    ignore_queue: ignore_queue_tx.clone(),
2711                })
2712                .await
2713                .unwrap();
2714        }
2715        drop(ignore_queue_tx);
2716
2717        self.executor
2718            .scoped(|scope| {
2719                for _ in 0..self.executor.num_cpus() {
2720                    scope.spawn(async {
2721                        while let Ok(job) = ignore_queue_rx.recv().await {
2722                            self.update_ignore_status(job, &snapshot).await;
2723                        }
2724                    });
2725                }
2726            })
2727            .await;
2728    }
2729
2730    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
2731        let mut ignore_stack = job.ignore_stack;
2732        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2733            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2734        }
2735
2736        let mut entries_by_id_edits = Vec::new();
2737        let mut entries_by_path_edits = Vec::new();
2738        for mut entry in snapshot.child_entries(&job.path).cloned() {
2739            let was_ignored = entry.is_ignored;
2740            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2741            if entry.is_dir() {
2742                let child_ignore_stack = if entry.is_ignored {
2743                    IgnoreStack::all()
2744                } else {
2745                    ignore_stack.clone()
2746                };
2747                job.ignore_queue
2748                    .send(UpdateIgnoreStatusJob {
2749                        path: entry.path.clone(),
2750                        ignore_stack: child_ignore_stack,
2751                        ignore_queue: job.ignore_queue.clone(),
2752                    })
2753                    .await
2754                    .unwrap();
2755            }
2756
2757            if entry.is_ignored != was_ignored {
2758                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2759                path_entry.scan_id = snapshot.scan_id;
2760                path_entry.is_ignored = entry.is_ignored;
2761                entries_by_id_edits.push(Edit::Insert(path_entry));
2762                entries_by_path_edits.push(Edit::Insert(entry));
2763            }
2764        }
2765
2766        let mut snapshot = self.snapshot.lock();
2767        snapshot.entries_by_path.edit(entries_by_path_edits, &());
2768        snapshot.entries_by_id.edit(entries_by_id_edits, &());
2769    }
2770}
2771
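/// Re-stats the file at `abs_path` and inserts a fresh entry for `path` into the
/// shared snapshot, returning the inserted entry.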
2772async fn refresh_entry(
2773    fs: &dyn Fs,
2774    snapshot: &Mutex<Snapshot>,
2775    path: Arc<Path>,
2776    abs_path: &Path,
2777) -> Result<Entry> {
2778    let root_char_bag;
2779    let next_entry_id;
2780    {
2781        let snapshot = snapshot.lock();
2782        root_char_bag = snapshot.root_char_bag;
2783        next_entry_id = snapshot.next_entry_id.clone();
2784    }
2785    let entry = Entry::new(
2786        path,
2787        &fs.metadata(abs_path)
2788            .await?
2789            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2790        &next_entry_id,
2791        root_char_bag,
2792    );
2793    Ok(snapshot.lock().insert_entry(entry, fs))
2794}
2795
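/// Extends the worktree root's `CharBag` with the lowercased characters of `path`,
/// producing the bag used for fuzzy matching. For example (illustrative), the path
/// `src/Main.rs` contributes the characters of `src/main.rs`.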
2796fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2797    let mut result = root_char_bag;
2798    result.extend(
2799        path.to_string_lossy()
2800            .chars()
2801            .map(|c| c.to_ascii_lowercase()),
2802    );
2803    result
2804}
2805
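/// A unit of work for the background scanner: one directory to scan, the ignore
/// stack in effect there, and a handle to the queue for enqueueing subdirectories.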
2806struct ScanJob {
2807    abs_path: PathBuf,
2808    path: Arc<Path>,
2809    ignore_stack: Arc<IgnoreStack>,
2810    scan_queue: Sender<ScanJob>,
2811}
2812
2813struct UpdateIgnoreStatusJob {
2814    path: Arc<Path>,
2815    ignore_stack: Arc<IgnoreStack>,
2816    ignore_queue: Sender<UpdateIgnoreStatusJob>,
2817}
2818
2819pub trait WorktreeHandle {
2820    #[cfg(test)]
2821    fn flush_fs_events<'a>(
2822        &self,
2823        cx: &'a gpui::TestAppContext,
2824    ) -> futures::future::LocalBoxFuture<'a, ()>;
2825}
2826
2827impl WorktreeHandle for ModelHandle<Worktree> {
2828    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2829    // occurred before the worktree was constructed. These events can cause the worktree to perform
2830    // extra directory scans and emit extra scan-state notifications.
2831    //
2832    // This function mutates the worktree's directory and waits for those mutations to be picked up,
2833    // to ensure that all redundant FS events have already been processed.
2834    #[cfg(test)]
2835    fn flush_fs_events<'a>(
2836        &self,
2837        cx: &'a gpui::TestAppContext,
2838    ) -> futures::future::LocalBoxFuture<'a, ()> {
2839        use smol::future::FutureExt;
2840
2841        let filename = "fs-event-sentinel";
2842        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
2843        let tree = self.clone();
2844        async move {
2845            std::fs::write(root_path.join(filename), "").unwrap();
2846            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
2847                .await;
2848
2849            std::fs::remove_file(root_path.join(filename)).unwrap();
2850            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
2851                .await;
2852
2853            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2854                .await;
2855        }
2856        .boxed_local()
2857    }
2858}
2859
2860#[derive(Clone, Debug)]
2861struct TraversalProgress<'a> {
2862    max_path: &'a Path,
2863    count: usize,
2864    visible_count: usize,
2865    file_count: usize,
2866    visible_file_count: usize,
2867}
2868
2869impl<'a> TraversalProgress<'a> {
2870    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2871        match (include_ignored, include_dirs) {
2872            (true, true) => self.count,
2873            (true, false) => self.file_count,
2874            (false, true) => self.visible_count,
2875            (false, false) => self.visible_file_count,
2876        }
2877    }
2878}
2879
2880impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2881    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2882        self.max_path = summary.max_path.as_ref();
2883        self.count += summary.count;
2884        self.visible_count += summary.visible_count;
2885        self.file_count += summary.file_count;
2886        self.visible_file_count += summary.visible_file_count;
2887    }
2888}
2889
2890impl<'a> Default for TraversalProgress<'a> {
2891    fn default() -> Self {
2892        Self {
2893            max_path: Path::new(""),
2894            count: 0,
2895            visible_count: 0,
2896            file_count: 0,
2897            visible_file_count: 0,
2898        }
2899    }
2900}
2901
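/// An ordered iterator over worktree entries, backed by a sum-tree cursor and
/// filtered by `include_dirs` / `include_ignored`. For example (illustrative),
/// `snapshot.files(false, 0)` yields every non-ignored file in path order.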
2902pub struct Traversal<'a> {
2903    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2904    include_ignored: bool,
2905    include_dirs: bool,
2906}
2907
2908impl<'a> Traversal<'a> {
2909    pub fn advance(&mut self) -> bool {
2910        self.advance_to_offset(self.offset() + 1)
2911    }
2912
2913    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2914        self.cursor.seek_forward(
2915            &TraversalTarget::Count {
2916                count: offset,
2917                include_dirs: self.include_dirs,
2918                include_ignored: self.include_ignored,
2919            },
2920            Bias::Right,
2921            &(),
2922        )
2923    }
2924
2925    pub fn advance_to_sibling(&mut self) -> bool {
2926        while let Some(entry) = self.cursor.item() {
2927            self.cursor.seek_forward(
2928                &TraversalTarget::PathSuccessor(&entry.path),
2929                Bias::Left,
2930                &(),
2931            );
2932            if let Some(entry) = self.cursor.item() {
2933                if (self.include_dirs || !entry.is_dir())
2934                    && (self.include_ignored || !entry.is_ignored)
2935                {
2936                    return true;
2937                }
2938            }
2939        }
2940        false
2941    }
2942
2943    pub fn entry(&self) -> Option<&'a Entry> {
2944        self.cursor.item()
2945    }
2946
2947    pub fn offset(&self) -> usize {
2948        self.cursor
2949            .start()
2950            .count(self.include_dirs, self.include_ignored)
2951    }
2952}
2953
2954impl<'a> Iterator for Traversal<'a> {
2955    type Item = &'a Entry;
2956
2957    fn next(&mut self) -> Option<Self::Item> {
2958        if let Some(item) = self.entry() {
2959            self.advance();
2960            Some(item)
2961        } else {
2962            None
2963        }
2964    }
2965}
2966
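/// A seek target for `Traversal`'s cursor: an exact path, the position just past
/// everything beneath a path, or an absolute entry count under the given filters.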
2967#[derive(Debug)]
2968enum TraversalTarget<'a> {
2969    Path(&'a Path),
2970    PathSuccessor(&'a Path),
2971    Count {
2972        count: usize,
2973        include_ignored: bool,
2974        include_dirs: bool,
2975    },
2976}
2977
2978impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2979    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2980        match self {
2981            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2982            TraversalTarget::PathSuccessor(path) => {
2983                if !cursor_location.max_path.starts_with(path) {
2984                    Ordering::Equal
2985                } else {
2986                    Ordering::Greater
2987                }
2988            }
2989            TraversalTarget::Count {
2990                count,
2991                include_dirs,
2992                include_ignored,
2993            } => Ord::cmp(
2994                count,
2995                &cursor_location.count(*include_dirs, *include_ignored),
2996            ),
2997        }
2998    }
2999}
3000
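    /// Iterates over the immediate children of `parent_path`, using
    /// `advance_to_sibling` to step over each child's own descendants.
    ///
    /// A sketch of typical use, assuming a snapshot value exposing the
    /// `child_entries` accessor used in the tests below:
    ///
    /// ```ignore
    /// for entry in snapshot.child_entries(Path::new("a")) {
    ///     println!("{:?}", entry.path);
    /// }
    /// ```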
3001struct ChildEntriesIter<'a> {
3002    parent_path: &'a Path,
3003    traversal: Traversal<'a>,
3004}
3005
3006impl<'a> Iterator for ChildEntriesIter<'a> {
3007    type Item = &'a Entry;
3008
3009    fn next(&mut self) -> Option<Self::Item> {
3010        if let Some(item) = self.traversal.entry() {
3011            if item.path.starts_with(&self.parent_path) {
3012                self.traversal.advance_to_sibling();
3013                return Some(item);
3014            }
3015        }
3016        None
3017    }
3018}
3019
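    // Conversions between the in-memory `Entry` representation and the
    // protobuf form used when sharing a worktree with remote peers.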
3020impl<'a> From<&'a Entry> for proto::Entry {
3021    fn from(entry: &'a Entry) -> Self {
3022        Self {
3023            id: entry.id as u64,
3024            is_dir: entry.is_dir(),
3025            path: entry.path.to_string_lossy().to_string(),
3026            inode: entry.inode,
3027            mtime: Some(entry.mtime.into()),
3028            is_symlink: entry.is_symlink,
3029            is_ignored: entry.is_ignored,
3030        }
3031    }
3032}
3033
3034impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3035    type Error = anyhow::Error;
3036
3037    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3038        if let Some(mtime) = entry.mtime {
3039            let kind = if entry.is_dir {
3040                EntryKind::Dir
3041            } else {
3042                let mut char_bag = root_char_bag.clone();
3043                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3044                EntryKind::File(char_bag)
3045            };
3046            let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3047            Ok(Entry {
3048                id: entry.id as usize,
3049                kind,
3050                path: path.clone(),
3051                inode: entry.inode,
3052                mtime: mtime.into(),
3053                is_symlink: entry.is_symlink,
3054                is_ignored: entry.is_ignored,
3055            })
3056        } else {
3057            Err(anyhow!(
3058                "missing mtime in remote worktree entry {:?}",
3059                entry.path
3060            ))
3061        }
3062    }
3063}
3064
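    /// Converts LSP's zero-based line/character positions into the UTF-16
    /// points used by buffers.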
3065trait ToPointUtf16 {
3066    fn to_point_utf16(self) -> PointUtf16;
3067}
3068
3069impl ToPointUtf16 for lsp::Position {
3070    fn to_point_utf16(self) -> PointUtf16 {
3071        PointUtf16::new(self.line, self.character)
3072    }
3073}
3074
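    /// Yields the ranges within `abs_path` that a diagnostic touches: the
    /// ranges of any related-information entries that refer back to the same
    /// file, followed by the diagnostic's own primary range.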
3075fn diagnostic_ranges<'a>(
3076    diagnostic: &'a lsp::Diagnostic,
3077    abs_path: &'a Path,
3078) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
3079    diagnostic
3080        .related_information
3081        .iter()
3082        .flatten()
3083        .filter_map(move |info| {
3084            if info.location.uri.to_file_path().ok()? == abs_path {
3085                let info_start = PointUtf16::new(
3086                    info.location.range.start.line,
3087                    info.location.range.start.character,
3088                );
3089                let info_end = PointUtf16::new(
3090                    info.location.range.end.line,
3091                    info.location.range.end.character,
3092                );
3093                Some(info_start..info_end)
3094            } else {
3095                None
3096            }
3097        })
3098        .chain(Some(
3099            diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
3100        ))
3101}
3102
3103#[cfg(test)]
3104mod tests {
3105    use super::*;
3106    use crate::fs::FakeFs;
3107    use anyhow::Result;
3108    use client::test::{FakeHttpClient, FakeServer};
3109    use fs::RealFs;
3110    use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3111    use language::{Diagnostic, LanguageConfig};
3112    use lsp::Url;
3113    use rand::prelude::*;
3114    use serde_json::json;
3115    use std::{cell::RefCell, rc::Rc};
3116    use std::{
3117        env,
3118        fmt::Write,
3119        time::{SystemTime, UNIX_EPOCH},
3120    };
3121    use text::Point;
3122    use unindent::Unindent as _;
3123    use util::test::temp_tree;
3124
3125    #[gpui::test]
3126    async fn test_traversal(mut cx: gpui::TestAppContext) {
3127        let fs = FakeFs::new();
3128        fs.insert_tree(
3129            "/root",
3130            json!({
3131               ".gitignore": "a/b\n",
3132               "a": {
3133                   "b": "",
3134                   "c": "",
3135               }
3136            }),
3137        )
3138        .await;
3139
3140        let http_client = FakeHttpClient::with_404_response();
3141        let client = Client::new(http_client.clone());
3142        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3143
3144        let tree = Worktree::open_local(
3145            client,
3146            user_store,
3147            Arc::from(Path::new("/root")),
3148            Arc::new(fs),
3149            Default::default(),
3150            &mut cx.to_async(),
3151        )
3152        .await
3153        .unwrap();
3154        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3155            .await;
3156
3157        tree.read_with(&cx, |tree, _| {
3158            assert_eq!(
3159                tree.entries(false)
3160                    .map(|entry| entry.path.as_ref())
3161                    .collect::<Vec<_>>(),
3162                vec![
3163                    Path::new(""),
3164                    Path::new(".gitignore"),
3165                    Path::new("a"),
3166                    Path::new("a/c"),
3167                ]
3168            );
3169        })
3170    }
3171
3172    #[gpui::test]
3173    async fn test_save_file(mut cx: gpui::TestAppContext) {
3174        let dir = temp_tree(json!({
3175            "file1": "the old contents",
3176        }));
3177
3178        let http_client = FakeHttpClient::with_404_response();
3179        let client = Client::new(http_client.clone());
3180        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3181
3182        let tree = Worktree::open_local(
3183            client,
3184            user_store,
3185            dir.path(),
3186            Arc::new(RealFs),
3187            Default::default(),
3188            &mut cx.to_async(),
3189        )
3190        .await
3191        .unwrap();
3192        let buffer = tree
3193            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3194            .await
3195            .unwrap();
3196        let save = buffer.update(&mut cx, |buffer, cx| {
3197            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3198            buffer.save(cx).unwrap()
3199        });
3200        save.await.unwrap();
3201
3202        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
3203        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3204    }
3205
3206    #[gpui::test]
3207    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
3208        let dir = temp_tree(json!({
3209            "file1": "the old contents",
3210        }));
3211        let file_path = dir.path().join("file1");
3212
3213        let http_client = FakeHttpClient::with_404_response();
3214        let client = Client::new(http_client.clone());
3215        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3216
3217        let tree = Worktree::open_local(
3218            client,
3219            user_store,
3220            file_path.clone(),
3221            Arc::new(RealFs),
3222            Default::default(),
3223            &mut cx.to_async(),
3224        )
3225        .await
3226        .unwrap();
3227        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3228            .await;
3229        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));
3230
3231        let buffer = tree
3232            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
3233            .await
3234            .unwrap();
3235        let save = buffer.update(&mut cx, |buffer, cx| {
3236            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3237            buffer.save(cx).unwrap()
3238        });
3239        save.await.unwrap();
3240
3241        let new_text = std::fs::read_to_string(file_path).unwrap();
3242        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3243    }
3244
3245    #[gpui::test]
3246    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3247        let dir = temp_tree(json!({
3248            "a": {
3249                "file1": "",
3250                "file2": "",
3251                "file3": "",
3252            },
3253            "b": {
3254                "c": {
3255                    "file4": "",
3256                    "file5": "",
3257                }
3258            }
3259        }));
3260
3261        let user_id = 5;
3262        let http_client = FakeHttpClient::with_404_response();
3263        let mut client = Client::new(http_client.clone());
3264        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3265        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3266        let tree = Worktree::open_local(
3267            client,
3268            user_store.clone(),
3269            dir.path(),
3270            Arc::new(RealFs),
3271            Default::default(),
3272            &mut cx.to_async(),
3273        )
3274        .await
3275        .unwrap();
3276
3277        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3278            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3279            async move { buffer.await.unwrap() }
3280        };
3281        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3282            tree.read_with(cx, |tree, _| {
3283                tree.entry_for_path(path)
3284                    .unwrap_or_else(|| panic!("no entry for path {}", path))
3285                    .id
3286            })
3287        };
3288
3289        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3290        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3291        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3292        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3293
3294        let file2_id = id_for_path("a/file2", &cx);
3295        let file3_id = id_for_path("a/file3", &cx);
3296        let file4_id = id_for_path("b/c/file4", &cx);
3297
3298        // Wait for the initial scan.
3299        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3300            .await;
3301
3302        // Create a remote copy of this worktree.
3303        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3304        let remote = Worktree::remote(
3305            1,
3306            1,
3307            initial_snapshot.to_proto(),
3308            Client::new(http_client.clone()),
3309            user_store,
3310            Default::default(),
3311            &mut cx.to_async(),
3312        )
3313        .await
3314        .unwrap();
3315
3316        cx.read(|cx| {
3317            assert!(!buffer2.read(cx).is_dirty());
3318            assert!(!buffer3.read(cx).is_dirty());
3319            assert!(!buffer4.read(cx).is_dirty());
3320            assert!(!buffer5.read(cx).is_dirty());
3321        });
3322
3323        // Rename and delete files and directories.
3324        tree.flush_fs_events(&cx).await;
3325        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3326        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3327        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3328        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3329        tree.flush_fs_events(&cx).await;
3330
3331        let expected_paths = vec![
3332            "a",
3333            "a/file1",
3334            "a/file2.new",
3335            "b",
3336            "d",
3337            "d/file3",
3338            "d/file4",
3339        ];
3340
3341        cx.read(|app| {
3342            assert_eq!(
3343                tree.read(app)
3344                    .paths()
3345                    .map(|p| p.to_str().unwrap())
3346                    .collect::<Vec<_>>(),
3347                expected_paths
3348            );
3349
3350            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3351            assert_eq!(id_for_path("d/file3", &cx), file3_id);
3352            assert_eq!(id_for_path("d/file4", &cx), file4_id);
3353
3354            assert_eq!(
3355                buffer2.read(app).file().unwrap().path().as_ref(),
3356                Path::new("a/file2.new")
3357            );
3358            assert_eq!(
3359                buffer3.read(app).file().unwrap().path().as_ref(),
3360                Path::new("d/file3")
3361            );
3362            assert_eq!(
3363                buffer4.read(app).file().unwrap().path().as_ref(),
3364                Path::new("d/file4")
3365            );
3366            assert_eq!(
3367                buffer5.read(app).file().unwrap().path().as_ref(),
3368                Path::new("b/c/file5")
3369            );
3370
3371            assert!(!buffer2.read(app).file().unwrap().is_deleted());
3372            assert!(!buffer3.read(app).file().unwrap().is_deleted());
3373            assert!(!buffer4.read(app).file().unwrap().is_deleted());
3374            assert!(buffer5.read(app).file().unwrap().is_deleted());
3375        });
3376
3377        // Update the remote worktree. Check that it becomes consistent with the
3378        // local worktree.
3379        remote.update(&mut cx, |remote, cx| {
3380            let update_message =
3381                tree.read(cx)
3382                    .snapshot()
3383                    .build_update(&initial_snapshot, 1, 1, true);
3384            remote
3385                .as_remote_mut()
3386                .unwrap()
3387                .snapshot
3388                .apply_update(update_message)
3389                .unwrap();
3390
3391            assert_eq!(
3392                remote
3393                    .paths()
3394                    .map(|p| p.to_str().unwrap())
3395                    .collect::<Vec<_>>(),
3396                expected_paths
3397            );
3398        });
3399    }
3400
3401    #[gpui::test]
3402    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3403        let dir = temp_tree(json!({
3404            ".git": {},
3405            ".gitignore": "ignored-dir\n",
3406            "tracked-dir": {
3407                "tracked-file1": "tracked contents",
3408            },
3409            "ignored-dir": {
3410                "ignored-file1": "ignored contents",
3411            }
3412        }));
3413
3414        let http_client = FakeHttpClient::with_404_response();
3415        let client = Client::new(http_client.clone());
3416        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3417
3418        let tree = Worktree::open_local(
3419            client,
3420            user_store,
3421            dir.path(),
3422            Arc::new(RealFs),
3423            Default::default(),
3424            &mut cx.to_async(),
3425        )
3426        .await
3427        .unwrap();
3428        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3429            .await;
3430        tree.flush_fs_events(&cx).await;
3431        cx.read(|cx| {
3432            let tree = tree.read(cx);
3433            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3434            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3435            assert!(!tracked.is_ignored);
3436            assert!(ignored.is_ignored);
3437        });
3438
3439        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3440        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3441        tree.flush_fs_events(&cx).await;
3442        cx.read(|cx| {
3443            let tree = tree.read(cx);
3444            let dot_git = tree.entry_for_path(".git").unwrap();
3445            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3446            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3447            assert!(!tracked.is_ignored);
3448            assert!(ignored.is_ignored);
3449            assert!(dot_git.is_ignored);
3450        });
3451    }
3452
3453    #[gpui::test]
3454    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3455        let user_id = 100;
3456        let http_client = FakeHttpClient::with_404_response();
3457        let mut client = Client::new(http_client);
3458        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3459        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3460
3461        let fs = Arc::new(FakeFs::new());
3462        fs.insert_tree(
3463            "/the-dir",
3464            json!({
3465                "a.txt": "a-contents",
3466                "b.txt": "b-contents",
3467            }),
3468        )
3469        .await;
3470
3471        let worktree = Worktree::open_local(
3472            client.clone(),
3473            user_store,
3474            "/the-dir".as_ref(),
3475            fs,
3476            Default::default(),
3477            &mut cx.to_async(),
3478        )
3479        .await
3480        .unwrap();
3481
3482        // Spawn multiple tasks to open paths, repeating some paths.
3483        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3484            (
3485                worktree.open_buffer("a.txt", cx),
3486                worktree.open_buffer("b.txt", cx),
3487                worktree.open_buffer("a.txt", cx),
3488            )
3489        });
3490
3491        let buffer_a_1 = buffer_a_1.await.unwrap();
3492        let buffer_a_2 = buffer_a_2.await.unwrap();
3493        let buffer_b = buffer_b.await.unwrap();
3494        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3495        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3496
3497        // There is only one buffer per path.
3498        let buffer_a_id = buffer_a_1.id();
3499        assert_eq!(buffer_a_2.id(), buffer_a_id);
3500
3501        // Open the same path again while it is still open.
3502        drop(buffer_a_1);
3503        let buffer_a_3 = worktree
3504            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3505            .await
3506            .unwrap();
3507
3508        // There's still only one buffer per path.
3509        assert_eq!(buffer_a_3.id(), buffer_a_id);
3510    }
3511
3512    #[gpui::test]
3513    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
3514        use std::fs;
3515
3516        let dir = temp_tree(json!({
3517            "file1": "abc",
3518            "file2": "def",
3519            "file3": "ghi",
3520        }));
3521        let http_client = FakeHttpClient::with_404_response();
3522        let client = Client::new(http_client.clone());
3523        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3524
3525        let tree = Worktree::open_local(
3526            client,
3527            user_store,
3528            dir.path(),
3529            Arc::new(RealFs),
3530            Default::default(),
3531            &mut cx.to_async(),
3532        )
3533        .await
3534        .unwrap();
3535        tree.flush_fs_events(&cx).await;
3536        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3537            .await;
3538
3539        let buffer1 = tree
3540            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3541            .await
3542            .unwrap();
3543        let events = Rc::new(RefCell::new(Vec::new()));
3544
3545        // initially, the buffer isn't dirty.
3546        buffer1.update(&mut cx, |buffer, cx| {
3547            cx.subscribe(&buffer1, {
3548                let events = events.clone();
3549                move |_, _, event, _| events.borrow_mut().push(event.clone())
3550            })
3551            .detach();
3552
3553            assert!(!buffer.is_dirty());
3554            assert!(events.borrow().is_empty());
3555
3556            buffer.edit(vec![1..2], "", cx);
3557        });
3558
3559        // after the first edit, the buffer is dirty, and emits a dirtied event.
3560        buffer1.update(&mut cx, |buffer, cx| {
3561            assert_eq!(buffer.text(), "ac");
3562            assert!(buffer.is_dirty());
3563            assert_eq!(
3564                *events.borrow(),
3565                &[language::Event::Edited, language::Event::Dirtied]
3566            );
3567            events.borrow_mut().clear();
3568            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
3569        });
3570
3571        // after saving, the buffer is not dirty, and emits a saved event.
3572        buffer1.update(&mut cx, |buffer, cx| {
3573            assert!(!buffer.is_dirty());
3574            assert_eq!(*events.borrow(), &[language::Event::Saved]);
3575            events.borrow_mut().clear();
3576
3577            buffer.edit(vec![1..1], "B", cx);
3578            buffer.edit(vec![2..2], "D", cx);
3579        });
3580
3581        // after editing again, the buffer is dirty, and emits another dirty event.
3582        buffer1.update(&mut cx, |buffer, cx| {
3583            assert_eq!(buffer.text(), "aBDc");
3584            assert!(buffer.is_dirty());
3585            assert_eq!(
3586                *events.borrow(),
3587                &[
3588                    language::Event::Edited,
3589                    language::Event::Dirtied,
3590                    language::Event::Edited,
3591                ],
3592            );
3593            events.borrow_mut().clear();
3594
3595            // TODO - currently, after restoring the buffer to its
3596            // previously-saved state, the buffer is still considered dirty.
3597            buffer.edit([1..3], "", cx);
3598            assert_eq!(buffer.text(), "ac");
3599            assert!(buffer.is_dirty());
3600        });
3601
3602        assert_eq!(*events.borrow(), &[language::Event::Edited]);
3603
3604        // When a file is deleted, the buffer is considered dirty.
3605        let events = Rc::new(RefCell::new(Vec::new()));
3606        let buffer2 = tree
3607            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
3608            .await
3609            .unwrap();
3610        buffer2.update(&mut cx, |_, cx| {
3611            cx.subscribe(&buffer2, {
3612                let events = events.clone();
3613                move |_, _, event, _| events.borrow_mut().push(event.clone())
3614            })
3615            .detach();
3616        });
3617
3618        fs::remove_file(dir.path().join("file2")).unwrap();
3619        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
3620        assert_eq!(
3621            *events.borrow(),
3622            &[language::Event::Dirtied, language::Event::FileHandleChanged]
3623        );
3624
3625        // When a file is already dirty when deleted, we don't emit a Dirtied event.
3626        let events = Rc::new(RefCell::new(Vec::new()));
3627        let buffer3 = tree
3628            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
3629            .await
3630            .unwrap();
3631        buffer3.update(&mut cx, |_, cx| {
3632            cx.subscribe(&buffer3, {
3633                let events = events.clone();
3634                move |_, _, event, _| events.borrow_mut().push(event.clone())
3635            })
3636            .detach();
3637        });
3638
3639        tree.flush_fs_events(&cx).await;
3640        buffer3.update(&mut cx, |buffer, cx| {
3641            buffer.edit(Some(0..0), "x", cx);
3642        });
3643        events.borrow_mut().clear();
3644        fs::remove_file(dir.path().join("file3")).unwrap();
3645        buffer3
3646            .condition(&cx, |_, _| !events.borrow().is_empty())
3647            .await;
3648        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
3649        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
3650    }
3651
3652    #[gpui::test]
3653    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
3654        use std::fs;
3655
3656        let initial_contents = "aaa\nbbbbb\nc\n";
3657        let dir = temp_tree(json!({ "the-file": initial_contents }));
3658        let http_client = FakeHttpClient::with_404_response();
3659        let client = Client::new(http_client.clone());
3660        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3661
3662        let tree = Worktree::open_local(
3663            client,
3664            user_store,
3665            dir.path(),
3666            Arc::new(RealFs),
3667            Default::default(),
3668            &mut cx.to_async(),
3669        )
3670        .await
3671        .unwrap();
3672        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3673            .await;
3674
3675        let abs_path = dir.path().join("the-file");
3676        let buffer = tree
3677            .update(&mut cx, |tree, cx| {
3678                tree.open_buffer(Path::new("the-file"), cx)
3679            })
3680            .await
3681            .unwrap();
3682
3683        // TODO
3684        // Add a cursor on each row.
3685        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
3686        //     assert!(!buffer.is_dirty());
3687        //     buffer.add_selection_set(
3688        //         &(0..3)
3689        //             .map(|row| Selection {
3690        //                 id: row as usize,
3691        //                 start: Point::new(row, 1),
3692        //                 end: Point::new(row, 1),
3693        //                 reversed: false,
3694        //                 goal: SelectionGoal::None,
3695        //             })
3696        //             .collect::<Vec<_>>(),
3697        //         cx,
3698        //     )
3699        // });
3700
3701        // Change the file on disk, adding two new lines of text, and removing
3702        // one line.
3703        buffer.read_with(&cx, |buffer, _| {
3704            assert!(!buffer.is_dirty());
3705            assert!(!buffer.has_conflict());
3706        });
3707        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
3708        fs::write(&abs_path, new_contents).unwrap();
3709
3710        // Because the buffer was not modified, it is reloaded from disk. Its
3711        // contents are edited according to the diff between the old and new
3712        // file contents.
3713        buffer
3714            .condition(&cx, |buffer, _| buffer.text() == new_contents)
3715            .await;
3716
3717        buffer.update(&mut cx, |buffer, _| {
3718            assert_eq!(buffer.text(), new_contents);
3719            assert!(!buffer.is_dirty());
3720            assert!(!buffer.has_conflict());
3721
3722            // TODO
3723            // let cursor_positions = buffer
3724            //     .selection_set(selection_set_id)
3725            //     .unwrap()
3726            //     .selections::<Point>(&*buffer)
3727            //     .map(|selection| {
3728            //         assert_eq!(selection.start, selection.end);
3729            //         selection.start
3730            //     })
3731            //     .collect::<Vec<_>>();
3732            // assert_eq!(
3733            //     cursor_positions,
3734            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
3735            // );
3736        });
3737
3738        // Modify the buffer
3739        buffer.update(&mut cx, |buffer, cx| {
3740            buffer.edit(vec![0..0], " ", cx);
3741            assert!(buffer.is_dirty());
3742            assert!(!buffer.has_conflict());
3743        });
3744
3745        // Change the file on disk again, adding blank lines to the beginning.
3746        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();
3747
3748        // Because the buffer is modified, it doesn't reload from disk, but is
3749        // marked as having a conflict.
3750        buffer
3751            .condition(&cx, |buffer, _| buffer.has_conflict())
3752            .await;
3753    }
3754
3755    #[gpui::test]
3756    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
3757        let (language_server_config, mut fake_server) =
3758            LanguageServerConfig::fake(cx.background()).await;
3759        let mut languages = LanguageRegistry::new();
3760        languages.add(Arc::new(Language::new(
3761            LanguageConfig {
3762                name: "Rust".to_string(),
3763                path_suffixes: vec!["rs".to_string()],
3764                language_server: Some(language_server_config),
3765                ..Default::default()
3766            },
3767            Some(tree_sitter_rust::language()),
3768        )));
3769
3770        let dir = temp_tree(json!({
3771            "a.rs": "fn a() { A }",
3772            "b.rs": "const y: i32 = 1",
3773        }));
3774
3775        let http_client = FakeHttpClient::with_404_response();
3776        let client = Client::new(http_client.clone());
3777        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3778
3779        let tree = Worktree::open_local(
3780            client,
3781            user_store,
3782            dir.path(),
3783            Arc::new(RealFs),
3784            Arc::new(languages),
3785            &mut cx.to_async(),
3786        )
3787        .await
3788        .unwrap();
3789        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3790            .await;
3791
3792        // Cause worktree to start the fake language server
3793        let _buffer = tree
3794            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
3795            .await
3796            .unwrap();
3797
3798        fake_server
3799            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
3800                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
3801                version: None,
3802                diagnostics: vec![lsp::Diagnostic {
3803                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
3804                    severity: Some(lsp::DiagnosticSeverity::ERROR),
3805                    message: "undefined variable 'A'".to_string(),
3806                    ..Default::default()
3807                }],
3808            })
3809            .await;
3810
3811        let buffer = tree
3812            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3813            .await
3814            .unwrap();
3815
3816        buffer.read_with(&cx, |buffer, _| {
3817            let snapshot = buffer.snapshot();
3818            let diagnostics = snapshot
3819                .diagnostics_in_range::<_, Point>(0..buffer.len())
3820                .collect::<Vec<_>>();
3821            assert_eq!(
3822                diagnostics,
3823                &[DiagnosticEntry {
3824                    range: Point::new(0, 9)..Point::new(0, 10),
3825                    diagnostic: Diagnostic {
3826                        severity: lsp::DiagnosticSeverity::ERROR,
3827                        message: "undefined variable 'A'".to_string(),
3828                        group_id: 0,
3829                        is_primary: true,
3830                        ..Default::default()
3831                    }
3832                }]
3833            )
3834        });
3835    }
3836
3837    #[gpui::test]
3838    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
3839        let fs = Arc::new(FakeFs::new());
3840        let http_client = FakeHttpClient::with_404_response();
3841        let client = Client::new(http_client.clone());
3842        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3843
3844        fs.insert_tree(
3845            "/the-dir",
3846            json!({
3847                "a.rs": "
3848                    fn foo(mut v: Vec<usize>) {
3849                        for x in &v {
3850                            v.push(1);
3851                        }
3852                    }
3853                "
3854                .unindent(),
3855            }),
3856        )
3857        .await;
3858
3859        let worktree = Worktree::open_local(
3860            client.clone(),
3861            user_store,
3862            "/the-dir".as_ref(),
3863            fs,
3864            Default::default(),
3865            &mut cx.to_async(),
3866        )
3867        .await
3868        .unwrap();
3869
3870        let buffer = worktree
3871            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3872            .await
3873            .unwrap();
3874
3875        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
3876        let message = lsp::PublishDiagnosticsParams {
3877            uri: buffer_uri.clone(),
3878            diagnostics: vec![
3879                lsp::Diagnostic {
3880                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3881                    severity: Some(DiagnosticSeverity::WARNING),
3882                    message: "error 1".to_string(),
3883                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3884                        location: lsp::Location {
3885                            uri: buffer_uri.clone(),
3886                            range: lsp::Range::new(
3887                                lsp::Position::new(1, 8),
3888                                lsp::Position::new(1, 9),
3889                            ),
3890                        },
3891                        message: "error 1 hint 1".to_string(),
3892                    }]),
3893                    ..Default::default()
3894                },
3895                lsp::Diagnostic {
3896                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3897                    severity: Some(DiagnosticSeverity::HINT),
3898                    message: "error 1 hint 1".to_string(),
3899                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3900                        location: lsp::Location {
3901                            uri: buffer_uri.clone(),
3902                            range: lsp::Range::new(
3903                                lsp::Position::new(1, 8),
3904                                lsp::Position::new(1, 9),
3905                            ),
3906                        },
3907                        message: "original diagnostic".to_string(),
3908                    }]),
3909                    ..Default::default()
3910                },
3911                lsp::Diagnostic {
3912                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
3913                    severity: Some(DiagnosticSeverity::ERROR),
3914                    message: "error 2".to_string(),
3915                    related_information: Some(vec![
3916                        lsp::DiagnosticRelatedInformation {
3917                            location: lsp::Location {
3918                                uri: buffer_uri.clone(),
3919                                range: lsp::Range::new(
3920                                    lsp::Position::new(1, 13),
3921                                    lsp::Position::new(1, 15),
3922                                ),
3923                            },
3924                            message: "error 2 hint 1".to_string(),
3925                        },
3926                        lsp::DiagnosticRelatedInformation {
3927                            location: lsp::Location {
3928                                uri: buffer_uri.clone(),
3929                                range: lsp::Range::new(
3930                                    lsp::Position::new(1, 13),
3931                                    lsp::Position::new(1, 15),
3932                                ),
3933                            },
3934                            message: "error 2 hint 2".to_string(),
3935                        },
3936                    ]),
3937                    ..Default::default()
3938                },
3939                lsp::Diagnostic {
3940                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
3941                    severity: Some(DiagnosticSeverity::HINT),
3942                    message: "error 2 hint 1".to_string(),
3943                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3944                        location: lsp::Location {
3945                            uri: buffer_uri.clone(),
3946                            range: lsp::Range::new(
3947                                lsp::Position::new(2, 8),
3948                                lsp::Position::new(2, 17),
3949                            ),
3950                        },
3951                        message: "original diagnostic".to_string(),
3952                    }]),
3953                    ..Default::default()
3954                },
3955                lsp::Diagnostic {
3956                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
3957                    severity: Some(DiagnosticSeverity::HINT),
3958                    message: "error 2 hint 2".to_string(),
3959                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3960                        location: lsp::Location {
3961                            uri: buffer_uri.clone(),
3962                            range: lsp::Range::new(
3963                                lsp::Position::new(2, 8),
3964                                lsp::Position::new(2, 17),
3965                            ),
3966                        },
3967                        message: "original diagnostic".to_string(),
3968                    }]),
3969                    ..Default::default()
3970                },
3971            ],
3972            version: None,
3973        };
3974
3975        worktree
3976            .update(&mut cx, |tree, cx| {
3977                tree.update_diagnostics(message, &Default::default(), cx)
3978            })
3979            .unwrap();
3980        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());
3981
3982        assert_eq!(
3983            buffer
3984                .diagnostics_in_range::<_, Point>(0..buffer.len())
3985                .collect::<Vec<_>>(),
3986            &[
3987                DiagnosticEntry {
3988                    range: Point::new(1, 8)..Point::new(1, 9),
3989                    diagnostic: Diagnostic {
3990                        severity: DiagnosticSeverity::WARNING,
3991                        message: "error 1".to_string(),
3992                        group_id: 0,
3993                        is_primary: true,
3994                        ..Default::default()
3995                    }
3996                },
3997                DiagnosticEntry {
3998                    range: Point::new(1, 8)..Point::new(1, 9),
3999                    diagnostic: Diagnostic {
4000                        severity: DiagnosticSeverity::HINT,
4001                        message: "error 1 hint 1".to_string(),
4002                        group_id: 0,
4003                        is_primary: false,
4004                        ..Default::default()
4005                    }
4006                },
4007                DiagnosticEntry {
4008                    range: Point::new(1, 13)..Point::new(1, 15),
4009                    diagnostic: Diagnostic {
4010                        severity: DiagnosticSeverity::HINT,
4011                        message: "error 2 hint 1".to_string(),
4012                        group_id: 1,
4013                        is_primary: false,
4014                        ..Default::default()
4015                    }
4016                },
4017                DiagnosticEntry {
4018                    range: Point::new(1, 13)..Point::new(1, 15),
4019                    diagnostic: Diagnostic {
4020                        severity: DiagnosticSeverity::HINT,
4021                        message: "error 2 hint 2".to_string(),
4022                        group_id: 1,
4023                        is_primary: false,
4024                        ..Default::default()
4025                    }
4026                },
4027                DiagnosticEntry {
4028                    range: Point::new(2, 8)..Point::new(2, 17),
4029                    diagnostic: Diagnostic {
4030                        severity: DiagnosticSeverity::ERROR,
4031                        message: "error 2".to_string(),
4032                        group_id: 1,
4033                        is_primary: true,
4034                        ..Default::default()
4035                    }
4036                }
4037            ]
4038        );
4039
4040        assert_eq!(
4041            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
4042            &[
4043                DiagnosticEntry {
4044                    range: Point::new(1, 8)..Point::new(1, 9),
4045                    diagnostic: Diagnostic {
4046                        severity: DiagnosticSeverity::WARNING,
4047                        message: "error 1".to_string(),
4048                        group_id: 0,
4049                        is_primary: true,
4050                        ..Default::default()
4051                    }
4052                },
4053                DiagnosticEntry {
4054                    range: Point::new(1, 8)..Point::new(1, 9),
4055                    diagnostic: Diagnostic {
4056                        severity: DiagnosticSeverity::HINT,
4057                        message: "error 1 hint 1".to_string(),
4058                        group_id: 0,
4059                        is_primary: false,
4060                        ..Default::default()
4061                    }
4062                },
4063            ]
4064        );
4065        assert_eq!(
4066            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
4067            &[
4068                DiagnosticEntry {
4069                    range: Point::new(1, 13)..Point::new(1, 15),
4070                    diagnostic: Diagnostic {
4071                        severity: DiagnosticSeverity::HINT,
4072                        message: "error 2 hint 1".to_string(),
4073                        group_id: 1,
4074                        is_primary: false,
4075                        ..Default::default()
4076                    }
4077                },
4078                DiagnosticEntry {
4079                    range: Point::new(1, 13)..Point::new(1, 15),
4080                    diagnostic: Diagnostic {
4081                        severity: DiagnosticSeverity::HINT,
4082                        message: "error 2 hint 2".to_string(),
4083                        group_id: 1,
4084                        is_primary: false,
4085                        ..Default::default()
4086                    }
4087                },
4088                DiagnosticEntry {
4089                    range: Point::new(2, 8)..Point::new(2, 17),
4090                    diagnostic: Diagnostic {
4091                        severity: DiagnosticSeverity::ERROR,
4092                        message: "error 2".to_string(),
4093                        group_id: 1,
4094                        is_primary: true,
4095                        ..Default::default()
4096                    }
4097                }
4098            ]
4099        );
4100    }
4101
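        // Randomized test: repeatedly mutate a real directory tree on disk, feed
        // the resulting events to a `BackgroundScanner`, and check that its
        // incrementally-maintained snapshot matches a scan from scratch and that
        // snapshot updates can be replayed onto previously captured snapshots.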
4102    #[gpui::test(iterations = 100)]
4103    fn test_random(mut rng: StdRng) {
4104        let operations = env::var("OPERATIONS")
4105            .map(|o| o.parse().unwrap())
4106            .unwrap_or(40);
4107        let initial_entries = env::var("INITIAL_ENTRIES")
4108            .map(|o| o.parse().unwrap())
4109            .unwrap_or(20);
4110
4111        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
4112        for _ in 0..initial_entries {
4113            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
4114        }
4115        log::info!("Generated initial tree");
4116
4117        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4118        let fs = Arc::new(RealFs);
4119        let next_entry_id = Arc::new(AtomicUsize::new(0));
4120        let mut initial_snapshot = Snapshot {
4121            id: WorktreeId::from_usize(0),
4122            scan_id: 0,
4123            abs_path: root_dir.path().into(),
4124            entries_by_path: Default::default(),
4125            entries_by_id: Default::default(),
4126            removed_entry_ids: Default::default(),
4127            ignores: Default::default(),
4128            root_name: Default::default(),
4129            root_char_bag: Default::default(),
4130            next_entry_id: next_entry_id.clone(),
4131        };
4132        initial_snapshot.insert_entry(
4133            Entry::new(
4134                Path::new("").into(),
4135                &smol::block_on(fs.metadata(root_dir.path()))
4136                    .unwrap()
4137                    .unwrap(),
4138                &next_entry_id,
4139                Default::default(),
4140            ),
4141            fs.as_ref(),
4142        );
4143        let mut scanner = BackgroundScanner::new(
4144            Arc::new(Mutex::new(initial_snapshot.clone())),
4145            notify_tx,
4146            fs.clone(),
4147            Arc::new(gpui::executor::Background::new()),
4148        );
4149        smol::block_on(scanner.scan_dirs()).unwrap();
4150        scanner.snapshot().check_invariants();
4151
4152        let mut events = Vec::new();
4153        let mut snapshots = Vec::new();
4154        let mut mutations_len = operations;
4155        while mutations_len > 1 {
4156            if !events.is_empty() && rng.gen_bool(0.4) {
4157                let len = rng.gen_range(0..=events.len());
4158                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
4159                log::info!("Delivering events: {:#?}", to_deliver);
4160                smol::block_on(scanner.process_events(to_deliver));
4161                scanner.snapshot().check_invariants();
4162            } else {
4163                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
4164                mutations_len -= 1;
4165            }
4166
4167            if rng.gen_bool(0.2) {
4168                snapshots.push(scanner.snapshot());
4169            }
4170        }
4171        log::info!("Quiescing: {:#?}", events);
4172        smol::block_on(scanner.process_events(events));
4173        scanner.snapshot().check_invariants();
4174
4175        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4176        let mut new_scanner = BackgroundScanner::new(
4177            Arc::new(Mutex::new(initial_snapshot)),
4178            notify_tx,
4179            scanner.fs.clone(),
4180            scanner.executor.clone(),
4181        );
4182        smol::block_on(new_scanner.scan_dirs()).unwrap();
4183        assert_eq!(
4184            scanner.snapshot().to_vec(true),
4185            new_scanner.snapshot().to_vec(true)
4186        );
4187
4188        for mut prev_snapshot in snapshots {
4189            let include_ignored = rng.gen::<bool>();
4190            if !include_ignored {
4191                let mut entries_by_path_edits = Vec::new();
4192                let mut entries_by_id_edits = Vec::new();
4193                for entry in prev_snapshot
4194                    .entries_by_id
4195                    .cursor::<()>()
4196                    .filter(|e| e.is_ignored)
4197                {
4198                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4199                    entries_by_id_edits.push(Edit::Remove(entry.id));
4200                }
4201
4202                prev_snapshot
4203                    .entries_by_path
4204                    .edit(entries_by_path_edits, &());
4205                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4206            }
4207
4208            let update = scanner
4209                .snapshot()
4210                .build_update(&prev_snapshot, 0, 0, include_ignored);
4211            prev_snapshot.apply_update(update).unwrap();
4212            assert_eq!(
4213                prev_snapshot.to_vec(true),
4214                scanner.snapshot().to_vec(include_ignored)
4215            );
4216        }
4217    }
4218
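        // Applies one random mutation beneath `root_path` (create a file or
        // directory, write a `.gitignore`, rename, or delete), returning the
        // filesystem events a watcher would report for it.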
4219    fn randomly_mutate_tree(
4220        root_path: &Path,
4221        insertion_probability: f64,
4222        rng: &mut impl Rng,
4223    ) -> Result<Vec<fsevent::Event>> {
4224        let root_path = root_path.canonicalize().unwrap();
4225        let (dirs, files) = read_dir_recursive(root_path.clone());
4226
4227        let mut events = Vec::new();
4228        let mut record_event = |path: PathBuf| {
4229            events.push(fsevent::Event {
4230                event_id: SystemTime::now()
4231                    .duration_since(UNIX_EPOCH)
4232                    .unwrap()
4233                    .as_secs(),
4234                flags: fsevent::StreamFlags::empty(),
4235                path,
4236            });
4237        };
4238
4239        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4240            let path = dirs.choose(rng).unwrap();
4241            let new_path = path.join(gen_name(rng));
4242
4243            if rng.gen() {
4244                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4245                std::fs::create_dir(&new_path)?;
4246            } else {
4247                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4248                std::fs::write(&new_path, "")?;
4249            }
4250            record_event(new_path);
4251        } else if rng.gen_bool(0.05) {
4252            let ignore_dir_path = dirs.choose(rng).unwrap();
4253            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4254
4255            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4256            let files_to_ignore = {
4257                let len = rng.gen_range(0..=subfiles.len());
4258                subfiles.choose_multiple(rng, len)
4259            };
4260            let dirs_to_ignore = {
4261                let len = rng.gen_range(0..subdirs.len());
4262                subdirs.choose_multiple(rng, len)
4263            };
4264
4265            let mut ignore_contents = String::new();
4266            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4267                writeln!(
4268                    ignore_contents,
4269                    "{}",
4270                    path_to_ignore
4271                        .strip_prefix(&ignore_dir_path)?
4272                        .to_str()
4273                        .unwrap()
4274                )
4275                .unwrap();
4276            }
4277            log::info!(
4278                "Creating {:?} with contents:\n{}",
4279                ignore_path.strip_prefix(&root_path)?,
4280                ignore_contents
4281            );
4282            std::fs::write(&ignore_path, ignore_contents).unwrap();
4283            record_event(ignore_path);
4284        } else {
4285            let old_path = {
4286                let file_path = files.choose(rng);
4287                let dir_path = dirs[1..].choose(rng);
4288                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4289            };
4290
4291            let is_rename = rng.gen();
4292            if is_rename {
4293                let new_path_parent = dirs
4294                    .iter()
4295                    .filter(|d| !d.starts_with(old_path))
4296                    .choose(rng)
4297                    .unwrap();
4298
4299                let overwrite_existing_dir =
4300                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4301                let new_path = if overwrite_existing_dir {
4302                    std::fs::remove_dir_all(&new_path_parent).ok();
4303                    new_path_parent.to_path_buf()
4304                } else {
4305                    new_path_parent.join(gen_name(rng))
4306                };
4307
4308                log::info!(
4309                    "Renaming {:?} to {}{:?}",
4310                    old_path.strip_prefix(&root_path)?,
4311                    if overwrite_existing_dir {
4312                        "overwrite "
4313                    } else {
4314                        ""
4315                    },
4316                    new_path.strip_prefix(&root_path)?
4317                );
4318                std::fs::rename(&old_path, &new_path)?;
4319                record_event(old_path.clone());
4320                record_event(new_path);
4321            } else if old_path.is_dir() {
4322                let (dirs, files) = read_dir_recursive(old_path.clone());
4323
4324                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4325                std::fs::remove_dir_all(&old_path).unwrap();
4326                for file in files {
4327                    record_event(file);
4328                }
4329                for dir in dirs {
4330                    record_event(dir);
4331                }
4332            } else {
4333                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4334                std::fs::remove_file(old_path).unwrap();
4335                record_event(old_path.clone());
4336            }
4337        }
4338
4339        Ok(events)
4340    }
4341
4342    fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4343        let child_entries = std::fs::read_dir(&path).unwrap();
4344        let mut dirs = vec![path];
4345        let mut files = Vec::new();
4346        for child_entry in child_entries {
4347            let child_path = child_entry.unwrap().path();
4348            if child_path.is_dir() {
4349                let (child_dirs, child_files) = read_dir_recursive(child_path);
4350                dirs.extend(child_dirs);
4351                files.extend(child_files);
4352            } else {
4353                files.push(child_path);
4354            }
4355        }
4356        (dirs, files)
4357    }
4358
4359    fn gen_name(rng: &mut impl Rng) -> String {
4360        (0..6)
4361            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4362            .map(char::from)
4363            .collect()
4364    }
4365
4366    impl Snapshot {
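            // Verifies internal consistency of the snapshot: the `files`
            // iterators agree with the entries tree, walking `child_entries`
            // visits the same paths as the in-order cursor, and every tracked
            // ignore file still has a corresponding entry.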
4367        fn check_invariants(&self) {
4368            let mut files = self.files(true, 0);
4369            let mut visible_files = self.files(false, 0);
4370            for entry in self.entries_by_path.cursor::<()>() {
4371                if entry.is_file() {
4372                    assert_eq!(files.next().unwrap().inode, entry.inode);
4373                    if !entry.is_ignored {
4374                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4375                    }
4376                }
4377            }
4378            assert!(files.next().is_none());
4379            assert!(visible_files.next().is_none());
4380
4381            let mut bfs_paths = Vec::new();
4382            let mut stack = vec![Path::new("")];
4383            while let Some(path) = stack.pop() {
4384                bfs_paths.push(path);
4385                let ix = stack.len();
4386                for child_entry in self.child_entries(path) {
4387                    stack.insert(ix, &child_entry.path);
4388                }
4389            }
4390
4391            let dfs_paths = self
4392                .entries_by_path
4393                .cursor::<()>()
4394                .map(|e| e.path.as_ref())
4395                .collect::<Vec<_>>();
4396            assert_eq!(bfs_paths, dfs_paths);
4397
4398            for (ignore_parent_path, _) in &self.ignores {
4399                assert!(self.entry_for_path(ignore_parent_path).is_some());
4400                assert!(self
4401                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4402                    .is_some());
4403            }
4404        }
4405
4406        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4407            let mut paths = Vec::new();
4408            for entry in self.entries_by_path.cursor::<()>() {
4409                if include_ignored || !entry.is_ignored {
4410                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4411                }
4412            }
4413            paths.sort_by(|a, b| a.0.cmp(&b.0));
4414            paths
4415        }
4416    }
4417}