worktree.rs

use super::{
    fs::{self, Fs},
    ignore::IgnoreStack,
    DiagnosticSummary,
};
use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context, Result};
use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
use clock::ReplicaId;
use collections::{hash_map, HashMap};
use futures::{Stream, StreamExt};
use fuzzy::CharBag;
use gpui::{
    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
    Task, UpgradeModelHandle, WeakModelHandle,
};
use language::{
    Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
    Operation, PointUtf16, Rope,
};
use lazy_static::lazy_static;
use lsp::LanguageServer;
use parking_lot::Mutex;
use postage::{
    prelude::{Sink as _, Stream as _},
    watch,
};
use serde::Deserialize;
use smol::channel::{self, Sender};
use std::{
    any::Any,
    cmp::{self, Ordering},
    convert::{TryFrom, TryInto},
    ffi::{OsStr, OsString},
    fmt,
    future::Future,
    ops::{Deref, Range},
    path::{Path, PathBuf},
    sync::{
        atomic::{AtomicUsize, Ordering::SeqCst},
        Arc,
    },
    time::{Duration, SystemTime},
};
use sum_tree::Bias;
use sum_tree::{Edit, SeekTarget, SumTree};
use util::{post_inc, ResultExt, TryFutureExt};

lazy_static! {
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}

#[derive(Clone, Debug)]
enum ScanState {
    Idle,
    Scanning,
    Err(Arc<anyhow::Error>),
}

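/// A worktree rooted at a single directory (or file), either backed by the local
/// filesystem or mirrored from a remote collaborator over RPC.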
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}

pub enum Event {
    Closed,
}

impl Entity for Worktree {
    type Event = Event;

    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        if let Self::Local(worktree) = self {
            let shutdown_futures = worktree
                .language_servers
                .drain()
                .filter_map(|(_, server)| server.shutdown())
                .collect::<Vec<_>>();
            Some(
                async move {
                    futures::future::join_all(shutdown_futures).await;
                }
                .boxed(),
            )
        } else {
            None
        }
    }
}

impl Worktree {
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }

    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: cx.model_id(),
                    scan_id: 0,
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    diagnostic_summaries: HashMap::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                })
            })
        });

        Ok(worktree)
    }

    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot(),
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }

    pub fn languages(&self) -> &Arc<LanguageRegistry> {
        match self {
            Worktree::Local(worktree) => &worktree.languages,
            Worktree::Remote(worktree) => &worktree.languages,
        }
    }

    pub fn user_store(&self) -> &ModelHandle<UserStore> {
        match self {
            Worktree::Local(worktree) => &worktree.user_store,
            Worktree::Remote(worktree) => &worktree.user_store,
        }
    }

    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }

    pub fn handle_close_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        self.as_local_mut()
            .unwrap()
            .close_remote_buffer(envelope, cx)
    }

    pub fn diagnostic_summaries<'a>(
        &'a self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.clone(), summary.clone()))
    }

    pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
        match self {
            Worktree::Local(worktree) => &mut worktree.loading_buffers,
            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
        }
    }

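    /// Opens a buffer for the given worktree-relative path, reusing an already-open
    /// buffer or an in-flight load of the same path when one exists.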
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }

    #[cfg(feature = "test-support")]
    pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
        let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
            Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
            Worktree::Remote(worktree) => {
                Box::new(worktree.open_buffers.values().filter_map(|buf| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some(buf)
                    } else {
                        None
                    }
                }))
            }
        };

        let path = path.as_ref();
        open_buffers
            .find(|buffer| {
                if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
                    file.path().as_ref() == path
                } else {
                    false
                }
            })
            .is_some()
    }

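    /// Applies buffer operations received from a peer. On a remote worktree, operations
    /// for a buffer that is not loaded yet are queued until it finishes loading.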
    pub fn handle_update_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateBuffer>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let buffer_id = payload.buffer_id as usize;
        let ops = payload
            .operations
            .into_iter()
            .map(|op| language::proto::deserialize_operation(op))
            .collect::<Result<Vec<_>, _>>()?;

        match self {
            Worktree::Local(worktree) => {
                let buffer = worktree
                    .open_buffers
                    .get(&buffer_id)
                    .and_then(|buf| buf.upgrade(cx))
                    .ok_or_else(|| {
                        anyhow!("invalid buffer {} in update buffer message", buffer_id)
                    })?;
                buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
            }
            Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
                Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
                Some(RemoteBuffer::Loaded(buffer)) => {
                    if let Some(buffer) = buffer.upgrade(cx) {
                        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
                    } else {
                        worktree
                            .open_buffers
                            .insert(buffer_id, RemoteBuffer::Operations(ops));
                    }
                }
                None => {
                    worktree
                        .open_buffers
                        .insert(buffer_id, RemoteBuffer::Operations(ops));
                }
            },
        }

        Ok(())
    }

    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }

    pub fn handle_buffer_saved(
        &mut self,
        envelope: TypedEnvelope<proto::BufferSaved>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let payload = envelope.payload.clone();
        let worktree = self.as_remote_mut().unwrap();
        if let Some(buffer) = worktree
            .open_buffers
            .get(&(payload.buffer_id as usize))
            .and_then(|buf| buf.upgrade(cx))
        {
            buffer.update(cx, |buffer, cx| {
                let version = payload.version.try_into()?;
                let mtime = payload
                    .mtime
                    .ok_or_else(|| anyhow!("missing mtime"))?
                    .into();
                buffer.did_save(version, mtime, None, cx);
                Result::<_, anyhow::Error>::Ok(())
            })?;
        }
        Ok(())
    }

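    /// Refreshes `snapshot` from the background scanner (local) or the update stream
    /// (remote), rescheduling itself while a local scan is still in progress, and
    /// reconciles open buffers with the new snapshot.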
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }

    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = buffer.file() {
                        let new_file = if let Some(entry) = old_file
                            .entry_id()
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }

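    /// Ingests an LSP `publishDiagnostics` notification for a local worktree: groups
    /// related diagnostics, pushes them into the matching open buffer, and records a
    /// per-path summary.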
    fn update_diagnostics(
        &mut self,
        params: lsp::PublishDiagnosticsParams,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
        let abs_path = params
            .uri
            .to_file_path()
            .map_err(|_| anyhow!("URI is not a file"))?;
        let worktree_path = Arc::from(
            abs_path
                .strip_prefix(&this.abs_path)
                .context("path is not within worktree")?,
        );

        let mut group_ids_by_diagnostic_range = HashMap::default();
        let mut diagnostics_by_group_id = HashMap::default();
        let mut next_group_id = 0;
        for diagnostic in &params.diagnostics {
            let source = diagnostic.source.as_ref();
            let code = diagnostic.code.as_ref();
            let group_id = diagnostic_ranges(&diagnostic, &abs_path)
                .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
                .copied()
                .unwrap_or_else(|| {
                    let group_id = post_inc(&mut next_group_id);
                    for range in diagnostic_ranges(&diagnostic, &abs_path) {
                        group_ids_by_diagnostic_range.insert((source, code, range), group_id);
                    }
                    group_id
                });

            diagnostics_by_group_id
                .entry(group_id)
                .or_insert(Vec::new())
                .push(DiagnosticEntry {
                    range: diagnostic.range.start.to_point_utf16()
                        ..diagnostic.range.end.to_point_utf16(),
                    diagnostic: Diagnostic {
                        source: diagnostic.source.clone(),
                        code: diagnostic.code.clone().map(|code| match code {
                            lsp::NumberOrString::Number(code) => code.to_string(),
                            lsp::NumberOrString::String(code) => code,
                        }),
                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
                        message: diagnostic.message.clone(),
                        group_id,
                        is_primary: false,
                    },
                });
        }

        let diagnostics = diagnostics_by_group_id
            .into_values()
            .flat_map(|mut diagnostics| {
                let primary = diagnostics
                    .iter_mut()
                    .min_by_key(|entry| entry.diagnostic.severity)
                    .unwrap();
                primary.diagnostic.is_primary = true;
                diagnostics
            })
            .collect::<Vec<_>>();

        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(params.version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        let this = self.as_local_mut().unwrap();
        this.diagnostic_summaries
            .insert(worktree_path.clone(), DiagnosticSummary::new(&diagnostics));
        this.diagnostics.insert(worktree_path.clone(), diagnostics);
        Ok(())
    }

    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.client.clone())),
            Worktree::Remote(worktree) => Some((worktree.project_id, worktree.client.clone())),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree.id() as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
}

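/// A point-in-time view of the worktree's file entries, indexed both by path and by
/// entry id.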
#[derive(Clone)]
pub struct Snapshot {
    id: usize,
    scan_id: usize,
    abs_path: Arc<Path>,
    root_name: String,
    root_char_bag: CharBag,
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}

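/// A worktree backed by the local filesystem. Owns the background scanner, open
/// buffers, running language servers, and the diagnostics they publish.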
pub struct LocalWorktree {
    snapshot: Snapshot,
    config: WorktreeConfig,
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    poll_task: Option<Task<()>>,
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: HashMap<Arc<Path>, DiagnosticSummary>,
    queued_operations: Vec<(u64, Operation)>,
    languages: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    language_servers: HashMap<String, Arc<LanguageServer>>,
}

struct ShareState {
    project_id: u64,
    snapshots_tx: Sender<Snapshot>,
}

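/// A worktree mirrored from a collaborator. Its snapshot is kept current by applying
/// `UpdateWorktree` messages received over RPC.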
pub struct RemoteWorktree {
    project_id: u64,
    remote_id: u64,
    snapshot: Snapshot,
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    open_buffers: HashMap<usize, RemoteBuffer>,
    diagnostic_summaries: HashMap<Arc<Path>, DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    queued_operations: Vec<(u64, Operation)>,
}

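/// Buffer loads that are currently in flight, keyed by path. Waiters subscribe to the
/// watch channel and receive the buffer (or the load error) once the load resolves.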
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;

#[derive(Default, Deserialize)]
struct WorktreeConfig {
    collaborators: Vec<String>,
}

impl LocalWorktree {
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: cx.model_id(),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                languages,
                client,
                user_store,
                fs,
                language_servers: Default::default(),
            };

            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }

    pub fn authorized_logins(&self) -> Vec<String> {
        self.config.collaborators.clone()
    }

    pub fn languages(&self) -> &LanguageRegistry {
        &self.languages
    }

    pub fn ensure_language_server(
        &mut self,
        language: &Language,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(diagnostics) = diagnostics_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        handle.update(&mut cx, |this, cx| {
                            this.update_diagnostics(diagnostics, cx).log_err();
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }

    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut result = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        result
    }

    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let diagnostics = this.diagnostics.remove(&path);
                let language = this.languages.select_language(file.full_path()).cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.ensure_language_server(language, cx));
                (diagnostics, language, server)
            });

            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                }
                buffer
            });

            this.update(&mut cx, |this, _| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }

    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }

    pub fn close_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
            shared_buffers.remove(&envelope.payload.buffer_id);
            cx.notify();
        }

        Ok(())
    }

    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Worktree>,
    ) {
        self.shared_buffers.remove(&peer_id);
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }

    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Scanning) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }

    fn is_scanning(&self) -> bool {
        if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
            true
        } else {
            false
        }
    }

    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    pub fn abs_path(&self) -> &Path {
        self.snapshot.abs_path.as_ref()
    }

    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }

    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.snapshot.abs_path.join(path)
        } else {
            self.snapshot.abs_path.to_path_buf()
        }
    }

    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }

    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }

    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }

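    /// Starts sharing this worktree under the given project id: sends the initial
    /// snapshot to the server and then streams snapshot diffs as scans complete.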
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
        });

        cx.background()
            .spawn({
                let rpc = rpc.clone();
                let snapshot = snapshot.clone();
                async move {
                    let mut prev_snapshot = snapshot;
                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                        let message =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                        match rpc.send(message).await {
                            Ok(()) => prev_snapshot = snapshot,
                            Err(err) => log::error!("error sending snapshot diff {}", err),
                        }
                    }
                }
            })
            .detach();

        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto()),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
}

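/// Loads a `.gitignore` file from disk and parses it into a matcher rooted at the
/// file's parent directory.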
1301fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1302    let contents = smol::block_on(fs.load(&abs_path))?;
1303    let parent = abs_path.parent().unwrap_or(Path::new("/"));
1304    let mut builder = GitignoreBuilder::new(parent);
1305    for line in contents.lines() {
1306        builder.add_line(Some(abs_path.into()), line)?;
1307    }
1308    Ok(builder.build()?)
1309}
1310
1311impl Deref for Worktree {
1312    type Target = Snapshot;
1313
1314    fn deref(&self) -> &Self::Target {
1315        match self {
1316            Worktree::Local(worktree) => &worktree.snapshot,
1317            Worktree::Remote(worktree) => &worktree.snapshot,
1318        }
1319    }
1320}
1321
1322impl Deref for LocalWorktree {
1323    type Target = Snapshot;
1324
1325    fn deref(&self) -> &Self::Target {
1326        &self.snapshot
1327    }
1328}
1329
1330impl fmt::Debug for LocalWorktree {
1331    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1332        self.snapshot.fmt(f)
1333    }
1334}
1335
1336impl RemoteWorktree {
1337    pub fn remote_id(&self) -> u64 {
1338        self.remote_id
1339    }
1340
1341    fn get_open_buffer(
1342        &mut self,
1343        path: &Path,
1344        cx: &mut ModelContext<Worktree>,
1345    ) -> Option<ModelHandle<Buffer>> {
1346        let handle = cx.handle();
1347        let mut existing_buffer = None;
1348        self.open_buffers.retain(|_buffer_id, buffer| {
1349            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
1350                if let Some(file) = buffer.read(cx.as_ref()).file() {
1351                    if file.worktree_id() == handle.id() && file.path().as_ref() == path {
1352                        existing_buffer = Some(buffer);
1353                    }
1354                }
1355                true
1356            } else {
1357                false
1358            }
1359        });
1360        existing_buffer
1361    }
1362
1363    fn open_buffer(
1364        &mut self,
1365        path: &Path,
1366        cx: &mut ModelContext<Worktree>,
1367    ) -> Task<Result<ModelHandle<Buffer>>> {
1368        let rpc = self.client.clone();
1369        let replica_id = self.replica_id;
1370        let project_id = self.project_id;
1371        let remote_worktree_id = self.remote_id;
1372        let root_path = self.snapshot.abs_path.clone();
1373        let path: Arc<Path> = Arc::from(path);
1374        let path_string = path.to_string_lossy().to_string();
1375        cx.spawn_weak(move |this, mut cx| async move {
1376            let entry = this
1377                .upgrade(&cx)
1378                .ok_or_else(|| anyhow!("worktree was closed"))?
1379                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
1380                .ok_or_else(|| anyhow!("file does not exist"))?;
1381            let response = rpc
1382                .request(proto::OpenBuffer {
1383                    project_id,
1384                    worktree_id: remote_worktree_id as u64,
1385                    path: path_string,
1386                })
1387                .await?;
1388
1389            let this = this
1390                .upgrade(&cx)
1391                .ok_or_else(|| anyhow!("worktree was closed"))?;
1392            let file = File {
1393                entry_id: Some(entry.id),
1394                worktree: this.clone(),
1395                worktree_path: root_path,
1396                path: entry.path,
1397                mtime: entry.mtime,
1398                is_local: false,
1399            };
1400            let language = this.read_with(&cx, |this, _| {
1401                use language::File;
1402                this.languages().select_language(file.full_path()).cloned()
1403            });
1404            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
1405            let buffer_id = remote_buffer.id as usize;
1406            let buffer = cx.add_model(|cx| {
1407                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
1408                    .unwrap()
1409                    .with_language(language, None, cx)
1410            });
1411            this.update(&mut cx, move |this, cx| {
1412                let this = this.as_remote_mut().unwrap();
1413                if let Some(RemoteBuffer::Operations(pending_ops)) = this
1414                    .open_buffers
1415                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
1416                {
1417                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
1418                }
1419                Result::<_, anyhow::Error>::Ok(buffer)
1420            })
1421        })
1422    }
1423
1424    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1425        for (_, buffer) in self.open_buffers.drain() {
1426            if let RemoteBuffer::Loaded(buffer) = buffer {
1427                if let Some(buffer) = buffer.upgrade(cx) {
1428                    buffer.update(cx, |buffer, cx| buffer.close(cx))
1429                }
1430            }
1431        }
1432    }
1433
1434    fn snapshot(&self) -> Snapshot {
1435        self.snapshot.clone()
1436    }
1437
1438    pub fn update_from_remote(
1439        &mut self,
1440        envelope: TypedEnvelope<proto::UpdateWorktree>,
1441        cx: &mut ModelContext<Worktree>,
1442    ) -> Result<()> {
1443        let mut tx = self.updates_tx.clone();
1444        let payload = envelope.payload.clone();
1445        cx.background()
1446            .spawn(async move {
1447                tx.send(payload).await.expect("receiver runs to completion");
1448            })
1449            .detach();
1450
1451        Ok(())
1452    }
1453
1454    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1455        for (_, buffer) in &self.open_buffers {
1456            if let Some(buffer) = buffer.upgrade(cx) {
1457                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1458            }
1459        }
1460        cx.notify();
1461    }
1462}
1463
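    /// The state of a buffer in a remote worktree: either the operations that
    /// arrived before the buffer finished opening, or a weak handle to the buffer
    /// once it has been loaded.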
1464enum RemoteBuffer {
1465    Operations(Vec<Operation>),
1466    Loaded(WeakModelHandle<Buffer>),
1467}
1468
1469impl RemoteBuffer {
1470    fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1471        match self {
1472            Self::Operations(_) => None,
1473            Self::Loaded(buffer) => buffer.upgrade(cx),
1474        }
1475    }
1476}
1477
1478impl Snapshot {
1479    pub fn id(&self) -> usize {
1480        self.id
1481    }
1482
1483    pub fn to_proto(&self) -> proto::Worktree {
1484        let root_name = self.root_name.clone();
1485        proto::Worktree {
1486            id: self.id as u64,
1487            root_name,
1488            entries: self
1489                .entries_by_path
1490                .cursor::<()>()
1491                .filter(|e| !e.is_ignored)
1492                .map(Into::into)
1493                .collect(),
1494        }
1495    }
1496
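        /// Computes the difference between this snapshot and `other` as a protobuf
        /// message, walking both id-ordered entry trees in lockstep. Entries present
        /// only in `self` (or whose scan id changed) are reported as updated, and
        /// entries present only in `other` are reported as removed. The resulting
        /// message is intended to be applied on the other side via `apply_update`.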
1497    pub fn build_update(
1498        &self,
1499        other: &Self,
1500        project_id: u64,
1501        worktree_id: u64,
1502        include_ignored: bool,
1503    ) -> proto::UpdateWorktree {
1504        let mut updated_entries = Vec::new();
1505        let mut removed_entries = Vec::new();
1506        let mut self_entries = self
1507            .entries_by_id
1508            .cursor::<()>()
1509            .filter(|e| include_ignored || !e.is_ignored)
1510            .peekable();
1511        let mut other_entries = other
1512            .entries_by_id
1513            .cursor::<()>()
1514            .filter(|e| include_ignored || !e.is_ignored)
1515            .peekable();
1516        loop {
1517            match (self_entries.peek(), other_entries.peek()) {
1518                (Some(self_entry), Some(other_entry)) => {
1519                    match Ord::cmp(&self_entry.id, &other_entry.id) {
1520                        Ordering::Less => {
1521                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
1522                            updated_entries.push(entry);
1523                            self_entries.next();
1524                        }
1525                        Ordering::Equal => {
1526                            if self_entry.scan_id != other_entry.scan_id {
1527                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
1528                                updated_entries.push(entry);
1529                            }
1530
1531                            self_entries.next();
1532                            other_entries.next();
1533                        }
1534                        Ordering::Greater => {
1535                            removed_entries.push(other_entry.id as u64);
1536                            other_entries.next();
1537                        }
1538                    }
1539                }
1540                (Some(self_entry), None) => {
1541                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
1542                    updated_entries.push(entry);
1543                    self_entries.next();
1544                }
1545                (None, Some(other_entry)) => {
1546                    removed_entries.push(other_entry.id as u64);
1547                    other_entries.next();
1548                }
1549                (None, None) => break,
1550            }
1551        }
1552
1553        proto::UpdateWorktree {
1554            project_id,
1555            worktree_id,
1556            root_name: self.root_name().to_string(),
1557            updated_entries,
1558            removed_entries,
1559        }
1560    }
1561
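        /// Applies an update received from the remote worktree: removes the deleted
        /// entries and (re)inserts the updated ones into both entry trees, bumping
        /// the snapshot's scan id.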
1562    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1563        self.scan_id += 1;
1564        let scan_id = self.scan_id;
1565
1566        let mut entries_by_path_edits = Vec::new();
1567        let mut entries_by_id_edits = Vec::new();
1568        for entry_id in update.removed_entries {
1569            let entry_id = entry_id as usize;
1570            let entry = self
1571                .entry_for_id(entry_id)
1572                .ok_or_else(|| anyhow!("unknown entry"))?;
1573            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1574            entries_by_id_edits.push(Edit::Remove(entry.id));
1575        }
1576
1577        for entry in update.updated_entries {
1578            let entry = Entry::try_from((&self.root_char_bag, entry))?;
1579            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1580                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1581            }
1582            entries_by_id_edits.push(Edit::Insert(PathEntry {
1583                id: entry.id,
1584                path: entry.path.clone(),
1585                is_ignored: entry.is_ignored,
1586                scan_id,
1587            }));
1588            entries_by_path_edits.push(Edit::Insert(entry));
1589        }
1590
1591        self.entries_by_path.edit(entries_by_path_edits, &());
1592        self.entries_by_id.edit(entries_by_id_edits, &());
1593
1594        Ok(())
1595    }
1596
1597    pub fn file_count(&self) -> usize {
1598        self.entries_by_path.summary().file_count
1599    }
1600
1601    pub fn visible_file_count(&self) -> usize {
1602        self.entries_by_path.summary().visible_file_count
1603    }
1604
1605    fn traverse_from_offset(
1606        &self,
1607        include_dirs: bool,
1608        include_ignored: bool,
1609        start_offset: usize,
1610    ) -> Traversal {
1611        let mut cursor = self.entries_by_path.cursor();
1612        cursor.seek(
1613            &TraversalTarget::Count {
1614                count: start_offset,
1615                include_dirs,
1616                include_ignored,
1617            },
1618            Bias::Right,
1619            &(),
1620        );
1621        Traversal {
1622            cursor,
1623            include_dirs,
1624            include_ignored,
1625        }
1626    }
1627
1628    fn traverse_from_path(
1629        &self,
1630        include_dirs: bool,
1631        include_ignored: bool,
1632        path: &Path,
1633    ) -> Traversal {
1634        let mut cursor = self.entries_by_path.cursor();
1635        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1636        Traversal {
1637            cursor,
1638            include_dirs,
1639            include_ignored,
1640        }
1641    }
1642
1643    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1644        self.traverse_from_offset(false, include_ignored, start)
1645    }
1646
1647    pub fn entries(&self, include_ignored: bool) -> Traversal {
1648        self.traverse_from_offset(true, include_ignored, 0)
1649    }
1650
1651    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1652        let empty_path = Path::new("");
1653        self.entries_by_path
1654            .cursor::<()>()
1655            .filter(move |entry| entry.path.as_ref() != empty_path)
1656            .map(|entry| &entry.path)
1657    }
1658
1659    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1660        let mut cursor = self.entries_by_path.cursor();
1661        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1662        let traversal = Traversal {
1663            cursor,
1664            include_dirs: true,
1665            include_ignored: true,
1666        };
1667        ChildEntriesIter {
1668            traversal,
1669            parent_path,
1670        }
1671    }
1672
1673    pub fn root_entry(&self) -> Option<&Entry> {
1674        self.entry_for_path("")
1675    }
1676
1677    pub fn root_name(&self) -> &str {
1678        &self.root_name
1679    }
1680
1681    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1682        let path = path.as_ref();
1683        self.traverse_from_path(true, true, path)
1684            .entry()
1685            .and_then(|entry| {
1686                if entry.path.as_ref() == path {
1687                    Some(entry)
1688                } else {
1689                    None
1690                }
1691            })
1692    }
1693
1694    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
1695        let entry = self.entries_by_id.get(&id, &())?;
1696        self.entry_for_path(&entry.path)
1697    }
1698
1699    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1700        self.entry_for_path(path.as_ref()).map(|e| e.inode)
1701    }
1702
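        /// Inserts a single entry into the snapshot, reusing an existing entry id
        /// where possible. If the entry is a `.gitignore` file, its rules are
        /// (re)loaded and recorded for the containing directory.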
1703    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1704        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1705            let abs_path = self.abs_path.join(&entry.path);
1706            match build_gitignore(&abs_path, fs) {
1707                Ok(ignore) => {
1708                    let ignore_dir_path = entry.path.parent().unwrap();
1709                    self.ignores
1710                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
1711                }
1712                Err(error) => {
1713                    log::error!(
1714                        "error loading .gitignore file {:?} - {:?}",
1715                        &entry.path,
1716                        error
1717                    );
1718                }
1719            }
1720        }
1721
1722        self.reuse_entry_id(&mut entry);
1723        self.entries_by_path.insert_or_replace(entry.clone(), &());
1724        self.entries_by_id.insert_or_replace(
1725            PathEntry {
1726                id: entry.id,
1727                path: entry.path.clone(),
1728                is_ignored: entry.is_ignored,
1729                scan_id: self.scan_id,
1730            },
1731            &(),
1732        );
1733        entry
1734    }
1735
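        /// Replaces a pending directory entry with its scanned children, recording
        /// the directory's `.gitignore` (if one was found) along the way.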
1736    fn populate_dir(
1737        &mut self,
1738        parent_path: Arc<Path>,
1739        entries: impl IntoIterator<Item = Entry>,
1740        ignore: Option<Arc<Gitignore>>,
1741    ) {
1742        let mut parent_entry = self
1743            .entries_by_path
1744            .get(&PathKey(parent_path.clone()), &())
1745            .unwrap()
1746            .clone();
1747        if let Some(ignore) = ignore {
1748            self.ignores.insert(parent_path, (ignore, self.scan_id));
1749        }
1750        if matches!(parent_entry.kind, EntryKind::PendingDir) {
1751            parent_entry.kind = EntryKind::Dir;
1752        } else {
1753            unreachable!();
1754        }
1755
1756        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1757        let mut entries_by_id_edits = Vec::new();
1758
1759        for mut entry in entries {
1760            self.reuse_entry_id(&mut entry);
1761            entries_by_id_edits.push(Edit::Insert(PathEntry {
1762                id: entry.id,
1763                path: entry.path.clone(),
1764                is_ignored: entry.is_ignored,
1765                scan_id: self.scan_id,
1766            }));
1767            entries_by_path_edits.push(Edit::Insert(entry));
1768        }
1769
1770        self.entries_by_path.edit(entries_by_path_edits, &());
1771        self.entries_by_id.edit(entries_by_id_edits, &());
1772    }
1773
1774    fn reuse_entry_id(&mut self, entry: &mut Entry) {
1775        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1776            entry.id = removed_entry_id;
1777        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1778            entry.id = existing_entry.id;
1779        }
1780    }
1781
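        /// Removes the entry at `path` and all entries beneath it, remembering the
        /// removed ids keyed by inode so that a subsequent re-insert within the same
        /// event batch (e.g. a rename) can reuse them.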
1782    fn remove_path(&mut self, path: &Path) {
1783        let mut new_entries;
1784        let removed_entries;
1785        {
1786            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1787            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1788            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1789            new_entries.push_tree(cursor.suffix(&()), &());
1790        }
1791        self.entries_by_path = new_entries;
1792
1793        let mut entries_by_id_edits = Vec::new();
1794        for entry in removed_entries.cursor::<()>() {
1795            let removed_entry_id = self
1796                .removed_entry_ids
1797                .entry(entry.inode)
1798                .or_insert(entry.id);
1799            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1800            entries_by_id_edits.push(Edit::Remove(entry.id));
1801        }
1802        self.entries_by_id.edit(entries_by_id_edits, &());
1803
1804        if path.file_name() == Some(&GITIGNORE) {
1805            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
1806                *scan_id = self.scan_id;
1807            }
1808        }
1809    }
1810
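        /// Builds the stack of `.gitignore` rules that apply to `path` by walking its
        /// ancestors from the root downward, collapsing to `IgnoreStack::all()` as
        /// soon as an ancestor (or the path itself) is ignored.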
1811    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1812        let mut new_ignores = Vec::new();
1813        for ancestor in path.ancestors().skip(1) {
1814            if let Some((ignore, _)) = self.ignores.get(ancestor) {
1815                new_ignores.push((ancestor, Some(ignore.clone())));
1816            } else {
1817                new_ignores.push((ancestor, None));
1818            }
1819        }
1820
1821        let mut ignore_stack = IgnoreStack::none();
1822        for (parent_path, ignore) in new_ignores.into_iter().rev() {
1823            if ignore_stack.is_path_ignored(&parent_path, true) {
1824                ignore_stack = IgnoreStack::all();
1825                break;
1826            } else if let Some(ignore) = ignore {
1827                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
1828            }
1829        }
1830
1831        if ignore_stack.is_path_ignored(path, is_dir) {
1832            ignore_stack = IgnoreStack::all();
1833        }
1834
1835        ignore_stack
1836    }
1837}
1838
1839impl fmt::Debug for Snapshot {
1840    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1841        for entry in self.entries_by_path.cursor::<()>() {
1842            for _ in entry.path.ancestors().skip(1) {
1843                write!(f, " ")?;
1844            }
1845            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
1846        }
1847        Ok(())
1848    }
1849}
1850
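    /// A handle to a file within a local or remote worktree. Implements
    /// `language::File` so that buffers can load, save, and broadcast their updates
    /// through the worktree they belong to.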
1851#[derive(Clone, PartialEq)]
1852pub struct File {
1853    entry_id: Option<usize>,
1854    worktree: ModelHandle<Worktree>,
1855    worktree_path: Arc<Path>,
1856    pub path: Arc<Path>,
1857    pub mtime: SystemTime,
1858    is_local: bool,
1859}
1860
1861impl language::File for File {
1862    fn worktree_id(&self) -> usize {
1863        self.worktree.id()
1864    }
1865
1866    fn entry_id(&self) -> Option<usize> {
1867        self.entry_id
1868    }
1869
1870    fn mtime(&self) -> SystemTime {
1871        self.mtime
1872    }
1873
1874    fn path(&self) -> &Arc<Path> {
1875        &self.path
1876    }
1877
1878    fn abs_path(&self) -> Option<PathBuf> {
1879        if self.is_local {
1880            Some(self.worktree_path.join(&self.path))
1881        } else {
1882            None
1883        }
1884    }
1885
1886    fn full_path(&self) -> PathBuf {
1887        let mut full_path = PathBuf::new();
1888        if let Some(worktree_name) = self.worktree_path.file_name() {
1889            full_path.push(worktree_name);
1890        }
1891        full_path.push(&self.path);
1892        full_path
1893    }
1894
1895    /// Returns the last component of this handle's absolute path. If this handle refers to the root
1896    /// of its worktree, then this method will return the name of the worktree itself.
1897    fn file_name<'a>(&'a self) -> Option<OsString> {
1898        self.path
1899            .file_name()
1900            .or_else(|| self.worktree_path.file_name())
1901            .map(Into::into)
1902    }
1903
1904    fn is_deleted(&self) -> bool {
1905        self.entry_id.is_none()
1906    }
1907
1908    fn save(
1909        &self,
1910        buffer_id: u64,
1911        text: Rope,
1912        version: clock::Global,
1913        cx: &mut MutableAppContext,
1914    ) -> Task<Result<(clock::Global, SystemTime)>> {
1915        let worktree_id = self.worktree.id() as u64;
1916        self.worktree.update(cx, |worktree, cx| match worktree {
1917            Worktree::Local(worktree) => {
1918                let rpc = worktree.client.clone();
1919                let project_id = worktree.share.as_ref().map(|share| share.project_id);
1920                let save = worktree.save(self.path.clone(), text, cx);
1921                cx.background().spawn(async move {
1922                    let entry = save.await?;
1923                    if let Some(project_id) = project_id {
1924                        rpc.send(proto::BufferSaved {
1925                            project_id,
1926                            worktree_id,
1927                            buffer_id,
1928                            version: (&version).into(),
1929                            mtime: Some(entry.mtime.into()),
1930                        })
1931                        .await?;
1932                    }
1933                    Ok((version, entry.mtime))
1934                })
1935            }
1936            Worktree::Remote(worktree) => {
1937                let rpc = worktree.client.clone();
1938                let project_id = worktree.project_id;
1939                cx.foreground().spawn(async move {
1940                    let response = rpc
1941                        .request(proto::SaveBuffer {
1942                            project_id,
1943                            worktree_id,
1944                            buffer_id,
1945                        })
1946                        .await?;
1947                    let version = response.version.try_into()?;
1948                    let mtime = response
1949                        .mtime
1950                        .ok_or_else(|| anyhow!("missing mtime"))?
1951                        .into();
1952                    Ok((version, mtime))
1953                })
1954            }
1955        })
1956    }
1957
1958    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
1959        let worktree = self.worktree.read(cx).as_local()?;
1960        let abs_path = worktree.absolutize(&self.path);
1961        let fs = worktree.fs.clone();
1962        Some(
1963            cx.background()
1964                .spawn(async move { fs.load(&abs_path).await }),
1965        )
1966    }
1967
1968    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
1969        self.worktree.update(cx, |worktree, cx| {
1970            worktree.send_buffer_update(buffer_id, operation, cx);
1971        });
1972    }
1973
1974    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
1975        let worktree_id = self.worktree.id() as u64;
1976        self.worktree.update(cx, |worktree, cx| {
1977            if let Worktree::Remote(worktree) = worktree {
1978                let project_id = worktree.project_id;
1979                let rpc = worktree.client.clone();
1980                cx.background()
1981                    .spawn(async move {
1982                        if let Err(error) = rpc
1983                            .send(proto::CloseBuffer {
1984                                project_id,
1985                                worktree_id,
1986                                buffer_id,
1987                            })
1988                            .await
1989                        {
1990                            log::error!("error closing remote buffer: {}", error);
1991                        }
1992                    })
1993                    .detach();
1994            }
1995        });
1996    }
1997
1998    fn boxed_clone(&self) -> Box<dyn language::File> {
1999        Box::new(self.clone())
2000    }
2001
2002    fn as_any(&self) -> &dyn Any {
2003        self
2004    }
2005}
2006
2007#[derive(Clone, Debug)]
2008pub struct Entry {
2009    pub id: usize,
2010    pub kind: EntryKind,
2011    pub path: Arc<Path>,
2012    pub inode: u64,
2013    pub mtime: SystemTime,
2014    pub is_symlink: bool,
2015    pub is_ignored: bool,
2016}
2017
2018#[derive(Clone, Debug)]
2019pub enum EntryKind {
2020    PendingDir,
2021    Dir,
2022    File(CharBag),
2023}
2024
2025impl Entry {
2026    fn new(
2027        path: Arc<Path>,
2028        metadata: &fs::Metadata,
2029        next_entry_id: &AtomicUsize,
2030        root_char_bag: CharBag,
2031    ) -> Self {
2032        Self {
2033            id: next_entry_id.fetch_add(1, SeqCst),
2034            kind: if metadata.is_dir {
2035                EntryKind::PendingDir
2036            } else {
2037                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2038            },
2039            path,
2040            inode: metadata.inode,
2041            mtime: metadata.mtime,
2042            is_symlink: metadata.is_symlink,
2043            is_ignored: false,
2044        }
2045    }
2046
2047    pub fn is_dir(&self) -> bool {
2048        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2049    }
2050
2051    pub fn is_file(&self) -> bool {
2052        matches!(self.kind, EntryKind::File(_))
2053    }
2054}
2055
2056impl sum_tree::Item for Entry {
2057    type Summary = EntrySummary;
2058
2059    fn summary(&self) -> Self::Summary {
2060        let visible_count = if self.is_ignored { 0 } else { 1 };
2061        let file_count;
2062        let visible_file_count;
2063        if self.is_file() {
2064            file_count = 1;
2065            visible_file_count = visible_count;
2066        } else {
2067            file_count = 0;
2068            visible_file_count = 0;
2069        }
2070
2071        EntrySummary {
2072            max_path: self.path.clone(),
2073            count: 1,
2074            visible_count,
2075            file_count,
2076            visible_file_count,
2077        }
2078    }
2079}
2080
2081impl sum_tree::KeyedItem for Entry {
2082    type Key = PathKey;
2083
2084    fn key(&self) -> Self::Key {
2085        PathKey(self.path.clone())
2086    }
2087}
2088
2089#[derive(Clone, Debug)]
2090pub struct EntrySummary {
2091    max_path: Arc<Path>,
2092    count: usize,
2093    visible_count: usize,
2094    file_count: usize,
2095    visible_file_count: usize,
2096}
2097
2098impl Default for EntrySummary {
2099    fn default() -> Self {
2100        Self {
2101            max_path: Arc::from(Path::new("")),
2102            count: 0,
2103            visible_count: 0,
2104            file_count: 0,
2105            visible_file_count: 0,
2106        }
2107    }
2108}
2109
2110impl sum_tree::Summary for EntrySummary {
2111    type Context = ();
2112
2113    fn add_summary(&mut self, rhs: &Self, _: &()) {
2114        self.max_path = rhs.max_path.clone();
            self.count += rhs.count;
2115        self.visible_count += rhs.visible_count;
2116        self.file_count += rhs.file_count;
2117        self.visible_file_count += rhs.visible_file_count;
2118    }
2119}
2120
2121#[derive(Clone, Debug)]
2122struct PathEntry {
2123    id: usize,
2124    path: Arc<Path>,
2125    is_ignored: bool,
2126    scan_id: usize,
2127}
2128
2129impl sum_tree::Item for PathEntry {
2130    type Summary = PathEntrySummary;
2131
2132    fn summary(&self) -> Self::Summary {
2133        PathEntrySummary { max_id: self.id }
2134    }
2135}
2136
2137impl sum_tree::KeyedItem for PathEntry {
2138    type Key = usize;
2139
2140    fn key(&self) -> Self::Key {
2141        self.id
2142    }
2143}
2144
2145#[derive(Clone, Debug, Default)]
2146struct PathEntrySummary {
2147    max_id: usize,
2148}
2149
2150impl sum_tree::Summary for PathEntrySummary {
2151    type Context = ();
2152
2153    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2154        self.max_id = summary.max_id;
2155    }
2156}
2157
2158impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
2159    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2160        *self = summary.max_id;
2161    }
2162}
2163
2164#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2165pub struct PathKey(Arc<Path>);
2166
2167impl Default for PathKey {
2168    fn default() -> Self {
2169        Self(Path::new("").into())
2170    }
2171}
2172
2173impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2174    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2175        self.0 = summary.max_path.clone();
2176    }
2177}
2178
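    /// Performs the initial recursive scan of a local worktree and then processes
    /// file-system events, keeping the shared snapshot up to date and reporting
    /// progress to the worktree as `ScanState` values over the `notify` channel.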
2179struct BackgroundScanner {
2180    fs: Arc<dyn Fs>,
2181    snapshot: Arc<Mutex<Snapshot>>,
2182    notify: Sender<ScanState>,
2183    executor: Arc<executor::Background>,
2184}
2185
2186impl BackgroundScanner {
2187    fn new(
2188        snapshot: Arc<Mutex<Snapshot>>,
2189        notify: Sender<ScanState>,
2190        fs: Arc<dyn Fs>,
2191        executor: Arc<executor::Background>,
2192    ) -> Self {
2193        Self {
2194            fs,
2195            snapshot,
2196            notify,
2197            executor,
2198        }
2199    }
2200
2201    fn abs_path(&self) -> Arc<Path> {
2202        self.snapshot.lock().abs_path.clone()
2203    }
2204
2205    fn snapshot(&self) -> Snapshot {
2206        self.snapshot.lock().clone()
2207    }
2208
2209    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2210        if self.notify.send(ScanState::Scanning).await.is_err() {
2211            return;
2212        }
2213
2214        if let Err(err) = self.scan_dirs().await {
2215            if self
2216                .notify
2217                .send(ScanState::Err(Arc::new(err)))
2218                .await
2219                .is_err()
2220            {
2221                return;
2222            }
2223        }
2224
2225        if self.notify.send(ScanState::Idle).await.is_err() {
2226            return;
2227        }
2228
2229        futures::pin_mut!(events_rx);
2230        while let Some(events) = events_rx.next().await {
2231            if self.notify.send(ScanState::Scanning).await.is_err() {
2232                break;
2233            }
2234
2235            if !self.process_events(events).await {
2236                break;
2237            }
2238
2239            if self.notify.send(ScanState::Idle).await.is_err() {
2240                break;
2241            }
2242        }
2243    }
2244
2245    async fn scan_dirs(&mut self) -> Result<()> {
2246        let root_char_bag;
2247        let next_entry_id;
2248        let is_dir;
2249        {
2250            let snapshot = self.snapshot.lock();
2251            root_char_bag = snapshot.root_char_bag;
2252            next_entry_id = snapshot.next_entry_id.clone();
2253            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
2254        };
2255
2256        if is_dir {
2257            let path: Arc<Path> = Arc::from(Path::new(""));
2258            let abs_path = self.abs_path();
2259            let (tx, rx) = channel::unbounded();
2260            tx.send(ScanJob {
2261                abs_path: abs_path.to_path_buf(),
2262                path,
2263                ignore_stack: IgnoreStack::none(),
2264                scan_queue: tx.clone(),
2265            })
2266            .await
2267            .unwrap();
2268            drop(tx);
2269
2270            self.executor
2271                .scoped(|scope| {
2272                    for _ in 0..self.executor.num_cpus() {
2273                        scope.spawn(async {
2274                            while let Ok(job) = rx.recv().await {
2275                                if let Err(err) = self
2276                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2277                                    .await
2278                                {
2279                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
2280                                }
2281                            }
2282                        });
2283                    }
2284                })
2285                .await;
2286        }
2287
2288        Ok(())
2289    }
2290
2291    async fn scan_dir(
2292        &self,
2293        root_char_bag: CharBag,
2294        next_entry_id: Arc<AtomicUsize>,
2295        job: &ScanJob,
2296    ) -> Result<()> {
2297        let mut new_entries: Vec<Entry> = Vec::new();
2298        let mut new_jobs: Vec<ScanJob> = Vec::new();
2299        let mut ignore_stack = job.ignore_stack.clone();
2300        let mut new_ignore = None;
2301
2302        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2303        while let Some(child_abs_path) = child_paths.next().await {
2304            let child_abs_path = match child_abs_path {
2305                Ok(child_abs_path) => child_abs_path,
2306                Err(error) => {
2307                    log::error!("error processing entry {:?}", error);
2308                    continue;
2309                }
2310            };
2311            let child_name = child_abs_path.file_name().unwrap();
2312            let child_path: Arc<Path> = job.path.join(child_name).into();
2313            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
2314                Some(metadata) => metadata,
2315                None => continue,
2316            };
2317
2318            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2319            if child_name == *GITIGNORE {
2320                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
2321                    Ok(ignore) => {
2322                        let ignore = Arc::new(ignore);
2323                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2324                        new_ignore = Some(ignore);
2325                    }
2326                    Err(error) => {
2327                        log::error!(
2328                            "error loading .gitignore file {:?} - {:?}",
2329                            child_name,
2330                            error
2331                        );
2332                    }
2333                }
2334
2335                // Update the ignore status of any child entries we've already processed to
2336                // reflect the ignore file in the current directory. Because `.gitignore` starts
2337                // with a `.`, it usually appears early in the listing, so there should rarely be
2338                // many entries to update. Also update the ignore stack associated with any new jobs.
2339                let mut new_jobs = new_jobs.iter_mut();
2340                for entry in &mut new_entries {
2341                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2342                    if entry.is_dir() {
2343                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2344                            IgnoreStack::all()
2345                        } else {
2346                            ignore_stack.clone()
2347                        };
2348                    }
2349                }
2350            }
2351
2352            let mut child_entry = Entry::new(
2353                child_path.clone(),
2354                &child_metadata,
2355                &next_entry_id,
2356                root_char_bag,
2357            );
2358
2359            if child_metadata.is_dir {
2360                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
2361                child_entry.is_ignored = is_ignored;
2362                new_entries.push(child_entry);
2363                new_jobs.push(ScanJob {
2364                    abs_path: child_abs_path,
2365                    path: child_path,
2366                    ignore_stack: if is_ignored {
2367                        IgnoreStack::all()
2368                    } else {
2369                        ignore_stack.clone()
2370                    },
2371                    scan_queue: job.scan_queue.clone(),
2372                });
2373            } else {
2374                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
2375                new_entries.push(child_entry);
2376            };
2377        }
2378
2379        self.snapshot
2380            .lock()
2381            .populate_dir(job.path.clone(), new_entries, new_ignore);
2382        for new_job in new_jobs {
2383            job.scan_queue.send(new_job).await.unwrap();
2384        }
2385
2386        Ok(())
2387    }
2388
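        /// Processes a batch of file-system events against a working copy of the
        /// snapshot: removes the affected paths, re-reads their metadata, queues any
        /// (re)created directories for scanning, and finally refreshes ignore
        /// statuses. Returns `false` if the worktree's root path can no longer be
        /// canonicalized.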
2389    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2390        let mut snapshot = self.snapshot();
2391        snapshot.scan_id += 1;
2392
2393        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
2394            abs_path
2395        } else {
2396            return false;
2397        };
2398        let root_char_bag = snapshot.root_char_bag;
2399        let next_entry_id = snapshot.next_entry_id.clone();
2400
2401        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2402        events.dedup_by(|a, b| a.path.starts_with(&b.path));
2403
2404        for event in &events {
2405            match event.path.strip_prefix(&root_abs_path) {
2406                Ok(path) => snapshot.remove_path(&path),
2407                Err(_) => {
2408                    log::error!(
2409                        "unexpected event {:?} for root path {:?}",
2410                        event.path,
2411                        root_abs_path
2412                    );
2413                    continue;
2414                }
2415            }
2416        }
2417
2418        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2419        for event in events {
2420            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
2421                Ok(path) => Arc::from(path.to_path_buf()),
2422                Err(_) => {
2423                    log::error!(
2424                        "unexpected event {:?} for root path {:?}",
2425                        event.path,
2426                        root_abs_path
2427                    );
2428                    continue;
2429                }
2430            };
2431
2432            match self.fs.metadata(&event.path).await {
2433                Ok(Some(metadata)) => {
2434                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
2435                    let mut fs_entry = Entry::new(
2436                        path.clone(),
2437                        &metadata,
2438                        snapshot.next_entry_id.as_ref(),
2439                        snapshot.root_char_bag,
2440                    );
2441                    fs_entry.is_ignored = ignore_stack.is_all();
2442                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
2443                    if metadata.is_dir {
2444                        scan_queue_tx
2445                            .send(ScanJob {
2446                                abs_path: event.path,
2447                                path,
2448                                ignore_stack,
2449                                scan_queue: scan_queue_tx.clone(),
2450                            })
2451                            .await
2452                            .unwrap();
2453                    }
2454                }
2455                Ok(None) => {}
2456                Err(err) => {
2457                    // TODO - create a special 'error' entry in the entries tree to mark this
2458                    log::error!("error reading file on event {:?}", err);
2459                }
2460            }
2461        }
2462
2463        *self.snapshot.lock() = snapshot;
2464
2465        // Scan any directories that were created as part of this event batch.
2466        drop(scan_queue_tx);
2467        self.executor
2468            .scoped(|scope| {
2469                for _ in 0..self.executor.num_cpus() {
2470                    scope.spawn(async {
2471                        while let Ok(job) = scan_queue_rx.recv().await {
2472                            if let Err(err) = self
2473                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2474                                .await
2475                            {
2476                                log::error!("error scanning {:?}: {}", job.abs_path, err);
2477                            }
2478                        }
2479                    });
2480                }
2481            })
2482            .await;
2483
2484        // Attempt to detect renames only over a single batch of file-system events.
2485        self.snapshot.lock().removed_entry_ids.clear();
2486
2487        self.update_ignore_statuses().await;
2488        true
2489    }
2490
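        /// Recomputes ignore statuses beneath every directory whose `.gitignore`
        /// changed during the current scan, and forgets ignore files that no longer
        /// exist on disk. Nested directories are skipped when an ancestor is already
        /// queued, since the job for the ancestor visits them recursively.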
2491    async fn update_ignore_statuses(&self) {
2492        let mut snapshot = self.snapshot();
2493
2494        let mut ignores_to_update = Vec::new();
2495        let mut ignores_to_delete = Vec::new();
2496        for (parent_path, (_, scan_id)) in &snapshot.ignores {
2497            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2498                ignores_to_update.push(parent_path.clone());
2499            }
2500
2501            let ignore_path = parent_path.join(&*GITIGNORE);
2502            if snapshot.entry_for_path(ignore_path).is_none() {
2503                ignores_to_delete.push(parent_path.clone());
2504            }
2505        }
2506
2507        for parent_path in ignores_to_delete {
2508            snapshot.ignores.remove(&parent_path);
2509            self.snapshot.lock().ignores.remove(&parent_path);
2510        }
2511
2512        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2513        ignores_to_update.sort_unstable();
2514        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2515        while let Some(parent_path) = ignores_to_update.next() {
2516            while ignores_to_update
2517                .peek()
2518                .map_or(false, |p| p.starts_with(&parent_path))
2519            {
2520                ignores_to_update.next().unwrap();
2521            }
2522
2523            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2524            ignore_queue_tx
2525                .send(UpdateIgnoreStatusJob {
2526                    path: parent_path,
2527                    ignore_stack,
2528                    ignore_queue: ignore_queue_tx.clone(),
2529                })
2530                .await
2531                .unwrap();
2532        }
2533        drop(ignore_queue_tx);
2534
2535        self.executor
2536            .scoped(|scope| {
2537                for _ in 0..self.executor.num_cpus() {
2538                    scope.spawn(async {
2539                        while let Ok(job) = ignore_queue_rx.recv().await {
2540                            self.update_ignore_status(job, &snapshot).await;
2541                        }
2542                    });
2543                }
2544            })
2545            .await;
2546    }
2547
2548    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
2549        let mut ignore_stack = job.ignore_stack;
2550        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2551            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2552        }
2553
2554        let mut entries_by_id_edits = Vec::new();
2555        let mut entries_by_path_edits = Vec::new();
2556        for mut entry in snapshot.child_entries(&job.path).cloned() {
2557            let was_ignored = entry.is_ignored;
2558            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2559            if entry.is_dir() {
2560                let child_ignore_stack = if entry.is_ignored {
2561                    IgnoreStack::all()
2562                } else {
2563                    ignore_stack.clone()
2564                };
2565                job.ignore_queue
2566                    .send(UpdateIgnoreStatusJob {
2567                        path: entry.path.clone(),
2568                        ignore_stack: child_ignore_stack,
2569                        ignore_queue: job.ignore_queue.clone(),
2570                    })
2571                    .await
2572                    .unwrap();
2573            }
2574
2575            if entry.is_ignored != was_ignored {
2576                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2577                path_entry.scan_id = snapshot.scan_id;
2578                path_entry.is_ignored = entry.is_ignored;
2579                entries_by_id_edits.push(Edit::Insert(path_entry));
2580                entries_by_path_edits.push(Edit::Insert(entry));
2581            }
2582        }
2583
2584        let mut snapshot = self.snapshot.lock();
2585        snapshot.entries_by_path.edit(entries_by_path_edits, &());
2586        snapshot.entries_by_id.edit(entries_by_id_edits, &());
2587    }
2588}
2589
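    /// Re-reads the metadata for `abs_path` from the file system and inserts a
    /// refreshed entry for `path` into the shared snapshot.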
2590async fn refresh_entry(
2591    fs: &dyn Fs,
2592    snapshot: &Mutex<Snapshot>,
2593    path: Arc<Path>,
2594    abs_path: &Path,
2595) -> Result<Entry> {
2596    let root_char_bag;
2597    let next_entry_id;
2598    {
2599        let snapshot = snapshot.lock();
2600        root_char_bag = snapshot.root_char_bag;
2601        next_entry_id = snapshot.next_entry_id.clone();
2602    }
2603    let entry = Entry::new(
2604        path,
2605        &fs.metadata(abs_path)
2606            .await?
2607            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2608        &next_entry_id,
2609        root_char_bag,
2610    );
2611    Ok(snapshot.lock().insert_entry(entry, fs))
2612}
2613
2614fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2615    let mut result = root_char_bag;
2616    result.extend(
2617        path.to_string_lossy()
2618            .chars()
2619            .map(|c| c.to_ascii_lowercase()),
2620    );
2621    result
2622}
2623
2624struct ScanJob {
2625    abs_path: PathBuf,
2626    path: Arc<Path>,
2627    ignore_stack: Arc<IgnoreStack>,
2628    scan_queue: Sender<ScanJob>,
2629}
2630
2631struct UpdateIgnoreStatusJob {
2632    path: Arc<Path>,
2633    ignore_stack: Arc<IgnoreStack>,
2634    ignore_queue: Sender<UpdateIgnoreStatusJob>,
2635}
2636
2637pub trait WorktreeHandle {
2638    #[cfg(test)]
2639    fn flush_fs_events<'a>(
2640        &self,
2641        cx: &'a gpui::TestAppContext,
2642    ) -> futures::future::LocalBoxFuture<'a, ()>;
2643}
2644
2645impl WorktreeHandle for ModelHandle<Worktree> {
2646    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2647    // occurred before the worktree was constructed. These events can cause the worktree to perform
2648    // extra directory scans, and emit extra scan-state notifications.
2649    //
2650    // This function mutates the worktree's directory and waits for those mutations to be picked up,
2651    // to ensure that all redundant FS events have already been processed.
2652    #[cfg(test)]
2653    fn flush_fs_events<'a>(
2654        &self,
2655        cx: &'a gpui::TestAppContext,
2656    ) -> futures::future::LocalBoxFuture<'a, ()> {
2657        use smol::future::FutureExt;
2658
2659        let filename = "fs-event-sentinel";
2660        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
2661        let tree = self.clone();
2662        async move {
2663            std::fs::write(root_path.join(filename), "").unwrap();
2664            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
2665                .await;
2666
2667            std::fs::remove_file(root_path.join(filename)).unwrap();
2668            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
2669                .await;
2670
2671            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2672                .await;
2673        }
2674        .boxed_local()
2675    }
2676}
2677
2678#[derive(Clone, Debug)]
2679struct TraversalProgress<'a> {
2680    max_path: &'a Path,
2681    count: usize,
2682    visible_count: usize,
2683    file_count: usize,
2684    visible_file_count: usize,
2685}
2686
2687impl<'a> TraversalProgress<'a> {
2688    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2689        match (include_ignored, include_dirs) {
2690            (true, true) => self.count,
2691            (true, false) => self.file_count,
2692            (false, true) => self.visible_count,
2693            (false, false) => self.visible_file_count,
2694        }
2695    }
2696}
2697
2698impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2699    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2700        self.max_path = summary.max_path.as_ref();
2701        self.count += summary.count;
2702        self.visible_count += summary.visible_count;
2703        self.file_count += summary.file_count;
2704        self.visible_file_count += summary.visible_file_count;
2705    }
2706}
2707
2708impl<'a> Default for TraversalProgress<'a> {
2709    fn default() -> Self {
2710        Self {
2711            max_path: Path::new(""),
2712            count: 0,
2713            visible_count: 0,
2714            file_count: 0,
2715            visible_file_count: 0,
2716        }
2717    }
2718}
2719
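    /// A cursor over a snapshot's entries in path order that can skip directories
    /// and/or ignored entries, and can seek either by path or by filtered entry
    /// count.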
2720pub struct Traversal<'a> {
2721    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2722    include_ignored: bool,
2723    include_dirs: bool,
2724}
2725
2726impl<'a> Traversal<'a> {
2727    pub fn advance(&mut self) -> bool {
2728        self.advance_to_offset(self.offset() + 1)
2729    }
2730
2731    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2732        self.cursor.seek_forward(
2733            &TraversalTarget::Count {
2734                count: offset,
2735                include_dirs: self.include_dirs,
2736                include_ignored: self.include_ignored,
2737            },
2738            Bias::Right,
2739            &(),
2740        )
2741    }
2742
2743    pub fn advance_to_sibling(&mut self) -> bool {
2744        while let Some(entry) = self.cursor.item() {
2745            self.cursor.seek_forward(
2746                &TraversalTarget::PathSuccessor(&entry.path),
2747                Bias::Left,
2748                &(),
2749            );
2750            if let Some(entry) = self.cursor.item() {
2751                if (self.include_dirs || !entry.is_dir())
2752                    && (self.include_ignored || !entry.is_ignored)
2753                {
2754                    return true;
2755                }
2756            }
2757        }
2758        false
2759    }
2760
2761    pub fn entry(&self) -> Option<&'a Entry> {
2762        self.cursor.item()
2763    }
2764
2765    pub fn offset(&self) -> usize {
2766        self.cursor
2767            .start()
2768            .count(self.include_dirs, self.include_ignored)
2769    }
2770}
2771
2772impl<'a> Iterator for Traversal<'a> {
2773    type Item = &'a Entry;
2774
2775    fn next(&mut self) -> Option<Self::Item> {
2776        if let Some(item) = self.entry() {
2777            self.advance();
2778            Some(item)
2779        } else {
2780            None
2781        }
2782    }
2783}
2784
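    /// A seek target for a `Traversal` cursor: a specific path, the first path that
    /// is not a descendant of a given path, or a count of entries under the
    /// traversal's current filters.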
2785#[derive(Debug)]
2786enum TraversalTarget<'a> {
2787    Path(&'a Path),
2788    PathSuccessor(&'a Path),
2789    Count {
2790        count: usize,
2791        include_ignored: bool,
2792        include_dirs: bool,
2793    },
2794}
2795
2796impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2797    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2798        match self {
2799            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2800            TraversalTarget::PathSuccessor(path) => {
2801                if !cursor_location.max_path.starts_with(path) {
2802                    Ordering::Equal
2803                } else {
2804                    Ordering::Greater
2805                }
2806            }
2807            TraversalTarget::Count {
2808                count,
2809                include_dirs,
2810                include_ignored,
2811            } => Ord::cmp(
2812                count,
2813                &cursor_location.count(*include_dirs, *include_ignored),
2814            ),
2815        }
2816    }
2817}
2818
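    /// Iterates over the immediate children of a directory entry by repeatedly
    /// advancing a `Traversal` to the next sibling.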
2819struct ChildEntriesIter<'a> {
2820    parent_path: &'a Path,
2821    traversal: Traversal<'a>,
2822}
2823
2824impl<'a> Iterator for ChildEntriesIter<'a> {
2825    type Item = &'a Entry;
2826
2827    fn next(&mut self) -> Option<Self::Item> {
2828        if let Some(item) = self.traversal.entry() {
2829            if item.path.starts_with(&self.parent_path) {
2830                self.traversal.advance_to_sibling();
2831                return Some(item);
2832            }
2833        }
2834        None
2835    }
2836}
2837
2838impl<'a> From<&'a Entry> for proto::Entry {
2839    fn from(entry: &'a Entry) -> Self {
2840        Self {
2841            id: entry.id as u64,
2842            is_dir: entry.is_dir(),
2843            path: entry.path.to_string_lossy().to_string(),
2844            inode: entry.inode,
2845            mtime: Some(entry.mtime.into()),
2846            is_symlink: entry.is_symlink,
2847            is_ignored: entry.is_ignored,
2848        }
2849    }
2850}
2851
2852impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2853    type Error = anyhow::Error;
2854
2855    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2856        if let Some(mtime) = entry.mtime {
2857            let kind = if entry.is_dir {
2858                EntryKind::Dir
2859            } else {
2860                let mut char_bag = root_char_bag.clone();
2861                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2862                EntryKind::File(char_bag)
2863            };
2864            let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2865            Ok(Entry {
2866                id: entry.id as usize,
2867                kind,
2868                path: path.clone(),
2869                inode: entry.inode,
2870                mtime: mtime.into(),
2871                is_symlink: entry.is_symlink,
2872                is_ignored: entry.is_ignored,
2873            })
2874        } else {
2875            Err(anyhow!(
2876                "missing mtime in remote worktree entry {:?}",
2877                entry.path
2878            ))
2879        }
2880    }
2881}
2882
2883trait ToPointUtf16 {
2884    fn to_point_utf16(self) -> PointUtf16;
2885}
2886
2887impl ToPointUtf16 for lsp::Position {
2888    fn to_point_utf16(self) -> PointUtf16 {
2889        PointUtf16::new(self.line, self.character)
2890    }
2891}
2892
2893fn diagnostic_ranges<'a>(
2894    diagnostic: &'a lsp::Diagnostic,
2895    abs_path: &'a Path,
2896) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
2897    diagnostic
2898        .related_information
2899        .iter()
2900        .flatten()
2901        .filter_map(move |info| {
2902            if info.location.uri.to_file_path().ok()? == abs_path {
2903                let info_start = PointUtf16::new(
2904                    info.location.range.start.line,
2905                    info.location.range.start.character,
2906                );
2907                let info_end = PointUtf16::new(
2908                    info.location.range.end.line,
2909                    info.location.range.end.character,
2910                );
2911                Some(info_start..info_end)
2912            } else {
2913                None
2914            }
2915        })
2916        .chain(Some(
2917            diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
2918        ))
2919}
2920
2921#[cfg(test)]
2922mod tests {
2923    use super::*;
2924    use crate::fs::FakeFs;
2925    use anyhow::Result;
2926    use client::test::{FakeHttpClient, FakeServer};
2927    use fs::RealFs;
2928    use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
2929    use language::{Diagnostic, LanguageConfig};
2930    use lsp::Url;
2931    use rand::prelude::*;
2932    use serde_json::json;
2933    use std::{cell::RefCell, rc::Rc};
2934    use std::{
2935        env,
2936        fmt::Write,
2937        time::{SystemTime, UNIX_EPOCH},
2938    };
2939    use text::Point;
2940    use unindent::Unindent as _;
2941    use util::test::temp_tree;
2942
2943    #[gpui::test]
2944    async fn test_traversal(mut cx: gpui::TestAppContext) {
2945        let fs = FakeFs::new();
2946        fs.insert_tree(
2947            "/root",
2948            json!({
2949               ".gitignore": "a/b\n",
2950               "a": {
2951                   "b": "",
2952                   "c": "",
2953               }
2954            }),
2955        )
2956        .await;
2957
2958        let client = Client::new();
2959        let http_client = FakeHttpClient::with_404_response();
2960        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
2961
2962        let tree = Worktree::open_local(
2963            client,
2964            user_store,
2965            Arc::from(Path::new("/root")),
2966            Arc::new(fs),
2967            Default::default(),
2968            &mut cx.to_async(),
2969        )
2970        .await
2971        .unwrap();
2972        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2973            .await;
2974
2975        tree.read_with(&cx, |tree, _| {
2976            assert_eq!(
2977                tree.entries(false)
2978                    .map(|entry| entry.path.as_ref())
2979                    .collect::<Vec<_>>(),
2980                vec![
2981                    Path::new(""),
2982                    Path::new(".gitignore"),
2983                    Path::new("a"),
2984                    Path::new("a/c"),
2985                ]
2986            );
2987        })
2988    }
2989
2990    #[gpui::test]
2991    async fn test_save_file(mut cx: gpui::TestAppContext) {
2992        let dir = temp_tree(json!({
2993            "file1": "the old contents",
2994        }));
2995
2996        let client = Client::new();
2997        let http_client = FakeHttpClient::with_404_response();
2998        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
2999
3000        let tree = Worktree::open_local(
3001            client,
3002            user_store,
3003            dir.path(),
3004            Arc::new(RealFs),
3005            Default::default(),
3006            &mut cx.to_async(),
3007        )
3008        .await
3009        .unwrap();
3010        let buffer = tree
3011            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3012            .await
3013            .unwrap();
3014        let save = buffer.update(&mut cx, |buffer, cx| {
3015            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3016            buffer.save(cx).unwrap()
3017        });
3018        save.await.unwrap();
3019
3020        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
3021        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3022    }
3023
3024    #[gpui::test]
3025    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
3026        let dir = temp_tree(json!({
3027            "file1": "the old contents",
3028        }));
3029        let file_path = dir.path().join("file1");
3030
3031        let client = Client::new();
3032        let http_client = FakeHttpClient::with_404_response();
3033        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3034
3035        let tree = Worktree::open_local(
3036            client,
3037            user_store,
3038            file_path.clone(),
3039            Arc::new(RealFs),
3040            Default::default(),
3041            &mut cx.to_async(),
3042        )
3043        .await
3044        .unwrap();
3045        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3046            .await;
3047        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));
3048
3049        let buffer = tree
3050            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
3051            .await
3052            .unwrap();
3053        let save = buffer.update(&mut cx, |buffer, cx| {
3054            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
3055            buffer.save(cx).unwrap()
3056        });
3057        save.await.unwrap();
3058
3059        let new_text = std::fs::read_to_string(file_path).unwrap();
3060        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
3061    }
3062
3063    // #[gpui::test]
3064    // async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
3065    //     let dir = temp_tree(json!({
3066    //         "a": {
3067    //             "file1": "",
3068    //             "file2": "",
3069    //             "file3": "",
3070    //         },
3071    //         "b": {
3072    //             "c": {
3073    //                 "file4": "",
3074    //                 "file5": "",
3075    //             }
3076    //         }
3077    //     }));
3078
3079    //     let user_id = 5;
3080    //     let mut client = Client::new();
3081    //     let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3082    //     let user_store = server.build_user_store(client.clone(), &mut cx).await;
3083    //     let tree = Worktree::open_local(
3084    //         client,
3085    //         user_store.clone(),
3086    //         dir.path(),
3087    //         Arc::new(RealFs),
3088    //         Default::default(),
3089    //         &mut cx.to_async(),
3090    //     )
3091    //     .await
3092    //     .unwrap();
3093
3094    //     let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
3095    //         let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
3096    //         async move { buffer.await.unwrap() }
3097    //     };
3098    //     let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
3099    //         tree.read_with(cx, |tree, _| {
3100    //             tree.entry_for_path(path)
3101    //                 .expect(&format!("no entry for path {}", path))
3102    //                 .id
3103    //         })
3104    //     };
3105
3106    //     let buffer2 = buffer_for_path("a/file2", &mut cx).await;
3107    //     let buffer3 = buffer_for_path("a/file3", &mut cx).await;
3108    //     let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
3109    //     let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;
3110
3111    //     let file2_id = id_for_path("a/file2", &cx);
3112    //     let file3_id = id_for_path("a/file3", &cx);
3113    //     let file4_id = id_for_path("b/c/file4", &cx);
3114
3115    //     // Wait for the initial scan.
3116    //     cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3117    //         .await;
3118
3119    //     // Create a remote copy of this worktree.
3120    //     let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
3121    //     let worktree_id = 1;
3122    //     let proto_message = tree.update(&mut cx, |tree, cx| tree.as_local().unwrap().to_proto(cx));
3123    //     let open_worktree = server.receive::<proto::OpenWorktree>().await.unwrap();
3124    //     server
3125    //         .respond(
3126    //             open_worktree.receipt(),
3127    //             proto::OpenWorktreeResponse { worktree_id: 1 },
3128    //         )
3129    //         .await;
3130
3131    //     let remote = Worktree::remote(
3132    //         proto::JoinWorktreeResponse {
3133    //             worktree: Some(proto_message.await),
3134    //             replica_id: 1,
3135    //             collaborators: Vec::new(),
3136    //         },
3137    //         Client::new(),
3138    //         user_store,
3139    //         Default::default(),
3140    //         &mut cx.to_async(),
3141    //     )
3142    //     .await
3143    //     .unwrap();
3144
3145    //     cx.read(|cx| {
3146    //         assert!(!buffer2.read(cx).is_dirty());
3147    //         assert!(!buffer3.read(cx).is_dirty());
3148    //         assert!(!buffer4.read(cx).is_dirty());
3149    //         assert!(!buffer5.read(cx).is_dirty());
3150    //     });
3151
3152    //     // Rename and delete files and directories.
3153    //     tree.flush_fs_events(&cx).await;
3154    //     std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
3155    //     std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
3156    //     std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
3157    //     std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
3158    //     tree.flush_fs_events(&cx).await;
3159
3160    //     let expected_paths = vec![
3161    //         "a",
3162    //         "a/file1",
3163    //         "a/file2.new",
3164    //         "b",
3165    //         "d",
3166    //         "d/file3",
3167    //         "d/file4",
3168    //     ];
3169
3170    //     cx.read(|app| {
3171    //         assert_eq!(
3172    //             tree.read(app)
3173    //                 .paths()
3174    //                 .map(|p| p.to_str().unwrap())
3175    //                 .collect::<Vec<_>>(),
3176    //             expected_paths
3177    //         );
3178
3179    //         assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
3180    //         assert_eq!(id_for_path("d/file3", &cx), file3_id);
3181    //         assert_eq!(id_for_path("d/file4", &cx), file4_id);
3182
3183    //         assert_eq!(
3184    //             buffer2.read(app).file().unwrap().path().as_ref(),
3185    //             Path::new("a/file2.new")
3186    //         );
3187    //         assert_eq!(
3188    //             buffer3.read(app).file().unwrap().path().as_ref(),
3189    //             Path::new("d/file3")
3190    //         );
3191    //         assert_eq!(
3192    //             buffer4.read(app).file().unwrap().path().as_ref(),
3193    //             Path::new("d/file4")
3194    //         );
3195    //         assert_eq!(
3196    //             buffer5.read(app).file().unwrap().path().as_ref(),
3197    //             Path::new("b/c/file5")
3198    //         );
3199
3200    //         assert!(!buffer2.read(app).file().unwrap().is_deleted());
3201    //         assert!(!buffer3.read(app).file().unwrap().is_deleted());
3202    //         assert!(!buffer4.read(app).file().unwrap().is_deleted());
3203    //         assert!(buffer5.read(app).file().unwrap().is_deleted());
3204    //     });
3205
3206    //     // Update the remote worktree. Check that it becomes consistent with the
3207    //     // local worktree.
3208    //     remote.update(&mut cx, |remote, cx| {
3209    //         let update_message =
3210    //             tree.read(cx)
3211    //                 .snapshot()
3212    //                 .build_update(&initial_snapshot, worktree_id, true);
3213    //         remote
3214    //             .as_remote_mut()
3215    //             .unwrap()
3216    //             .snapshot
3217    //             .apply_update(update_message)
3218    //             .unwrap();
3219
3220    //         assert_eq!(
3221    //             remote
3222    //                 .paths()
3223    //                 .map(|p| p.to_str().unwrap())
3224    //                 .collect::<Vec<_>>(),
3225    //             expected_paths
3226    //         );
3227    //     });
3228    // }
3229
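        // Entries matching `.gitignore` rules should be marked as ignored, both
        // in the initial scan and for files created afterwards; the `.git`
        // directory itself is also reported as ignored.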
3230    #[gpui::test]
3231    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
3232        let dir = temp_tree(json!({
3233            ".git": {},
3234            ".gitignore": "ignored-dir\n",
3235            "tracked-dir": {
3236                "tracked-file1": "tracked contents",
3237            },
3238            "ignored-dir": {
3239                "ignored-file1": "ignored contents",
3240            }
3241        }));
3242
3243        let client = Client::new();
3244        let http_client = FakeHttpClient::with_404_response();
3245        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3246
3247        let tree = Worktree::open_local(
3248            client,
3249            user_store,
3250            dir.path(),
3251            Arc::new(RealFs),
3252            Default::default(),
3253            &mut cx.to_async(),
3254        )
3255        .await
3256        .unwrap();
3257        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3258            .await;
3259        tree.flush_fs_events(&cx).await;
3260        cx.read(|cx| {
3261            let tree = tree.read(cx);
3262            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
3263            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
3264            assert_eq!(tracked.is_ignored, false);
3265            assert_eq!(ignored.is_ignored, true);
3266        });
3267
3268        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
3269        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
3270        tree.flush_fs_events(&cx).await;
3271        cx.read(|cx| {
3272            let tree = tree.read(cx);
3273            let dot_git = tree.entry_for_path(".git").unwrap();
3274            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
3275            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
3276            assert_eq!(tracked.is_ignored, false);
3277            assert_eq!(ignored.is_ignored, true);
3278            assert_eq!(dot_git.is_ignored, true);
3279        });
3280    }
3281
3282    // #[gpui::test]
3283    // async fn test_open_and_share_worktree(mut cx: gpui::TestAppContext) {
3284    //     let user_id = 100;
3285    //     let mut client = Client::new();
3286    //     let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3287    //     let user_store = server.build_user_store(client.clone(), &mut cx).await;
3288
3289    //     let fs = Arc::new(FakeFs::new());
3290    //     fs.insert_tree(
3291    //         "/path",
3292    //         json!({
3293    //             "to": {
3294    //                 "the-dir": {
3295    //                     ".zed.toml": r#"collaborators = ["friend-1", "friend-2"]"#,
3296    //                     "a.txt": "a-contents",
3297    //                 },
3298    //             },
3299    //         }),
3300    //     )
3301    //     .await;
3302
3303    //     let worktree = Worktree::open_local(
3304    //         client.clone(),
3305    //         user_store,
3306    //         "/path/to/the-dir".as_ref(),
3307    //         fs,
3308    //         Default::default(),
3309    //         &mut cx.to_async(),
3310    //     )
3311    //     .await
3312    //     .unwrap();
3313
3314    //     let open_worktree = server.receive::<proto::OpenWorktree>().await.unwrap();
3315    //     assert_eq!(
3316    //         open_worktree.payload,
3317    //         proto::OpenWorktree {
3318    //             root_name: "the-dir".to_string(),
3319    //             authorized_logins: vec!["friend-1".to_string(), "friend-2".to_string()],
3320    //         }
3321    //     );
3322
3323    //     server
3324    //         .respond(
3325    //             open_worktree.receipt(),
3326    //             proto::OpenWorktreeResponse { worktree_id: 5 },
3327    //         )
3328    //         .await;
3329    //     let remote_id = worktree
3330    //         .update(&mut cx, |tree, _| tree.as_local().unwrap().next_remote_id())
3331    //         .await;
3332    //     assert_eq!(remote_id, Some(5));
3333
3334    //     cx.update(move |_| drop(worktree));
3335    //     server.receive::<proto::CloseWorktree>().await.unwrap();
3336    // }
3337
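        // Opening the same path multiple times, even concurrently, should
        // always resolve to a single shared buffer per path.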
3338    #[gpui::test]
3339    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3340        let user_id = 100;
3341        let mut client = Client::new();
3342        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3343        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3344
3345        let fs = Arc::new(FakeFs::new());
3346        fs.insert_tree(
3347            "/the-dir",
3348            json!({
3349                "a.txt": "a-contents",
3350                "b.txt": "b-contents",
3351            }),
3352        )
3353        .await;
3354
3355        let worktree = Worktree::open_local(
3356            client.clone(),
3357            user_store,
3358            "/the-dir".as_ref(),
3359            fs,
3360            Default::default(),
3361            &mut cx.to_async(),
3362        )
3363        .await
3364        .unwrap();
3365
3366        // Spawn multiple tasks to open paths, repeating some paths.
3367        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3368            (
3369                worktree.open_buffer("a.txt", cx),
3370                worktree.open_buffer("b.txt", cx),
3371                worktree.open_buffer("a.txt", cx),
3372            )
3373        });
3374
3375        let buffer_a_1 = buffer_a_1.await.unwrap();
3376        let buffer_a_2 = buffer_a_2.await.unwrap();
3377        let buffer_b = buffer_b.await.unwrap();
3378        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3379        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3380
3381        // There is only one buffer per path.
3382        let buffer_a_id = buffer_a_1.id();
3383        assert_eq!(buffer_a_2.id(), buffer_a_id);
3384
3385        // Open the same path again while it is still open.
3386        drop(buffer_a_1);
3387        let buffer_a_3 = worktree
3388            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3389            .await
3390            .unwrap();
3391
3392        // There's still only one buffer per path.
3393        assert_eq!(buffer_a_3.id(), buffer_a_id);
3394    }
3395
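        // Exercises the buffer's dirty state and the events it emits across
        // edits, saves, and deletion of the underlying file.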
3396    #[gpui::test]
3397    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
3398        use std::fs;
3399
3400        let dir = temp_tree(json!({
3401            "file1": "abc",
3402            "file2": "def",
3403            "file3": "ghi",
3404        }));
3405        let client = Client::new();
3406        let http_client = FakeHttpClient::with_404_response();
3407        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3408
3409        let tree = Worktree::open_local(
3410            client,
3411            user_store,
3412            dir.path(),
3413            Arc::new(RealFs),
3414            Default::default(),
3415            &mut cx.to_async(),
3416        )
3417        .await
3418        .unwrap();
3419        tree.flush_fs_events(&cx).await;
3420        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3421            .await;
3422
3423        let buffer1 = tree
3424            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
3425            .await
3426            .unwrap();
3427        let events = Rc::new(RefCell::new(Vec::new()));
3428
3429        // initially, the buffer isn't dirty.
3430        buffer1.update(&mut cx, |buffer, cx| {
3431            cx.subscribe(&buffer1, {
3432                let events = events.clone();
3433                move |_, _, event, _| events.borrow_mut().push(event.clone())
3434            })
3435            .detach();
3436
3437            assert!(!buffer.is_dirty());
3438            assert!(events.borrow().is_empty());
3439
3440            buffer.edit(vec![1..2], "", cx);
3441        });
3442
3443        // after the first edit, the buffer is dirty, and emits a dirtied event.
3444        buffer1.update(&mut cx, |buffer, cx| {
3445            assert_eq!(buffer.text(), "ac");
3446            assert!(buffer.is_dirty());
3447            assert_eq!(
3448                *events.borrow(),
3449                &[language::Event::Edited, language::Event::Dirtied]
3450            );
3451            events.borrow_mut().clear();
3452            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
3453        });
3454
3455        // after saving, the buffer is not dirty, and emits a saved event.
3456        buffer1.update(&mut cx, |buffer, cx| {
3457            assert!(!buffer.is_dirty());
3458            assert_eq!(*events.borrow(), &[language::Event::Saved]);
3459            events.borrow_mut().clear();
3460
3461            buffer.edit(vec![1..1], "B", cx);
3462            buffer.edit(vec![2..2], "D", cx);
3463        });
3464
3465        // after editing again, the buffer is dirty, and emits another dirtied event.
3466        buffer1.update(&mut cx, |buffer, cx| {
3467            assert_eq!(buffer.text(), "aBDc");
3468            assert!(buffer.is_dirty());
3469            assert_eq!(
3470                *events.borrow(),
3471                &[
3472                    language::Event::Edited,
3473                    language::Event::Dirtied,
3474                    language::Event::Edited,
3475                ],
3476            );
3477            events.borrow_mut().clear();
3478
3479            // TODO - currently, after restoring the buffer to its
3480            // previously-saved state, the buffer is still considered dirty.
3481            buffer.edit([1..3], "", cx);
3482            assert_eq!(buffer.text(), "ac");
3483            assert!(buffer.is_dirty());
3484        });
3485
3486        assert_eq!(*events.borrow(), &[language::Event::Edited]);
3487
3488        // When a file is deleted, the buffer is considered dirty.
3489        let events = Rc::new(RefCell::new(Vec::new()));
3490        let buffer2 = tree
3491            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
3492            .await
3493            .unwrap();
3494        buffer2.update(&mut cx, |_, cx| {
3495            cx.subscribe(&buffer2, {
3496                let events = events.clone();
3497                move |_, _, event, _| events.borrow_mut().push(event.clone())
3498            })
3499            .detach();
3500        });
3501
3502        fs::remove_file(dir.path().join("file2")).unwrap();
3503        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
3504        assert_eq!(
3505            *events.borrow(),
3506            &[language::Event::Dirtied, language::Event::FileHandleChanged]
3507        );
3508
3509        // When a file that is already dirty is deleted, we don't emit a Dirtied event.
3510        let events = Rc::new(RefCell::new(Vec::new()));
3511        let buffer3 = tree
3512            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
3513            .await
3514            .unwrap();
3515        buffer3.update(&mut cx, |_, cx| {
3516            cx.subscribe(&buffer3, {
3517                let events = events.clone();
3518                move |_, _, event, _| events.borrow_mut().push(event.clone())
3519            })
3520            .detach();
3521        });
3522
3523        tree.flush_fs_events(&cx).await;
3524        buffer3.update(&mut cx, |buffer, cx| {
3525            buffer.edit(Some(0..0), "x", cx);
3526        });
3527        events.borrow_mut().clear();
3528        fs::remove_file(dir.path().join("file3")).unwrap();
3529        buffer3
3530            .condition(&cx, |_, _| !events.borrow().is_empty())
3531            .await;
3532        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
3533        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
3534    }
3535
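        // When the file changes on disk, an unmodified buffer should reload to
        // match it, while a modified buffer should keep its contents and be
        // marked as having a conflict.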
3536    #[gpui::test]
3537    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
3538        use std::fs;
3539
3540        let initial_contents = "aaa\nbbbbb\nc\n";
3541        let dir = temp_tree(json!({ "the-file": initial_contents }));
3542        let client = Client::new();
3543        let http_client = FakeHttpClient::with_404_response();
3544        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3545
3546        let tree = Worktree::open_local(
3547            client,
3548            user_store,
3549            dir.path(),
3550            Arc::new(RealFs),
3551            Default::default(),
3552            &mut cx.to_async(),
3553        )
3554        .await
3555        .unwrap();
3556        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3557            .await;
3558
3559        let abs_path = dir.path().join("the-file");
3560        let buffer = tree
3561            .update(&mut cx, |tree, cx| {
3562                tree.open_buffer(Path::new("the-file"), cx)
3563            })
3564            .await
3565            .unwrap();
3566
3567        // TODO
3568        // Add a cursor on each row.
3569        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
3570        //     assert!(!buffer.is_dirty());
3571        //     buffer.add_selection_set(
3572        //         &(0..3)
3573        //             .map(|row| Selection {
3574        //                 id: row as usize,
3575        //                 start: Point::new(row, 1),
3576        //                 end: Point::new(row, 1),
3577        //                 reversed: false,
3578        //                 goal: SelectionGoal::None,
3579        //             })
3580        //             .collect::<Vec<_>>(),
3581        //         cx,
3582        //     )
3583        // });
3584
3585        // Change the file on disk, adding two new lines of text, and removing
3586        // one line.
3587        buffer.read_with(&cx, |buffer, _| {
3588            assert!(!buffer.is_dirty());
3589            assert!(!buffer.has_conflict());
3590        });
3591        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
3592        fs::write(&abs_path, new_contents).unwrap();
3593
3594        // Because the buffer was not modified, it is reloaded from disk. Its
3595        // contents are edited according to the diff between the old and new
3596        // file contents.
3597        buffer
3598            .condition(&cx, |buffer, _| buffer.text() == new_contents)
3599            .await;
3600
3601        buffer.update(&mut cx, |buffer, _| {
3602            assert_eq!(buffer.text(), new_contents);
3603            assert!(!buffer.is_dirty());
3604            assert!(!buffer.has_conflict());
3605
3606            // TODO
3607            // let cursor_positions = buffer
3608            //     .selection_set(selection_set_id)
3609            //     .unwrap()
3610            //     .selections::<Point>(&*buffer)
3611            //     .map(|selection| {
3612            //         assert_eq!(selection.start, selection.end);
3613            //         selection.start
3614            //     })
3615            //     .collect::<Vec<_>>();
3616            // assert_eq!(
3617            //     cursor_positions,
3618            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
3619            // );
3620        });
3621
3622        // Modify the buffer, making it dirty.
3623        buffer.update(&mut cx, |buffer, cx| {
3624            buffer.edit(vec![0..0], " ", cx);
3625            assert!(buffer.is_dirty());
3626            assert!(!buffer.has_conflict());
3627        });
3628
3629        // Change the file on disk again, adding blank lines to the beginning.
3630        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();
3631
3632        // Because the buffer is modified, it doesn't reload from disk, but is
3633        // marked as having a conflict.
3634        buffer
3635            .condition(&cx, |buffer, _| buffer.has_conflict())
3636            .await;
3637    }
3638
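        // Diagnostics published by the fake language server should appear on
        // the corresponding buffer, even when that buffer is opened after the
        // diagnostics were received.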
3639    #[gpui::test]
3640    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
3641        let (language_server_config, mut fake_server) =
3642            LanguageServerConfig::fake(cx.background()).await;
3643        let mut languages = LanguageRegistry::new();
3644        languages.add(Arc::new(Language::new(
3645            LanguageConfig {
3646                name: "Rust".to_string(),
3647                path_suffixes: vec!["rs".to_string()],
3648                language_server: Some(language_server_config),
3649                ..Default::default()
3650            },
3651            Some(tree_sitter_rust::language()),
3652        )));
3653
3654        let dir = temp_tree(json!({
3655            "a.rs": "fn a() { A }",
3656            "b.rs": "const y: i32 = 1",
3657        }));
3658
3659        let client = Client::new();
3660        let http_client = FakeHttpClient::with_404_response();
3661        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3662
3663        let tree = Worktree::open_local(
3664            client,
3665            user_store,
3666            dir.path(),
3667            Arc::new(RealFs),
3668            Arc::new(languages),
3669            &mut cx.to_async(),
3670        )
3671        .await
3672        .unwrap();
3673        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3674            .await;
3675
3676        // Cause worktree to start the fake language server
3677        let _buffer = tree
3678            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
3679            .await
3680            .unwrap();
3681
3682        fake_server
3683            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
3684                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
3685                version: None,
3686                diagnostics: vec![lsp::Diagnostic {
3687                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
3688                    severity: Some(lsp::DiagnosticSeverity::ERROR),
3689                    message: "undefined variable 'A'".to_string(),
3690                    ..Default::default()
3691                }],
3692            })
3693            .await;
3694
3695        let buffer = tree
3696            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3697            .await
3698            .unwrap();
3699
3700        buffer.read_with(&cx, |buffer, _| {
3701            let diagnostics = buffer
3702                .snapshot()
3703                .diagnostics_in_range::<_, Point>(0..buffer.len())
3704                .collect::<Vec<_>>();
3705            assert_eq!(
3706                diagnostics,
3707                &[DiagnosticEntry {
3708                    range: Point::new(0, 9)..Point::new(0, 10),
3709                    diagnostic: Diagnostic {
3710                        severity: lsp::DiagnosticSeverity::ERROR,
3711                        message: "undefined variable 'A'".to_string(),
3712                        group_id: 0,
3713                        is_primary: true,
3714                        ..Default::default()
3715                    }
3716                }]
3717            )
3718        });
3719    }
3720
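        // Diagnostics that reference each other through `related_information`
        // should be grouped together, sharing a `group_id` with a single
        // primary entry per group.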
3721    #[gpui::test]
3722    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
3723        let fs = Arc::new(FakeFs::new());
3724        let client = Client::new();
3725        let http_client = FakeHttpClient::with_404_response();
3726        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3727
3728        fs.insert_tree(
3729            "/the-dir",
3730            json!({
3731                "a.rs": "
3732                    fn foo(mut v: Vec<usize>) {
3733                        for x in &v {
3734                            v.push(1);
3735                        }
3736                    }
3737                "
3738                .unindent(),
3739            }),
3740        )
3741        .await;
3742
3743        let worktree = Worktree::open_local(
3744            client.clone(),
3745            user_store,
3746            "/the-dir".as_ref(),
3747            fs,
3748            Default::default(),
3749            &mut cx.to_async(),
3750        )
3751        .await
3752        .unwrap();
3753
3754        let buffer = worktree
3755            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3756            .await
3757            .unwrap();
3758
3759        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
3760        let message = lsp::PublishDiagnosticsParams {
3761            uri: buffer_uri.clone(),
3762            diagnostics: vec![
3763                lsp::Diagnostic {
3764                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3765                    severity: Some(DiagnosticSeverity::WARNING),
3766                    message: "error 1".to_string(),
3767                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3768                        location: lsp::Location {
3769                            uri: buffer_uri.clone(),
3770                            range: lsp::Range::new(
3771                                lsp::Position::new(1, 8),
3772                                lsp::Position::new(1, 9),
3773                            ),
3774                        },
3775                        message: "error 1 hint 1".to_string(),
3776                    }]),
3777                    ..Default::default()
3778                },
3779                lsp::Diagnostic {
3780                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3781                    severity: Some(DiagnosticSeverity::HINT),
3782                    message: "error 1 hint 1".to_string(),
3783                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3784                        location: lsp::Location {
3785                            uri: buffer_uri.clone(),
3786                            range: lsp::Range::new(
3787                                lsp::Position::new(1, 8),
3788                                lsp::Position::new(1, 9),
3789                            ),
3790                        },
3791                        message: "original diagnostic".to_string(),
3792                    }]),
3793                    ..Default::default()
3794                },
3795                lsp::Diagnostic {
3796                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
3797                    severity: Some(DiagnosticSeverity::ERROR),
3798                    message: "error 2".to_string(),
3799                    related_information: Some(vec![
3800                        lsp::DiagnosticRelatedInformation {
3801                            location: lsp::Location {
3802                                uri: buffer_uri.clone(),
3803                                range: lsp::Range::new(
3804                                    lsp::Position::new(1, 13),
3805                                    lsp::Position::new(1, 15),
3806                                ),
3807                            },
3808                            message: "error 2 hint 1".to_string(),
3809                        },
3810                        lsp::DiagnosticRelatedInformation {
3811                            location: lsp::Location {
3812                                uri: buffer_uri.clone(),
3813                                range: lsp::Range::new(
3814                                    lsp::Position::new(1, 13),
3815                                    lsp::Position::new(1, 15),
3816                                ),
3817                            },
3818                            message: "error 2 hint 2".to_string(),
3819                        },
3820                    ]),
3821                    ..Default::default()
3822                },
3823                lsp::Diagnostic {
3824                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
3825                    severity: Some(DiagnosticSeverity::HINT),
3826                    message: "error 2 hint 1".to_string(),
3827                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3828                        location: lsp::Location {
3829                            uri: buffer_uri.clone(),
3830                            range: lsp::Range::new(
3831                                lsp::Position::new(2, 8),
3832                                lsp::Position::new(2, 17),
3833                            ),
3834                        },
3835                        message: "original diagnostic".to_string(),
3836                    }]),
3837                    ..Default::default()
3838                },
3839                lsp::Diagnostic {
3840                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
3841                    severity: Some(DiagnosticSeverity::HINT),
3842                    message: "error 2 hint 2".to_string(),
3843                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3844                        location: lsp::Location {
3845                            uri: buffer_uri.clone(),
3846                            range: lsp::Range::new(
3847                                lsp::Position::new(2, 8),
3848                                lsp::Position::new(2, 17),
3849                            ),
3850                        },
3851                        message: "original diagnostic".to_string(),
3852                    }]),
3853                    ..Default::default()
3854                },
3855            ],
3856            version: None,
3857        };
3858
3859        worktree
3860            .update(&mut cx, |tree, cx| tree.update_diagnostics(message, cx))
3861            .unwrap();
3862        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());
3863
3864        assert_eq!(
3865            buffer
3866                .diagnostics_in_range::<_, Point>(0..buffer.len())
3867                .collect::<Vec<_>>(),
3868            &[
3869                DiagnosticEntry {
3870                    range: Point::new(1, 8)..Point::new(1, 9),
3871                    diagnostic: Diagnostic {
3872                        severity: DiagnosticSeverity::WARNING,
3873                        message: "error 1".to_string(),
3874                        group_id: 0,
3875                        is_primary: true,
3876                        ..Default::default()
3877                    }
3878                },
3879                DiagnosticEntry {
3880                    range: Point::new(1, 8)..Point::new(1, 9),
3881                    diagnostic: Diagnostic {
3882                        severity: DiagnosticSeverity::HINT,
3883                        message: "error 1 hint 1".to_string(),
3884                        group_id: 0,
3885                        is_primary: false,
3886                        ..Default::default()
3887                    }
3888                },
3889                DiagnosticEntry {
3890                    range: Point::new(1, 13)..Point::new(1, 15),
3891                    diagnostic: Diagnostic {
3892                        severity: DiagnosticSeverity::HINT,
3893                        message: "error 2 hint 1".to_string(),
3894                        group_id: 1,
3895                        is_primary: false,
3896                        ..Default::default()
3897                    }
3898                },
3899                DiagnosticEntry {
3900                    range: Point::new(1, 13)..Point::new(1, 15),
3901                    diagnostic: Diagnostic {
3902                        severity: DiagnosticSeverity::HINT,
3903                        message: "error 2 hint 2".to_string(),
3904                        group_id: 1,
3905                        is_primary: false,
3906                        ..Default::default()
3907                    }
3908                },
3909                DiagnosticEntry {
3910                    range: Point::new(2, 8)..Point::new(2, 17),
3911                    diagnostic: Diagnostic {
3912                        severity: DiagnosticSeverity::ERROR,
3913                        message: "error 2".to_string(),
3914                        group_id: 1,
3915                        is_primary: true,
3916                        ..Default::default()
3917                    }
3918                }
3919            ]
3920        );
3921
3922        assert_eq!(
3923            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
3924            &[
3925                DiagnosticEntry {
3926                    range: Point::new(1, 8)..Point::new(1, 9),
3927                    diagnostic: Diagnostic {
3928                        severity: DiagnosticSeverity::WARNING,
3929                        message: "error 1".to_string(),
3930                        group_id: 0,
3931                        is_primary: true,
3932                        ..Default::default()
3933                    }
3934                },
3935                DiagnosticEntry {
3936                    range: Point::new(1, 8)..Point::new(1, 9),
3937                    diagnostic: Diagnostic {
3938                        severity: DiagnosticSeverity::HINT,
3939                        message: "error 1 hint 1".to_string(),
3940                        group_id: 0,
3941                        is_primary: false,
3942                        ..Default::default()
3943                    }
3944                },
3945            ]
3946        );
3947        assert_eq!(
3948            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
3949            &[
3950                DiagnosticEntry {
3951                    range: Point::new(1, 13)..Point::new(1, 15),
3952                    diagnostic: Diagnostic {
3953                        severity: DiagnosticSeverity::HINT,
3954                        message: "error 2 hint 1".to_string(),
3955                        group_id: 1,
3956                        is_primary: false,
3957                        ..Default::default()
3958                    }
3959                },
3960                DiagnosticEntry {
3961                    range: Point::new(1, 13)..Point::new(1, 15),
3962                    diagnostic: Diagnostic {
3963                        severity: DiagnosticSeverity::HINT,
3964                        message: "error 2 hint 2".to_string(),
3965                        group_id: 1,
3966                        is_primary: false,
3967                        ..Default::default()
3968                    }
3969                },
3970                DiagnosticEntry {
3971                    range: Point::new(2, 8)..Point::new(2, 17),
3972                    diagnostic: Diagnostic {
3973                        severity: DiagnosticSeverity::ERROR,
3974                        message: "error 2".to_string(),
3975                        group_id: 1,
3976                        is_primary: true,
3977                        ..Default::default()
3978                    }
3979                }
3980            ]
3981        );
3982    }
3983
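        // Randomized test: repeatedly mutates a temporary directory tree, feeds
        // the resulting events to a `BackgroundScanner`, and checks that its
        // snapshots remain internally consistent and can be brought up to date
        // via `build_update`/`apply_update`.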
3984    #[gpui::test(iterations = 100)]
3985    fn test_random(mut rng: StdRng) {
3986        let operations = env::var("OPERATIONS")
3987            .map(|o| o.parse().unwrap())
3988            .unwrap_or(40);
3989        let initial_entries = env::var("INITIAL_ENTRIES")
3990            .map(|o| o.parse().unwrap())
3991            .unwrap_or(20);
3992
3993        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3994        for _ in 0..initial_entries {
3995            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3996        }
3997        log::info!("Generated initial tree");
3998
3999        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4000        let fs = Arc::new(RealFs);
4001        let next_entry_id = Arc::new(AtomicUsize::new(0));
4002        let mut initial_snapshot = Snapshot {
4003            id: 0,
4004            scan_id: 0,
4005            abs_path: root_dir.path().into(),
4006            entries_by_path: Default::default(),
4007            entries_by_id: Default::default(),
4008            removed_entry_ids: Default::default(),
4009            ignores: Default::default(),
4010            root_name: Default::default(),
4011            root_char_bag: Default::default(),
4012            next_entry_id: next_entry_id.clone(),
4013        };
4014        initial_snapshot.insert_entry(
4015            Entry::new(
4016                Path::new("").into(),
4017                &smol::block_on(fs.metadata(root_dir.path()))
4018                    .unwrap()
4019                    .unwrap(),
4020                &next_entry_id,
4021                Default::default(),
4022            ),
4023            fs.as_ref(),
4024        );
4025        let mut scanner = BackgroundScanner::new(
4026            Arc::new(Mutex::new(initial_snapshot.clone())),
4027            notify_tx,
4028            fs.clone(),
4029            Arc::new(gpui::executor::Background::new()),
4030        );
4031        smol::block_on(scanner.scan_dirs()).unwrap();
4032        scanner.snapshot().check_invariants();
4033
4034        let mut events = Vec::new();
4035        let mut snapshots = Vec::new();
4036        let mut mutations_len = operations;
4037        while mutations_len > 1 {
4038            if !events.is_empty() && rng.gen_bool(0.4) {
4039                let len = rng.gen_range(0..=events.len());
4040                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
4041                log::info!("Delivering events: {:#?}", to_deliver);
4042                smol::block_on(scanner.process_events(to_deliver));
4043                scanner.snapshot().check_invariants();
4044            } else {
4045                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
4046                mutations_len -= 1;
4047            }
4048
4049            if rng.gen_bool(0.2) {
4050                snapshots.push(scanner.snapshot());
4051            }
4052        }
4053        log::info!("Quiescing: {:#?}", events);
4054        smol::block_on(scanner.process_events(events));
4055        scanner.snapshot().check_invariants();
4056
4057        let (notify_tx, _notify_rx) = smol::channel::unbounded();
4058        let mut new_scanner = BackgroundScanner::new(
4059            Arc::new(Mutex::new(initial_snapshot)),
4060            notify_tx,
4061            scanner.fs.clone(),
4062            scanner.executor.clone(),
4063        );
4064        smol::block_on(new_scanner.scan_dirs()).unwrap();
4065        assert_eq!(
4066            scanner.snapshot().to_vec(true),
4067            new_scanner.snapshot().to_vec(true)
4068        );
4069
4070        for mut prev_snapshot in snapshots {
4071            let include_ignored = rng.gen::<bool>();
4072            if !include_ignored {
4073                let mut entries_by_path_edits = Vec::new();
4074                let mut entries_by_id_edits = Vec::new();
4075                for entry in prev_snapshot
4076                    .entries_by_id
4077                    .cursor::<()>()
4078                    .filter(|e| e.is_ignored)
4079                {
4080                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4081                    entries_by_id_edits.push(Edit::Remove(entry.id));
4082                }
4083
4084                prev_snapshot
4085                    .entries_by_path
4086                    .edit(entries_by_path_edits, &());
4087                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4088            }
4089
4090            let update = scanner
4091                .snapshot()
4092                .build_update(&prev_snapshot, 0, 0, include_ignored);
4093            prev_snapshot.apply_update(update).unwrap();
4094            assert_eq!(
4095                prev_snapshot.to_vec(true),
4096                scanner.snapshot().to_vec(include_ignored)
4097            );
4098        }
4099    }
4100
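        // Performs one random mutation under `root_path` (creating an entry,
        // writing a `.gitignore`, renaming, or deleting) and returns synthetic
        // `fsevent::Event`s for the affected paths.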
4101    fn randomly_mutate_tree(
4102        root_path: &Path,
4103        insertion_probability: f64,
4104        rng: &mut impl Rng,
4105    ) -> Result<Vec<fsevent::Event>> {
4106        let root_path = root_path.canonicalize().unwrap();
4107        let (dirs, files) = read_dir_recursive(root_path.clone());
4108
4109        let mut events = Vec::new();
4110        let mut record_event = |path: PathBuf| {
4111            events.push(fsevent::Event {
4112                event_id: SystemTime::now()
4113                    .duration_since(UNIX_EPOCH)
4114                    .unwrap()
4115                    .as_secs(),
4116                flags: fsevent::StreamFlags::empty(),
4117                path,
4118            });
4119        };
4120
4121        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4122            let path = dirs.choose(rng).unwrap();
4123            let new_path = path.join(gen_name(rng));
4124
4125            if rng.gen() {
4126                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4127                std::fs::create_dir(&new_path)?;
4128            } else {
4129                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4130                std::fs::write(&new_path, "")?;
4131            }
4132            record_event(new_path);
4133        } else if rng.gen_bool(0.05) {
4134            let ignore_dir_path = dirs.choose(rng).unwrap();
4135            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4136
4137            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4138            let files_to_ignore = {
4139                let len = rng.gen_range(0..=subfiles.len());
4140                subfiles.choose_multiple(rng, len)
4141            };
4142            let dirs_to_ignore = {
4143                let len = rng.gen_range(0..subdirs.len());
4144                subdirs.choose_multiple(rng, len)
4145            };
4146
4147            let mut ignore_contents = String::new();
4148            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4149                writeln!(
4150                    ignore_contents,
4151                    "{}",
4152                    path_to_ignore
4153                        .strip_prefix(&ignore_dir_path)?
4154                        .to_str()
4155                        .unwrap()
4156                )
4157                .unwrap();
4158            }
4159            log::info!(
4160                "Creating {:?} with contents:\n{}",
4161                ignore_path.strip_prefix(&root_path)?,
4162                ignore_contents
4163            );
4164            std::fs::write(&ignore_path, ignore_contents).unwrap();
4165            record_event(ignore_path);
4166        } else {
4167            let old_path = {
4168                let file_path = files.choose(rng);
4169                let dir_path = dirs[1..].choose(rng);
4170                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4171            };
4172
4173            let is_rename = rng.gen();
4174            if is_rename {
4175                let new_path_parent = dirs
4176                    .iter()
4177                    .filter(|d| !d.starts_with(old_path))
4178                    .choose(rng)
4179                    .unwrap();
4180
4181                let overwrite_existing_dir =
4182                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4183                let new_path = if overwrite_existing_dir {
4184                    std::fs::remove_dir_all(&new_path_parent).ok();
4185                    new_path_parent.to_path_buf()
4186                } else {
4187                    new_path_parent.join(gen_name(rng))
4188                };
4189
4190                log::info!(
4191                    "Renaming {:?} to {}{:?}",
4192                    old_path.strip_prefix(&root_path)?,
4193                    if overwrite_existing_dir {
4194                        "overwrite "
4195                    } else {
4196                        ""
4197                    },
4198                    new_path.strip_prefix(&root_path)?
4199                );
4200                std::fs::rename(&old_path, &new_path)?;
4201                record_event(old_path.clone());
4202                record_event(new_path);
4203            } else if old_path.is_dir() {
4204                let (dirs, files) = read_dir_recursive(old_path.clone());
4205
4206                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4207                std::fs::remove_dir_all(&old_path).unwrap();
4208                for file in files {
4209                    record_event(file);
4210                }
4211                for dir in dirs {
4212                    record_event(dir);
4213                }
4214            } else {
4215                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4216                std::fs::remove_file(old_path).unwrap();
4217                record_event(old_path.clone());
4218            }
4219        }
4220
4221        Ok(events)
4222    }
4223
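        // Recursively collects all directories (including `path` itself) and
        // all files beneath `path`.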
4224    fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4225        let child_entries = std::fs::read_dir(&path).unwrap();
4226        let mut dirs = vec![path];
4227        let mut files = Vec::new();
4228        for child_entry in child_entries {
4229            let child_path = child_entry.unwrap().path();
4230            if child_path.is_dir() {
4231                let (child_dirs, child_files) = read_dir_recursive(child_path);
4232                dirs.extend(child_dirs);
4233                files.extend(child_files);
4234            } else {
4235                files.push(child_path);
4236            }
4237        }
4238        (dirs, files)
4239    }
4240
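        // Generates a random six-character alphanumeric name.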
4241    fn gen_name(rng: &mut impl Rng) -> String {
4242        (0..6)
4243            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4244            .map(char::from)
4245            .collect()
4246    }
4247
4248    impl Snapshot {
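            // Asserts internal consistency of the snapshot: the file iterators
            // agree with `entries_by_path`, walking the tree via `child_entries`
            // visits the same paths as `entries_by_path`, and every directory
            // with registered ignores has a corresponding `.gitignore` entry.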
4249        fn check_invariants(&self) {
4250            let mut files = self.files(true, 0);
4251            let mut visible_files = self.files(false, 0);
4252            for entry in self.entries_by_path.cursor::<()>() {
4253                if entry.is_file() {
4254                    assert_eq!(files.next().unwrap().inode, entry.inode);
4255                    if !entry.is_ignored {
4256                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4257                    }
4258                }
4259            }
4260            assert!(files.next().is_none());
4261            assert!(visible_files.next().is_none());
4262
4263            let mut bfs_paths = Vec::new();
4264            let mut stack = vec![Path::new("")];
4265            while let Some(path) = stack.pop() {
4266                bfs_paths.push(path);
4267                let ix = stack.len();
4268                for child_entry in self.child_entries(path) {
4269                    stack.insert(ix, &child_entry.path);
4270                }
4271            }
4272
4273            let dfs_paths = self
4274                .entries_by_path
4275                .cursor::<()>()
4276                .map(|e| e.path.as_ref())
4277                .collect::<Vec<_>>();
4278            assert_eq!(bfs_paths, dfs_paths);
4279
4280            for (ignore_parent_path, _) in &self.ignores {
4281                assert!(self.entry_for_path(ignore_parent_path).is_some());
4282                assert!(self
4283                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4284                    .is_some());
4285            }
4286        }
4287
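            // Flattens the snapshot into a sorted list of (path, inode,
            // is_ignored) tuples for comparison in assertions, optionally
            // excluding ignored entries.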
4288        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4289            let mut paths = Vec::new();
4290            for entry in self.entries_by_path.cursor::<()>() {
4291                if include_ignored || !entry.is_ignored {
4292                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4293                }
4294            }
4295            paths.sort_by(|a, b| a.0.cmp(&b.0));
4296            paths
4297        }
4298    }
4299}