worktree.rs

   1use super::{
   2    fs::{self, Fs},
   3    ignore::IgnoreStack,
   4    DiagnosticSummary, ProjectEntry,
   5};
   6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
   7use anyhow::{anyhow, Result};
   8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
   9use clock::ReplicaId;
  10use collections::{hash_map, HashMap, HashSet};
  11use futures::{Stream, StreamExt};
  12use fuzzy::CharBag;
  13use gpui::{
  14    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
  15    Task, UpgradeModelHandle, WeakModelHandle,
  16};
  17use language::{
  18    range_from_lsp, Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Operation,
  19    PointUtf16, Rope,
  20};
  21use lazy_static::lazy_static;
  22use parking_lot::Mutex;
  23use postage::{
  24    prelude::{Sink as _, Stream as _},
  25    watch,
  26};
  27use serde::Deserialize;
  28use smol::channel::{self, Sender};
  29use std::{
  30    any::Any,
  31    cmp::{self, Ordering},
  32    convert::{TryFrom, TryInto},
  33    ffi::{OsStr, OsString},
  34    fmt,
  35    future::Future,
  36    ops::Deref,
  37    path::{Path, PathBuf},
  38    sync::{
  39        atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
  40        Arc,
  41    },
  42    time::{Duration, SystemTime},
  43};
  44use sum_tree::{Bias, TreeMap};
  45use sum_tree::{Edit, SeekTarget, SumTree};
  46use util::{post_inc, ResultExt, TryFutureExt};
  47
lazy_static! {
    // Interned `OsStr` for the well-known name of git's per-directory
    // ignore file, compared against file names during scanning.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
  51
/// Progress of the background file-system scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan currently in progress.
    Idle,
    /// A scan is running; the snapshot may be incomplete.
    Scanning,
    /// The scan failed. The error is wrapped in `Arc` so this variant
    /// remains `Clone`.
    Err(Arc<anyhow::Error>),
}
  58
/// Stable identifier for a worktree. For local worktrees this is the GPUI
/// model id (see `from_usize`); for remote worktrees it is derived from the
/// id sent over the wire (see `from_proto`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
  61
/// A tree of files rooted at a path. `Local` worktrees are backed by the
/// local file system and kept current by a background scanner; `Remote`
/// worktrees are replicas synchronized from a collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
  66
impl Entity for Worktree {
    type Event = ();

    /// When a local worktree that completed its server registration is
    /// released, tell the server to drop it from the project.
    fn release(&mut self, cx: &mut MutableAppContext) {
        if let Some(worktree) = self.as_local_mut() {
            if let Registration::Done { project_id } = worktree.registration {
                let client = worktree.client.clone();
                let unregister_message = proto::UnregisterWorktree {
                    project_id,
                    worktree_id: worktree.id().to_proto(),
                };
                // Fire-and-forget: the model is going away, so only log a
                // failed send.
                cx.foreground()
                    .spawn(async move {
                        client.send(unregister_message).await?;
                        Ok::<_, anyhow::Error>(())
                    })
                    .detach_and_log_err(cx);
            }
        }
    }
}
  88
  89impl Worktree {
    /// Opens a worktree for the directory or file at `path` on the local
    /// file system and starts its background scanner.
    ///
    /// Returns the new worktree model; scanning proceeds asynchronously and
    /// the snapshot fills in over time (see `poll_snapshot`).
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // Spawn the scanner on the background executor; it watches the
            // root with 100ms latency and reports progress on
            // `scan_states_tx`. Storing the task keeps the scanner alive for
            // the worktree's lifetime.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
 113
    /// Builds a remote worktree replica from the state serialized in
    /// `worktree`, and wires up the background tasks that keep the replica's
    /// snapshot in sync as updates arrive from the host.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        // Lowercased characters of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Deserialize entries and diagnostic summaries on the background
        // executor, since a worktree can contain many entries.
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Skip entries that fail to deserialize rather than
                        // rejecting the whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    // A remote worktree has no path on the local disk.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Task 1: apply each incoming `UpdateWorktree` message to a
                // copy of the latest snapshot and publish the result on the
                // watch channel. Errors are logged, not fatal.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                // Task 2: whenever a new snapshot is published, pull it into
                // the model via `poll_snapshot`; stop once the model has been
                // dropped (weak handle fails to upgrade).
                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    user_store,
                    diagnostic_summaries,
                })
            })
        });

        Ok(worktree)
    }
 234
 235    pub fn as_local(&self) -> Option<&LocalWorktree> {
 236        if let Worktree::Local(worktree) = self {
 237            Some(worktree)
 238        } else {
 239            None
 240        }
 241    }
 242
 243    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
 244        if let Worktree::Remote(worktree) = self {
 245            Some(worktree)
 246        } else {
 247            None
 248        }
 249    }
 250
 251    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
 252        if let Worktree::Local(worktree) = self {
 253            Some(worktree)
 254        } else {
 255            None
 256        }
 257    }
 258
 259    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
 260        if let Worktree::Remote(worktree) = self {
 261            Some(worktree)
 262        } else {
 263            None
 264        }
 265    }
 266
 267    pub fn snapshot(&self) -> Snapshot {
 268        match self {
 269            Worktree::Local(worktree) => worktree.snapshot(),
 270            Worktree::Remote(worktree) => worktree.snapshot(),
 271        }
 272    }
 273
 274    pub fn replica_id(&self) -> ReplicaId {
 275        match self {
 276            Worktree::Local(_) => 0,
 277            Worktree::Remote(worktree) => worktree.replica_id,
 278        }
 279    }
 280
 281    pub fn remove_collaborator(
 282        &mut self,
 283        peer_id: PeerId,
 284        replica_id: ReplicaId,
 285        cx: &mut ModelContext<Self>,
 286    ) {
 287        match self {
 288            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
 289            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
 290        }
 291    }
 292
 293    pub fn user_store(&self) -> &ModelHandle<UserStore> {
 294        match self {
 295            Worktree::Local(worktree) => &worktree.user_store,
 296            Worktree::Remote(worktree) => &worktree.user_store,
 297        }
 298    }
 299
 300    pub fn diagnostic_summaries<'a>(
 301        &'a self,
 302    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
 303        match self {
 304            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
 305            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
 306        }
 307        .iter()
 308        .map(|(path, summary)| (path.0.clone(), summary.clone()))
 309    }
 310
 311    pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
 312        match self {
 313            Worktree::Local(worktree) => &mut worktree.loading_buffers,
 314            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
 315        }
 316    }
 317
    /// Returns a buffer for the file at `path`, opening it if needed.
    ///
    /// Concurrent calls for the same path are de-duplicated through
    /// `loading_buffers`: they all await the same load and receive the same
    /// buffer. The returned `bool` is `true` for exactly one of the racing
    /// callers (the shared `AtomicBool` is swapped to `false` on first read).
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<(ModelHandle<Buffer>, bool)>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok((existing_buffer, false)) });
        }

        let is_new = Arc::new(AtomicBool::new(true));
        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    // Publish the result (or a shared error) to every waiter.
                    *tx.borrow_mut() = Some(match result {
                        Ok(buffer) => Ok((buffer, is_new)),
                        Err(error) => Err(Arc::new(error)),
                    });
                })
                .detach();
                rx
            }
        };

        // Wait until the load — ours or a concurrent caller's — publishes a
        // result on the watch channel.
        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return match result {
                        // `fetch_and(false)` returns the previous value, so
                        // only the first reader observes `true`.
                        Ok((buf, is_new)) => Ok((buf.clone(), is_new.fetch_and(false, SeqCst))),
                        Err(error) => Err(anyhow!("{}", error)),
                    };
                }
                loading_watch.recv().await;
            }
        })
    }
 378
 379    #[cfg(feature = "test-support")]
 380    pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
 381        let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
 382            Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
 383            Worktree::Remote(worktree) => {
 384                Box::new(worktree.open_buffers.values().filter_map(|buf| {
 385                    if let RemoteBuffer::Loaded(buf) = buf {
 386                        Some(buf)
 387                    } else {
 388                        None
 389                    }
 390                }))
 391            }
 392        };
 393
 394        let path = path.as_ref();
 395        open_buffers
 396            .find(|buffer| {
 397                if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
 398                    file.path().as_ref() == path
 399                } else {
 400                    false
 401                }
 402            })
 403            .is_some()
 404    }
 405
 406    pub fn handle_update_buffer(
 407        &mut self,
 408        envelope: TypedEnvelope<proto::UpdateBuffer>,
 409        cx: &mut ModelContext<Self>,
 410    ) -> Result<()> {
 411        let payload = envelope.payload.clone();
 412        let buffer_id = payload.buffer_id as usize;
 413        let ops = payload
 414            .operations
 415            .into_iter()
 416            .map(|op| language::proto::deserialize_operation(op))
 417            .collect::<Result<Vec<_>, _>>()?;
 418
 419        match self {
 420            Worktree::Local(worktree) => {
 421                let buffer = worktree
 422                    .open_buffers
 423                    .get(&buffer_id)
 424                    .and_then(|buf| buf.upgrade(cx))
 425                    .ok_or_else(|| {
 426                        anyhow!("invalid buffer {} in update buffer message", buffer_id)
 427                    })?;
 428                buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
 429            }
 430            Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
 431                Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
 432                Some(RemoteBuffer::Loaded(buffer)) => {
 433                    if let Some(buffer) = buffer.upgrade(cx) {
 434                        buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
 435                    } else {
 436                        worktree
 437                            .open_buffers
 438                            .insert(buffer_id, RemoteBuffer::Operations(ops));
 439                    }
 440                }
 441                None => {
 442                    worktree
 443                        .open_buffers
 444                        .insert(buffer_id, RemoteBuffer::Operations(ops));
 445                }
 446            },
 447        }
 448
 449        Ok(())
 450    }
 451
    /// Handles a guest's request to save a buffer that this worktree shares.
    ///
    /// Only valid on a shared local worktree: panics if called on a remote
    /// worktree, and errors if the worktree is not currently shared or the
    /// buffer id is unknown for the requesting peer. Responds with
    /// `proto::BufferSaved` once the save completes.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        // Look the buffer up among the ones shared with this specific peer.
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        // The save itself must run on the foreground, since it updates the
        // buffer model.
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        // Await the save and send the response from the background executor.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
 504
 505    pub fn handle_buffer_saved(
 506        &mut self,
 507        envelope: TypedEnvelope<proto::BufferSaved>,
 508        cx: &mut ModelContext<Self>,
 509    ) -> Result<()> {
 510        let payload = envelope.payload.clone();
 511        let worktree = self.as_remote_mut().unwrap();
 512        if let Some(buffer) = worktree
 513            .open_buffers
 514            .get(&(payload.buffer_id as usize))
 515            .and_then(|buf| buf.upgrade(cx))
 516        {
 517            buffer.update(cx, |buffer, cx| {
 518                let version = payload.version.try_into()?;
 519                let mtime = payload
 520                    .mtime
 521                    .ok_or_else(|| anyhow!("missing mtime"))?
 522                    .into();
 523                buffer.did_save(version, mtime, None, cx);
 524                Result::<_, anyhow::Error>::Ok(())
 525            })?;
 526        }
 527        Ok(())
 528    }
 529
    /// Handles a guest's request to format a shared buffer, replying with
    /// `Ack` on success or an error response on failure.
    ///
    /// Only valid on a local worktree (panics otherwise); errors if the
    /// buffer id is unknown for the requesting peer.
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();

        Ok(())
    }
 571
    /// Pulls the latest published snapshot into the foreground `snapshot`
    /// field and, once scanning has settled, reconciles open buffers with it.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        // While the scan is still running, schedule another
                        // poll instead of updating buffers against a partial
                        // snapshot. Under the fake fs (tests) just yield so
                        // tests don't wait on real timers.
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    // Scan finished: cancel any pending poll and refresh the
                    // files of open buffers.
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
 604
    /// Reconciles every open buffer's `File` with the current snapshot:
    /// entries may have changed path or mtime, or disappeared entirely.
    /// Buffers whose model handles have been dropped are pruned from the
    /// open-buffer map.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Remote entries that only hold queued operations have no
                // buffer model yet, so skip them.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        // Resolve the entry by id first (presumably so a
                        // renamed file keeps tracking the same entry — TODO
                        // confirm), then fall back to the old path; if
                        // neither matches, the entry is gone and the file
                        // keeps its old path with no entry id.
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                // Buffer model dropped; defer removal — the map can't be
                // mutated while it is being iterated.
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
 680
    /// Forwards a buffer operation to the server as a `proto::UpdateBuffer`
    /// request.
    ///
    /// Does nothing for a local worktree that isn't currently shared. If the
    /// request fails, the operation is stashed in `queued_operations`.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        // Determine the routing info; `None` means "local and not shared",
        // in which case there is nobody to send to.
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
 721}
 722
impl WorktreeId {
    /// Creates an id from a GPUI model id (used for local worktrees).
    pub fn from_usize(handle_id: usize) -> Self {
        Self(handle_id)
    }

    /// Creates an id from the value received over the wire.
    pub(crate) fn from_proto(id: u64) -> Self {
        Self(id as usize)
    }

    /// Converts the id to its wire representation.
    pub fn to_proto(&self) -> u64 {
        self.0 as u64
    }

    /// Returns the raw underlying value.
    pub fn to_usize(&self) -> usize {
        self.0
    }
}
 740
impl fmt::Display for WorktreeId {
    // Delegate to the inner integer's `Display` so formatting flags
    // (width, fill, etc.) are respected.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}
 746
/// A point-in-time view of a worktree's entries and ignore state, shared
/// via clones between the foreground model and the background scanner.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Scan generation; entries record the scan in which they were last
    /// touched (see `PathEntry::scan_id`).
    scan_id: usize,
    /// Absolute root path on disk (empty for remote worktrees).
    abs_path: Arc<Path>,
    /// Final component of `abs_path`, used for display and fuzzy matching.
    root_name: String,
    /// Lowercased characters of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    /// Loaded gitignore data plus a scan id per path. NOTE(review): keys
    /// look like the directories containing each `.gitignore` — confirm
    /// against the scanner.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    /// All entries, ordered by path.
    entries_by_path: SumTree<Entry>,
    /// Entry lookup keyed by entry id.
    entries_by_id: SumTree<PathEntry>,
    // NOTE(review): presumably maps removed entry ids to the scan in which
    // they were removed — confirm against the scanner.
    removed_entry_ids: HashMap<u64, usize>,
    /// Source of ids for newly-discovered entries.
    next_entry_id: Arc<AtomicUsize>,
}
 760
/// A worktree backed by the local file system, kept current by a background
/// scanner task.
pub struct LocalWorktree {
    /// Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    /// Settings parsed from the root `.zed.toml`, if present.
    config: WorktreeConfig,
    /// Snapshot mutated by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    /// Latest state reported by the scanner.
    last_scan_state_rx: watch::Receiver<ScanState>,
    /// Keeps the background scanner alive (set in `Worktree::open_local`).
    _background_scanner_task: Option<Task<()>>,
    /// Pending re-poll scheduled while a scan is in progress.
    poll_task: Option<Task<()>>,
    /// Progress of registering this worktree with the server.
    registration: Registration,
    /// Present while the worktree is shared with collaborators.
    share: Option<ShareState>,
    /// In-flight buffer loads, keyed by path.
    loading_buffers: LoadingBuffers,
    /// Weak handles to open buffers, keyed by buffer id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    /// Strong handles to buffers shared with each peer, keyed by the
    /// buffer id used over RPC.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    /// Diagnostics per path.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    /// Per-path rollups of diagnostic counts.
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    /// Operations that failed to send (see `send_buffer_update`).
    queued_operations: Vec<(u64, Operation)>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
}
 780
/// Server-registration state of a local worktree.
#[derive(Debug, Eq, PartialEq)]
enum Registration {
    /// Not registered with the server.
    None,
    /// Registration request in flight.
    Pending,
    /// Registered under the given project; unregistered on model release
    /// (see `Entity::release`).
    Done { project_id: u64 },
}
 787
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Presumably feeds new snapshots to the task below for transmission to
    // collaborators — the task body is defined elsewhere; confirm there.
    snapshots_tx: Sender<Snapshot>,
    /// Task that maintains the remote copy of the snapshot; dropping the
    /// share state cancels it.
    _maintain_remote_snapshot: Option<Task<()>>,
}
 793
/// A replica of a collaborator's worktree, synchronized over RPC.
pub struct RemoteWorktree {
    project_id: u64,
    /// Foreground snapshot, refreshed from `snapshot_rx` by `poll_snapshot`.
    snapshot: Snapshot,
    /// Receives each snapshot produced by the update-applying task spawned
    /// in `Worktree::remote`.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    /// Feeds incoming `UpdateWorktree` messages to the background task that
    /// applies them (see `Worktree::remote`).
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    /// Replica id assigned to this client by the host.
    replica_id: ReplicaId,
    /// In-flight buffer loads, keyed by path.
    loading_buffers: LoadingBuffers,
    /// Buffers that are either loaded, or still accumulating operations
    /// received before the buffer finished loading.
    open_buffers: HashMap<usize, RemoteBuffer>,
    user_store: ModelHandle<UserStore>,
    /// Operations that failed to send (see `send_buffer_update`).
    queued_operations: Vec<(u64, Operation)>,
    /// Per-path rollups of diagnostic counts.
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
}
 807
/// In-flight buffer loads keyed by worktree-relative path. Each value is a
/// watch channel yielding `None` until the load completes, then either the
/// buffer handle (plus the shared "newly created" flag used by
/// `Worktree::open_buffer`) or the shared load error.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<
        Option<Result<(ModelHandle<Buffer>, Arc<AtomicBool>), Arc<anyhow::Error>>>,
    >,
>;
 814
/// Per-worktree settings, deserialized from `.zed.toml` at the root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins authorized to collaborate on this worktree.
    collaborators: Vec<String>,
}
 819
 820impl LocalWorktree {
    /// Creates a local worktree model rooted at `path`, reading the root
    /// metadata and `.zed.toml` config, and returns the model along with the
    /// channel on which the background scanner should report scan states.
    ///
    /// Note: the background scanner itself is NOT started here
    /// (`_background_scanner_task` is `None`); this only wires up the
    /// scan-state forwarding loop.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // `path` is rebound to the empty path: the root entry's
        // worktree-relative path.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // Best-effort config load: a missing or malformed `.zed.toml`
        // silently falls back to the default config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                registration: Registration::None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                client,
                user_store,
                fs,
            };

            // Forward each scan state into `last_scan_state_rx`, poll the
            // snapshot, and — once scanning settles while shared — push the
            // fresh snapshot to the share task. Holds only a weak handle so
            // the loop exits when the worktree is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
 926
 927    pub fn authorized_logins(&self) -> Vec<String> {
 928        self.config.collaborators.clone()
 929    }
 930
 931    fn get_open_buffer(
 932        &mut self,
 933        path: &Path,
 934        cx: &mut ModelContext<Worktree>,
 935    ) -> Option<ModelHandle<Buffer>> {
 936        let handle = cx.handle();
 937        let mut result = None;
 938        self.open_buffers.retain(|_buffer_id, buffer| {
 939            if let Some(buffer) = buffer.upgrade(cx) {
 940                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
 941                    if file.worktree == handle && file.path().as_ref() == path {
 942                        result = Some(buffer);
 943                    }
 944                }
 945                true
 946            } else {
 947                false
 948            }
 949        });
 950        result
 951    }
 952
    /// Loads the file at `path` into a new buffer, applies any diagnostics
    /// already known for that path, and registers the buffer in
    /// `open_buffers`. Operations produced while applying diagnostics are
    /// forwarded to collaborators via `send_buffer_update`.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            // Read the file contents and build the `File` metadata.
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Grab a copy of any diagnostics previously published for this path.
            let diagnostics = this.update(&mut cx, |this, _| {
                this.as_local_mut().unwrap().diagnostics.get(&path).cloned()
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                // Broadcast the diagnostics operation(s), then track the
                // buffer with a weak handle so it can be garbage collected.
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
 989
 990    pub fn open_remote_buffer(
 991        &mut self,
 992        peer_id: PeerId,
 993        buffer: ModelHandle<Buffer>,
 994        cx: &mut ModelContext<Worktree>,
 995    ) -> proto::OpenBufferResponse {
 996        self.shared_buffers
 997            .entry(peer_id)
 998            .or_default()
 999            .insert(buffer.id() as u64, buffer.clone());
1000        proto::OpenBufferResponse {
1001            buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
1002        }
1003    }
1004
1005    pub fn close_remote_buffer(
1006        &mut self,
1007        envelope: TypedEnvelope<proto::CloseBuffer>,
1008        cx: &mut ModelContext<Worktree>,
1009    ) -> Result<()> {
1010        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1011            shared_buffers.remove(&envelope.payload.buffer_id);
1012            cx.notify();
1013        }
1014
1015        Ok(())
1016    }
1017
1018    pub fn remove_collaborator(
1019        &mut self,
1020        peer_id: PeerId,
1021        replica_id: ReplicaId,
1022        cx: &mut ModelContext<Worktree>,
1023    ) {
1024        self.shared_buffers.remove(&peer_id);
1025        for (_, buffer) in &self.open_buffers {
1026            if let Some(buffer) = buffer.upgrade(cx) {
1027                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1028            }
1029        }
1030        cx.notify();
1031    }
1032
    /// Converts an LSP `publishDiagnostics` notification into this crate's
    /// diagnostic entries and stores/broadcasts them via
    /// `update_diagnostic_entries`.
    ///
    /// Each diagnostic becomes either a *primary* entry (starting a new
    /// group) or a *supporting* one — a diagnostic referenced by an earlier
    /// primary's related-information list, identified by its
    /// (source, code, range) triple. Note this matching depends on the
    /// order diagnostics appear in `params`.
    pub fn update_diagnostics(
        &mut self,
        worktree_path: Arc<Path>,
        params: lsp::PublishDiagnosticsParams,
        disk_based_sources: &HashSet<String>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut next_group_id = 0;
        let mut diagnostics = Vec::default();
        // Maps (source, code, range) of each primary diagnostic to its group id.
        let mut primary_diagnostic_group_ids = HashMap::default();
        let mut sources_by_group_id = HashMap::default();
        // Severities reported directly on diagnostics that turned out to be
        // supporting members of an earlier group.
        let mut supporting_diagnostic_severities = HashMap::default();
        for diagnostic in &params.diagnostics {
            let source = diagnostic.source.as_ref();
            let code = diagnostic.code.as_ref().map(|code| match code {
                lsp::NumberOrString::Number(code) => code.to_string(),
                lsp::NumberOrString::String(code) => code.clone(),
            });
            let range = range_from_lsp(diagnostic.range);
            // A diagnostic is "supporting" if some earlier primary listed it
            // in its related information.
            let is_supporting = diagnostic
                .related_information
                .as_ref()
                .map_or(false, |infos| {
                    infos.iter().any(|info| {
                        primary_diagnostic_group_ids.contains_key(&(
                            source,
                            code.clone(),
                            range_from_lsp(info.location.range),
                        ))
                    })
                });

            if is_supporting {
                if let Some(severity) = diagnostic.severity {
                    supporting_diagnostic_severities
                        .insert((source, code.clone(), range), severity);
                }
            } else {
                let group_id = post_inc(&mut next_group_id);
                let is_disk_based =
                    source.map_or(false, |source| disk_based_sources.contains(source));

                sources_by_group_id.insert(group_id, source);
                primary_diagnostic_group_ids
                    .insert((source, code.clone(), range.clone()), group_id);

                diagnostics.push(DiagnosticEntry {
                    range,
                    diagnostic: Diagnostic {
                        code: code.clone(),
                        // Missing severity defaults to ERROR.
                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
                        message: diagnostic.message.clone(),
                        group_id,
                        is_primary: true,
                        is_valid: true,
                        is_disk_based,
                    },
                });
                // Related information within the same file becomes
                // non-primary entries in the same group.
                if let Some(infos) = &diagnostic.related_information {
                    for info in infos {
                        if info.location.uri == params.uri {
                            let range = range_from_lsp(info.location.range);
                            diagnostics.push(DiagnosticEntry {
                                range,
                                diagnostic: Diagnostic {
                                    code: code.clone(),
                                    severity: DiagnosticSeverity::INFORMATION,
                                    message: info.message.clone(),
                                    group_id,
                                    is_primary: false,
                                    is_valid: true,
                                    is_disk_based,
                                },
                            });
                        }
                    }
                }
            }
        }

        // Second pass: upgrade the severity of non-primary entries to the
        // severity that was reported on the corresponding supporting
        // diagnostic, if any.
        for entry in &mut diagnostics {
            let diagnostic = &mut entry.diagnostic;
            if !diagnostic.is_primary {
                let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
                if let Some(&severity) = supporting_diagnostic_severities.get(&(
                    source,
                    diagnostic.code.clone(),
                    entry.range.clone(),
                )) {
                    diagnostic.severity = severity;
                }
            }
        }

        self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
        Ok(())
    }
1130
    /// Applies `diagnostics` to the open buffer for `worktree_path` (if
    /// any), records them along with a summary, and — if the worktree is
    /// shared — broadcasts the updated summary to collaborators.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        // Find the (at most one) open buffer for this path and push the
        // diagnostics into it, forwarding the resulting operation.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        let summary = DiagnosticSummary::new(&diagnostics);
        self.diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        self.diagnostics.insert(worktree_path.clone(), diagnostics);

        // When shared, fire-and-forget the summary update; send errors are
        // only logged.
        if let Some(share) = self.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = self.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = self.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
1191
    /// Sends a buffer operation to collaborators over RPC. Returns `None`
    /// (without sending) when the worktree is not shared. On a send failure
    /// the operation is queued on `queued_operations` for later.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<()> {
        // `?` short-circuits when there is no active share.
        let share = self.share.as_ref()?;
        let project_id = share.project_id;
        let worktree_id = self.id();
        let rpc = self.client.clone();
        cx.spawn(|worktree, mut cx| async move {
            if let Err(error) = rpc
                .request(proto::UpdateBuffer {
                    project_id,
                    worktree_id: worktree_id.0 as u64,
                    buffer_id,
                    operations: vec![language::proto::serialize_operation(&operation)],
                })
                .await
            {
                // Keep the operation around so it isn't lost.
                worktree.update(&mut cx, |worktree, _| {
                    log::error!("error sending buffer operation: {}", error);
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .queued_operations
                        .push((buffer_id, operation));
                });
            }
        })
        .detach();
        None
    }
1225
1226    pub fn scan_complete(&self) -> impl Future<Output = ()> {
1227        let mut scan_state_rx = self.last_scan_state_rx.clone();
1228        async move {
1229            let mut scan_state = Some(scan_state_rx.borrow().clone());
1230            while let Some(ScanState::Scanning) = scan_state {
1231                scan_state = scan_state_rx.recv().await;
1232            }
1233        }
1234    }
1235
1236    fn is_scanning(&self) -> bool {
1237        if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1238            true
1239        } else {
1240            false
1241        }
1242    }
1243
1244    pub fn snapshot(&self) -> Snapshot {
1245        self.snapshot.clone()
1246    }
1247
1248    pub fn abs_path(&self) -> &Arc<Path> {
1249        &self.snapshot.abs_path
1250    }
1251
1252    pub fn contains_abs_path(&self, path: &Path) -> bool {
1253        path.starts_with(&self.snapshot.abs_path)
1254    }
1255
1256    fn absolutize(&self, path: &Path) -> PathBuf {
1257        if path.file_name().is_some() {
1258            self.snapshot.abs_path.join(path)
1259        } else {
1260            self.snapshot.abs_path.to_path_buf()
1261        }
1262    }
1263
    /// Reads the file at the worktree-relative `path` from disk, eagerly
    /// refreshing the background snapshot's entry for it, and returns the
    /// `File` metadata together with the file's text.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Surface the refreshed entry to the foreground snapshot.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1289
    /// Saves the buffer's current contents to `path` (which may differ from
    /// the buffer's current path), registers the buffer in `open_buffers`,
    /// and re-points the buffer at the new `File`.
    pub fn save_buffer_as(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        // Capture text and version now, so `did_save` marks exactly this
        // state as saved even if the buffer changes while writing.
        let text = buffer.as_rope().clone();
        let version = buffer.version();
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            let file = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers
                    .insert(buffer_handle.id(), buffer_handle.downgrade());
                File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                }
            });

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version, file.mtime, Some(Box::new(file)), cx);
            });

            Ok(())
        })
    }
1323
    /// Writes `text` to the worktree-relative `path` on disk, refreshes the
    /// background snapshot's entry for that path, polls the foreground
    /// snapshot, and resolves with the refreshed entry.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Disk I/O and snapshot refresh happen on the background executor.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1345
    /// Registers this worktree with the server under `project_id`.
    /// Idempotent: if registration is already pending or done, resolves
    /// immediately with `Ok`. On failure, the state reverts to `None` so a
    /// later call can retry.
    pub fn register(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.registration != Registration::None {
            return Task::ready(Ok(()));
        }

        self.registration = Registration::Pending;
        let client = self.client.clone();
        let register_message = proto::RegisterWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            authorized_logins: self.authorized_logins(),
        };
        cx.spawn(|this, mut cx| async move {
            let response = client.request(register_message).await;
            this.update(&mut cx, |this, _| {
                let worktree = this.as_local_mut().unwrap();
                match response {
                    Ok(_) => {
                        worktree.registration = Registration::Done { project_id };
                        Ok(())
                    }
                    Err(error) => {
                        worktree.registration = Registration::None;
                        Err(error)
                    }
                }
            })
        })
    }
1380
    /// Starts sharing this worktree with collaborators. Requires a completed
    /// registration; idempotent when already shared. Spawns a background
    /// task that diffs each new snapshot against the previous one and sends
    /// the update, then sends the full initial snapshot to the server.
    pub fn share(&mut self, cx: &mut ModelContext<Worktree>) -> Task<anyhow::Result<()>> {
        let project_id = if let Registration::Done { project_id } = self.registration {
            project_id
        } else {
            return Task::ready(Err(anyhow!("cannot share worktree before registering it")));
        };

        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        // Runs until `snapshots_to_send_tx` is dropped (i.e. on unshare).
        let maintain_remote_snapshot = cx.background().spawn({
            let rpc = rpc.clone();
            let snapshot = snapshot.clone();
            async move {
                let mut prev_snapshot = snapshot;
                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                    let message =
                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                    match rpc.send(message).await {
                        // Only advance the baseline when the diff was sent.
                        Ok(()) => prev_snapshot = snapshot,
                        Err(err) => log::error!("error sending snapshot diff {}", err),
                    }
                }
            }
        });
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
        });

        // Serialize the (potentially large) initial snapshot off the main thread.
        let diagnostic_summaries = self.diagnostic_summaries.clone();
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto(&diagnostic_summaries)),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1430
1431    pub fn unshare(&mut self) {
1432        self.share.take();
1433    }
1434
1435    pub fn is_shared(&self) -> bool {
1436        self.share.is_some()
1437    }
1438}
1439
1440fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1441    let contents = smol::block_on(fs.load(&abs_path))?;
1442    let parent = abs_path.parent().unwrap_or(Path::new("/"));
1443    let mut builder = GitignoreBuilder::new(parent);
1444    for line in contents.lines() {
1445        builder.add_line(Some(abs_path.into()), line)?;
1446    }
1447    Ok(builder.build()?)
1448}
1449
// Both worktree variants expose their `Snapshot` directly via deref.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1460
// Allows `Snapshot` accessors (e.g. `abs_path`, `root_name`) to be called
// directly on a `LocalWorktree`.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1468
// Allows `Snapshot` accessors to be called directly on a `RemoteWorktree`.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1476
// Debug output for a local worktree is just its snapshot's debug output.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1482
1483impl RemoteWorktree {
1484    fn get_open_buffer(
1485        &mut self,
1486        path: &Path,
1487        cx: &mut ModelContext<Worktree>,
1488    ) -> Option<ModelHandle<Buffer>> {
1489        let handle = cx.handle();
1490        let mut existing_buffer = None;
1491        self.open_buffers.retain(|_buffer_id, buffer| {
1492            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
1493                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
1494                    if file.worktree == handle && file.path().as_ref() == path {
1495                        existing_buffer = Some(buffer);
1496                    }
1497                }
1498                true
1499            } else {
1500                false
1501            }
1502        });
1503        existing_buffer
1504    }
1505
    /// Opens the buffer for `path` by requesting it from the remote host,
    /// reconstructing it locally from the returned protobuf state. Any
    /// operations that arrived while the open was in flight (stored as
    /// `RemoteBuffer::Operations`) are applied to the new buffer.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        // Weak handle: the worktree may be closed while the request is in flight.
        cx.spawn_weak(move |this, mut cx| async move {
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            // Re-check liveness after awaiting the RPC response.
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx).unwrap()
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Replace any queued operations with the loaded buffer and
                // apply those operations to it.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }
1560
1561    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
1562        for (_, buffer) in self.open_buffers.drain() {
1563            if let RemoteBuffer::Loaded(buffer) = buffer {
1564                if let Some(buffer) = buffer.upgrade(cx) {
1565                    buffer.update(cx, |buffer, cx| buffer.close(cx))
1566                }
1567            }
1568        }
1569    }
1570
1571    fn snapshot(&self) -> Snapshot {
1572        self.snapshot.clone()
1573    }
1574
    /// Forwards an incoming worktree update to `updates_tx` for ordered
    /// application. The send happens on the background executor so this
    /// call never blocks the main thread.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                // The expect message documents the invariant: the consumer
                // of this channel is expected to outlive the senders.
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }
1590
1591    pub fn update_diagnostic_summary(
1592        &mut self,
1593        path: Arc<Path>,
1594        summary: &proto::DiagnosticSummary,
1595    ) {
1596        self.diagnostic_summaries.insert(
1597            PathKey(path.clone()),
1598            DiagnosticSummary {
1599                error_count: summary.error_count as usize,
1600                warning_count: summary.warning_count as usize,
1601                info_count: summary.info_count as usize,
1602                hint_count: summary.hint_count as usize,
1603            },
1604        );
1605    }
1606
1607    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
1608        for (_, buffer) in &self.open_buffers {
1609            if let Some(buffer) = buffer.upgrade(cx) {
1610                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1611            }
1612        }
1613        cx.notify();
1614    }
1615}
1616
/// State of a buffer belonging to a remote worktree.
enum RemoteBuffer {
    /// The buffer has not been opened locally; operations received so far are
    /// queued until it is.
    Operations(Vec<Operation>),
    /// The buffer has been opened locally; holds a weak handle to its model.
    Loaded(WeakModelHandle<Buffer>),
}
1621
1622impl RemoteBuffer {
1623    fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1624        match self {
1625            Self::Operations(_) => None,
1626            Self::Loaded(buffer) => buffer.upgrade(cx),
1627        }
1628    }
1629}
1630
impl Snapshot {
    /// The stable identifier of this worktree.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Serializes this snapshot (plus the given diagnostic summaries) into
    /// the wire representation. Ignored entries are omitted.
    pub fn to_proto(
        &self,
        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
    ) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id.0 as u64,
            root_name,
            entries: self
                .entries_by_path
                .iter()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
            diagnostic_summaries: diagnostic_summaries
                .iter()
                .map(|(path, summary)| summary.to_proto(path.0.clone()))
                .collect(),
        }
    }

    /// Computes the delta between `self` (the newer snapshot) and `other`
    /// (the older one) as an `UpdateWorktree` message.
    ///
    /// Both id-ordered entry trees are walked in lockstep:
    /// ids present only in `self` are new entries, ids present in both are
    /// re-sent only when their `scan_id` changed, and ids present only in
    /// `other` have been removed.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Only in `self`: a newly-added entry.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // In both: re-send only if it was rescanned.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Only in `other`: the entry was removed.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies an update received over the wire, editing both entry trees in
    /// a single batch. Fails if a removed entry id is unknown.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry's path changed, drop the stale path-keyed record
            // so we don't leave a duplicate behind.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal at the entry with the given offset, counting only
    /// entries matching the `include_dirs`/`include_ignored` filters.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal at the entry for `path` (or the first entry after
    /// it, if `path` itself is absent).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the file with offset `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories) from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, skipping the root (empty) path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if it has been scanned.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry whose worktree-relative path is exactly `path`.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The seek may land on the next entry when `path` is absent,
                // so require an exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by its id, via the id-keyed tree.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts (or replaces) a single entry in both trees, reusing the id of
    /// any previous entry at the same path or inode. If the entry is a
    /// `.gitignore` file, reloads the ignore rules for its directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A broken .gitignore shouldn't abort the scan; log and
                    // keep the previous rules.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Records the scanned children of a directory, flipping the parent from
    /// `PendingDir` to `Dir` and installing its `.gitignore` (if any).
    ///
    /// Panics if the parent entry is missing or was not pending — callers
    /// must have inserted it as `PendingDir` first.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id recorded when an
    /// entry with the same inode was removed, else the id of the existing
    /// entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything beneath it from both entry trees,
    /// remembering removed ids by inode so they can be reused if the same
    /// file reappears.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before `path`], [the subtree
            // rooted at `path`], and [after], then stitch before + after.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates its directory's cached rules.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of `.gitignore` rules that applies to `path` by
    /// walking its ancestors from the root down. Collapses to
    /// `IgnoreStack::all()` as soon as any ancestor (or the path itself) is
    /// ignored.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1998
1999impl fmt::Debug for Snapshot {
2000    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2001        for entry in self.entries_by_path.cursor::<()>() {
2002            for _ in entry.path.ancestors().skip(1) {
2003                write!(f, " ")?;
2004            }
2005            writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2006        }
2007        Ok(())
2008    }
2009}
2010
/// A handle to a file within a worktree, used by buffers to locate, load,
/// and persist their contents.
#[derive(Clone, PartialEq)]
pub struct File {
    /// Id of the corresponding worktree entry; `None` once the file has been
    /// deleted (see `is_deleted`).
    entry_id: Option<usize>,
    pub worktree: ModelHandle<Worktree>,
    /// Path of the worktree root; joined with `path` to form the absolute path.
    worktree_path: Arc<Path>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    /// Whether the worktree is local; remote files report no absolute path.
    is_local: bool,
}
2020
impl language::File for File {
    /// Last-known modification time of the file.
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The file's absolute path, or `None` for files in remote worktrees.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// The file's path prefixed with the worktree's name, for display.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    /// Whether the file has been deleted (it no longer has a worktree entry).
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Saves the buffer's contents.
    ///
    /// For a local worktree this writes to disk and, if the project is
    /// shared, broadcasts a `BufferSaved` message. For a remote worktree it
    /// sends a `SaveBuffer` request and uses the version/mtime from the
    /// response. Returns the saved version and mtime.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                // `share` is only populated for shared projects; skip the
                // broadcast otherwise.
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Loads the file's contents from disk; `None` for remote worktrees.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Asks the remote host to format the buffer; `None` for local worktrees.
    fn format_remote(
        &self,
        buffer_id: u64,
        cx: &mut MutableAppContext,
    ) -> Option<Task<Result<()>>> {
        let worktree = self.worktree.read(cx);
        let worktree_id = worktree.id().to_proto();
        let worktree = worktree.as_remote()?;
        let rpc = worktree.client.clone();
        let project_id = worktree.project_id;
        Some(cx.foreground().spawn(async move {
            rpc.request(proto::FormatBuffer {
                project_id,
                worktree_id,
                buffer_id,
            })
            .await?;
            Ok(())
        }))
    }

    /// Forwards a buffer operation to the worktree for broadcast.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// Tells the remote host that the buffer was closed. Best-effort: a send
    /// failure is only logged. No-op for local worktrees.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.id().to_proto();
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    /// Enables downcasting back to the concrete `File` type.
    fn as_any(&self) -> &dyn Any {
        self
    }
}
2175
2176impl File {
2177    pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2178        file.and_then(|f| f.as_any().downcast_ref())
2179    }
2180
2181    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2182        self.worktree.read(cx).id()
2183    }
2184
2185    pub fn project_entry(&self, cx: &AppContext) -> Option<ProjectEntry> {
2186        self.entry_id.map(|entry_id| ProjectEntry {
2187            worktree_id: self.worktree_id(cx),
2188            entry_id,
2189        })
2190    }
2191}
2192
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    /// Unique id, stable across rescans where possible (see `reuse_entry_id`).
    pub id: usize,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry matches the applicable `.gitignore` rules.
    pub is_ignored: bool,
}
2203
/// The kind of a worktree entry.
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A regular file, carrying a precomputed character bag for fuzzy matching.
    File(CharBag),
}
2210
2211impl Entry {
2212    fn new(
2213        path: Arc<Path>,
2214        metadata: &fs::Metadata,
2215        next_entry_id: &AtomicUsize,
2216        root_char_bag: CharBag,
2217    ) -> Self {
2218        Self {
2219            id: next_entry_id.fetch_add(1, SeqCst),
2220            kind: if metadata.is_dir {
2221                EntryKind::PendingDir
2222            } else {
2223                EntryKind::File(char_bag_for_path(root_char_bag, &path))
2224            },
2225            path,
2226            inode: metadata.inode,
2227            mtime: metadata.mtime,
2228            is_symlink: metadata.is_symlink,
2229            is_ignored: false,
2230        }
2231    }
2232
2233    pub fn is_dir(&self) -> bool {
2234        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2235    }
2236
2237    pub fn is_file(&self) -> bool {
2238        matches!(self.kind, EntryKind::File(_))
2239    }
2240}
2241
2242impl sum_tree::Item for Entry {
2243    type Summary = EntrySummary;
2244
2245    fn summary(&self) -> Self::Summary {
2246        let visible_count = if self.is_ignored { 0 } else { 1 };
2247        let file_count;
2248        let visible_file_count;
2249        if self.is_file() {
2250            file_count = 1;
2251            visible_file_count = visible_count;
2252        } else {
2253            file_count = 0;
2254            visible_file_count = 0;
2255        }
2256
2257        EntrySummary {
2258            max_path: self.path.clone(),
2259            count: 1,
2260            visible_count,
2261            file_count,
2262            visible_file_count,
2263        }
2264    }
2265}
2266
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed — and therefore ordered — by their relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2274
/// Aggregated statistics for a subtree of entries in the path-ordered tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (rightmost) path in the summarized range.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2283
2284impl Default for EntrySummary {
2285    fn default() -> Self {
2286        Self {
2287            max_path: Arc::from(Path::new("")),
2288            count: 0,
2289            visible_count: 0,
2290            file_count: 0,
2291            visible_file_count: 0,
2292        }
2293    }
2294}
2295
2296impl sum_tree::Summary for EntrySummary {
2297    type Context = ();
2298
2299    fn add_summary(&mut self, rhs: &Self, _: &()) {
2300        self.max_path = rhs.max_path.clone();
2301        self.visible_count += rhs.visible_count;
2302        self.file_count += rhs.file_count;
2303        self.visible_file_count += rhs.visible_file_count;
2304    }
2305}
2306
/// An entry's record in the id-ordered tree, used to diff snapshots by id
/// (see `Snapshot::build_update`).
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan during which this entry was last modified.
    scan_id: usize,
}
2314
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// The summary of a single record is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2322
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    /// Records are keyed — and therefore ordered — by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2330
/// Summary for the id-ordered tree: the largest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2335
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// `summary` covers items to the right, so its `max_id` is the larger.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2343
/// Allows seeking the id-ordered tree by raw entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2349
/// Ordering key for the path-ordered entry tree: a worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2352
impl Default for PathKey {
    /// The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2358
/// Allows seeking the path-ordered tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2364
/// Scans a worktree's directory on background threads, keeping the shared
/// snapshot up to date and reporting progress on the `notify` channel.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    /// Snapshot shared with the foreground worktree model.
    snapshot: Arc<Mutex<Snapshot>>,
    /// Channel for `ScanState` transitions (Scanning / Idle / Err).
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2371
2372impl BackgroundScanner {
2373    fn new(
2374        snapshot: Arc<Mutex<Snapshot>>,
2375        notify: Sender<ScanState>,
2376        fs: Arc<dyn Fs>,
2377        executor: Arc<executor::Background>,
2378    ) -> Self {
2379        Self {
2380            fs,
2381            snapshot,
2382            notify,
2383            executor,
2384        }
2385    }
2386
    /// The absolute path of the worktree root being scanned.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }
2390
    /// Returns a clone of the shared snapshot, taken under the lock.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }
2394
    /// Main scanner loop: performs the initial scan, then reprocesses batches
    /// of file-system events from `events_rx`, bracketing each pass with
    /// `Scanning`/`Idle` notifications. Returns when the notify channel's
    /// receiver is dropped or the event stream ends.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        // NOTE(review): `Idle` is sent even when the initial scan failed
        // above — confirm this fall-through is intentional.
        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }
2430
    /// Performs the initial scan of the worktree root, walking directories in
    /// parallel across one worker per CPU. Single-file worktrees (where the
    /// root is not a directory) require no walk.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            // Copy what we need out of the snapshot so the lock isn't held
            // while scanning.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            // Seed the queue with the root job. Each job carries a clone of
            // the sender so the channel stays open until every discovered
            // directory has been scanned; dropping our copy lets the workers
            // terminate once the queue drains.
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }
2476
    /// Scans a single directory described by `job`: reads its children,
    /// computes their ignore status, records them in the snapshot, and queues
    /// a new job for each child directory.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    // Skip unreadable entries rather than aborting the scan.
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                // The entry vanished between listing and stat; skip it.
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in this directory.
                // Because `.gitignore` starts with a `.`, it is usually
                // encountered early in the listing, so the entries needing
                // reprocessing should rarely be numerous. Update the ignore
                // stack associated with any queued jobs as well; the jobs
                // were pushed in the same order as the directory entries, so
                // the two iterators stay aligned.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Inside an ignored directory everything is ignored, so
                    // collapse the stack.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }
2574
    /// Applies one batch of file-system events to the worktree's snapshot.
    ///
    /// Every affected path is first removed from the snapshot, then re-statted
    /// and re-inserted if it still exists; new or changed directories are
    /// rescanned in parallel, and ignore statuses are refreshed at the end.
    ///
    /// Returns `false` if the worktree root can no longer be canonicalized
    /// (e.g. the root directory was deleted), in which case the batch is
    /// abandoned.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Mutate a local copy of the snapshot; it is published back to
        // `self.snapshot` once all events in the batch have been applied.
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sorting by path places ancestors before their descendants, so the
        // component-wise `starts_with` dedup below keeps only the outermost
        // path of each nested group — rescanning an ancestor directory
        // subsumes events for paths beneath it.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove every affected path from the snapshot.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // Second pass: re-stat each path and re-insert whatever still exists,
        // queuing directories for the recursive rescan below.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    // NOTE(review): this only marks the entry ignored when the
                    // inherited stack ignores everything; per-pattern status is
                    // presumably refreshed by update_ignore_statuses() below —
                    // confirm.
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists; its entry was already removed
                // in the first pass.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        // Dropping the root sender lets the workers exit once the queue drains
        // (each job carries its own sender clone for enqueuing subdirectories).
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }
2676
    /// Recomputes `is_ignored` for entries under directories whose
    /// `.gitignore` changed during the current scan, and evicts cached
    /// gitignores whose backing file no longer exists.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // A cached gitignore whose recorded scan_id matches the current
            // scan was (re)loaded in this batch, so its directory's entries
            // need their ignore status recomputed.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            // Evict cache entries whose `.gitignore` file is gone.
            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip paths nested beneath this one: the job for the ancestor
            // re-statuses its descendants recursively, so separate jobs for
            // them would be redundant.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Close the channel so the workers below terminate once it drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2733
    /// Re-evaluates `is_ignored` for the direct children of `job.path`,
    /// enqueuing a follow-up job for each child directory, and writes any
    /// changed entries back to the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        // Extend the inherited stack with this directory's own gitignore,
        // if one is cached for it.
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                // Everything under an ignored directory is ignored, so pass
                // the "ignore all" stack down rather than re-matching patterns.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only entries whose status actually changed are written back.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply both edit batches under a single lock acquisition.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2774}
2775
2776async fn refresh_entry(
2777    fs: &dyn Fs,
2778    snapshot: &Mutex<Snapshot>,
2779    path: Arc<Path>,
2780    abs_path: &Path,
2781) -> Result<Entry> {
2782    let root_char_bag;
2783    let next_entry_id;
2784    {
2785        let snapshot = snapshot.lock();
2786        root_char_bag = snapshot.root_char_bag;
2787        next_entry_id = snapshot.next_entry_id.clone();
2788    }
2789    let entry = Entry::new(
2790        path,
2791        &fs.metadata(abs_path)
2792            .await?
2793            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2794        &next_entry_id,
2795        root_char_bag,
2796    );
2797    Ok(snapshot.lock().insert_entry(entry, fs))
2798}
2799
2800fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2801    let mut result = root_char_bag;
2802    result.extend(
2803        path.to_string_lossy()
2804            .chars()
2805            .map(|c| c.to_ascii_lowercase()),
2806    );
2807    result
2808}
2809
/// A unit of work for the background scanner: one directory to enumerate,
/// along with the ignore state inherited from its ancestors.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: PathBuf,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignores inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Sender used to enqueue jobs for subdirectories.
    scan_queue: Sender<ScanJob>,
}
2816
/// A unit of work for recomputing ignore statuses: one directory whose
/// children should be re-evaluated against the given ignore stack.
struct UpdateIgnoreStatusJob {
    /// Directory (relative to the worktree root) whose children to update.
    path: Arc<Path>,
    /// Gitignores inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Sender used to enqueue jobs for subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2822
/// Test-support extension methods for `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    /// Waits until all file-system events that predate this call have been
    /// processed by the worktree's scanner. See the impl for details.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2830
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        // Create and then delete a sentinel file, waiting for each change to
        // be observed by the worktree before proceeding.
        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any scan triggered by the sentinel churn to finish.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2863
/// Running totals accumulated while seeking through the entries tree:
/// the greatest path seen so far, plus counts of all entries, non-ignored
/// ("visible") entries, files, and visible files.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2872
2873impl<'a> TraversalProgress<'a> {
2874    fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2875        match (include_ignored, include_dirs) {
2876            (true, true) => self.count,
2877            (true, false) => self.file_count,
2878            (false, true) => self.visible_count,
2879            (false, false) => self.visible_file_count,
2880        }
2881    }
2882}
2883
// Lets `TraversalProgress` be used as a sum-tree seek dimension by
// accumulating each subtree summary into the running totals.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2893
impl<'a> Default for TraversalProgress<'a> {
    /// The position before any entry: empty max path, all counts zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2905
/// A cursor over the worktree's entries in path order, optionally filtering
/// out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
2911
impl<'a> Traversal<'a> {
    /// Advances past the current entry to the next one matching the
    /// traversal's filters. Returns `false` at the end of the tree.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` filter-matching entries precede the
    /// cursor. Returns `false` if the end of the tree is reached first.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the current entry and all of its descendants, repeating until
    /// the cursor lands on an entry matching the filters. Returns `false`
    /// if the end of the tree is reached instead.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek past `entry` and everything beneath it.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filter-matching entries preceding the cursor.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2957
2958impl<'a> Iterator for Traversal<'a> {
2959    type Item = &'a Entry;
2960
2961    fn next(&mut self) -> Option<Self::Item> {
2962        if let Some(item) = self.entry() {
2963            self.advance();
2964            Some(item)
2965        } else {
2966            None
2967        }
2968    }
2969}
2970
/// Targets for seeking a `Traversal`'s cursor through the entries tree.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to a specific path.
    Path(&'a Path),
    /// Seek to the first position beyond `path` and all of its descendants.
    PathSuccessor(&'a Path),
    /// Seek until `count` entries (counted under the given filters) precede
    /// the cursor.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2981
// Allows a traversal cursor to seek using any `TraversalTarget` variant.
// Returning `Ordering::Greater` tells the cursor to keep seeking forward;
// `Equal` means the target position has been reached.
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Plain path-order comparison against the greatest path seen.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Done as soon as the cursor is no longer at `path` itself or
                // one of its descendants.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3004
/// Iterator over the direct children of `parent_path`, skipping each
/// child's own descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3009
3010impl<'a> Iterator for ChildEntriesIter<'a> {
3011    type Item = &'a Entry;
3012
3013    fn next(&mut self) -> Option<Self::Item> {
3014        if let Some(item) = self.traversal.entry() {
3015            if item.path.starts_with(&self.parent_path) {
3016                self.traversal.advance_to_sibling();
3017                return Some(item);
3018            }
3019        }
3020        None
3021    }
3022}
3023
// Conversion of a local worktree entry into the protobuf representation
// sent to remote collaborators.
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            // Paths cross the wire as (lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3037
3038impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3039    type Error = anyhow::Error;
3040
3041    fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3042        if let Some(mtime) = entry.mtime {
3043            let kind = if entry.is_dir {
3044                EntryKind::Dir
3045            } else {
3046                let mut char_bag = root_char_bag.clone();
3047                char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3048                EntryKind::File(char_bag)
3049            };
3050            let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3051            Ok(Entry {
3052                id: entry.id as usize,
3053                kind,
3054                path: path.clone(),
3055                inode: entry.inode,
3056                mtime: mtime.into(),
3057                is_symlink: entry.is_symlink,
3058                is_ignored: entry.is_ignored,
3059            })
3060        } else {
3061            Err(anyhow!(
3062                "missing mtime in remote worktree entry {:?}",
3063                entry.path
3064            ))
3065        }
3066    }
3067}
3068
3069#[cfg(test)]
3070mod tests {
3071    use super::*;
3072    use crate::fs::FakeFs;
3073    use anyhow::Result;
3074    use client::test::{FakeHttpClient, FakeServer};
3075    use fs::RealFs;
3076    use language::{Diagnostic, DiagnosticEntry};
3077    use lsp::Url;
3078    use rand::prelude::*;
3079    use serde_json::json;
3080    use std::{cell::RefCell, rc::Rc};
3081    use std::{
3082        env,
3083        fmt::Write,
3084        time::{SystemTime, UNIX_EPOCH},
3085    };
3086    use text::Point;
3087    use unindent::Unindent as _;
3088    use util::test::temp_tree;
3089
    /// A default (non-ignored) traversal of a scanned worktree excludes
    /// entries matched by a `.gitignore` (here, `a/b`) while still yielding
    /// the root, the gitignore itself, and unignored siblings.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
               ".gitignore": "a/b\n",
               "a": {
                   "b": "",
                   "c": "",
               }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan before inspecting entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3135
    /// Saving a buffer writes its contents to the underlying file on disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Large edit so the save is non-trivial in size.
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx)
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3168
    /// Saving works when the worktree is rooted at a single file rather than
    /// a directory; the file is opened via the empty relative path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // A single-file worktree contains exactly that one file.
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx)
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3206
    /// After renaming and deleting files on disk:
    /// - entries keep their ids across renames (rename detection),
    /// - open buffers follow their files to the new paths, and a deleted
    ///   file's buffer is marked deleted,
    /// - a remote worktree converges to the same paths after applying an
    ///   update built against the pre-change snapshot.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let http_client = FakeHttpClient::with_404_response();
        let mut client = Client::new(http_client.clone());
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap().0 }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(&Default::default()),
            Client::new(http_client.clone()),
            user_store,
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Entry ids survive renames of the file and of ancestor dirs.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers track their files to the new paths.
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3360
    /// Ignore statuses from a `.gitignore` are applied during the initial
    /// scan and to files created afterwards; the `.git` directory is also
    /// marked ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Files created after the initial scan inherit the correct status.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3411
3412    #[gpui::test]
3413    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
3414        let user_id = 100;
3415        let http_client = FakeHttpClient::with_404_response();
3416        let mut client = Client::new(http_client);
3417        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
3418        let user_store = server.build_user_store(client.clone(), &mut cx).await;
3419
3420        let fs = Arc::new(FakeFs::new());
3421        fs.insert_tree(
3422            "/the-dir",
3423            json!({
3424                "a.txt": "a-contents",
3425                "b.txt": "b-contents",
3426            }),
3427        )
3428        .await;
3429
3430        let worktree = Worktree::open_local(
3431            client.clone(),
3432            user_store,
3433            "/the-dir".as_ref(),
3434            fs,
3435            &mut cx.to_async(),
3436        )
3437        .await
3438        .unwrap();
3439
3440        // Spawn multiple tasks to open paths, repeating some paths.
3441        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
3442            (
3443                worktree.open_buffer("a.txt", cx),
3444                worktree.open_buffer("b.txt", cx),
3445                worktree.open_buffer("a.txt", cx),
3446            )
3447        });
3448
3449        let buffer_a_1 = buffer_a_1.await.unwrap().0;
3450        let buffer_a_2 = buffer_a_2.await.unwrap().0;
3451        let buffer_b = buffer_b.await.unwrap().0;
3452        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
3453        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");
3454
3455        // There is only one buffer per path.
3456        let buffer_a_id = buffer_a_1.id();
3457        assert_eq!(buffer_a_2.id(), buffer_a_id);
3458
3459        // Open the same path again while it is still open.
3460        drop(buffer_a_1);
3461        let buffer_a_3 = worktree
3462            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
3463            .await
3464            .unwrap()
3465            .0;
3466
3467        // There's still only one buffer per path.
3468        assert_eq!(buffer_a_3.id(), buffer_a_id);
3469    }
3470
    // Verifies the buffer dirty-state lifecycle: edits mark a buffer dirty and
    // emit Dirtied/Edited events, saving clears the dirty flag and emits Saved,
    // and deleting the backing file marks the buffer dirty.
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let (buffer1, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Collects every event emitted by buffer1 so ordering can be asserted.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            // Only the first edit of a dirty buffer emits Dirtied; the second
            // edit emits Edited alone.
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, it is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let (buffer2, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let (buffer3, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3609
    // Verifies how an open buffer reacts to its backing file changing on disk:
    // a clean buffer is reloaded in place, while a dirty buffer keeps its edits
    // and is flagged as conflicted instead.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        // The reload leaves the buffer clean: no dirty flag, no conflict.
        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3711
    // Verifies that LSP diagnostics linked through `related_information` are
    // merged into groups: each group carries one primary entry plus its hint
    // entries (sharing a `group_id`), and `diagnostic_group` returns exactly
    // the members of one group.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let (buffer, _) = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Build a diagnostics message containing two primary diagnostics
        // ("error 1" and "error 2") whose hints are published as separate
        // HINT-severity diagnostics, cross-linked via `related_information`.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                // Primary diagnostic for group 0, linked to its one hint.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                // Hint for group 0, pointing back at its primary diagnostic.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Primary diagnostic for group 1, linked to two hints.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                // First hint for group 1, pointing back at its primary.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                // Second hint for group 1, pointing back at its primary.
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.as_local_mut().unwrap().update_diagnostics(
                    Arc::from("a.rs".as_ref()),
                    message,
                    &Default::default(),
                    cx,
                )
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All entries over the whole buffer: sorted by position, with
        // `group_id` and `is_primary` reflecting the grouping above.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Group 0 contains the "error 1" primary and its single hint.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        // Group 1 contains the "error 2" primary and both of its hints.
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3980
    // Randomized fuzz test for the background scanner: mutates a real temp
    // directory, delivers fs events in random batches, and checks that the
    // scanner's snapshot stays internally consistent, matches a fresh rescan,
    // and can be reconstructed from incremental updates.
    // NOTE: the sequence of `rng` calls determines the whole test, so the
    // statement order here must not be changed.
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Both knobs are overridable from the environment for longer runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // Start from an empty snapshot containing only the root entry.
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave tree mutations with randomly-sized event deliveries,
        // checking invariants after each delivery and occasionally saving a
        // snapshot for the incremental-update check below.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Deliver any remaining events so the scanner reaches a quiescent state.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan of the final directory must produce the same snapshot
        // as the incrementally-maintained one.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every saved intermediate snapshot must be able to catch up to the
        // final state by applying a single built update, with and without
        // ignored entries included.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot, since the
                // update being applied won't mention them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4097
4098    fn randomly_mutate_tree(
4099        root_path: &Path,
4100        insertion_probability: f64,
4101        rng: &mut impl Rng,
4102    ) -> Result<Vec<fsevent::Event>> {
4103        let root_path = root_path.canonicalize().unwrap();
4104        let (dirs, files) = read_dir_recursive(root_path.clone());
4105
4106        let mut events = Vec::new();
4107        let mut record_event = |path: PathBuf| {
4108            events.push(fsevent::Event {
4109                event_id: SystemTime::now()
4110                    .duration_since(UNIX_EPOCH)
4111                    .unwrap()
4112                    .as_secs(),
4113                flags: fsevent::StreamFlags::empty(),
4114                path,
4115            });
4116        };
4117
4118        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4119            let path = dirs.choose(rng).unwrap();
4120            let new_path = path.join(gen_name(rng));
4121
4122            if rng.gen() {
4123                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4124                std::fs::create_dir(&new_path)?;
4125            } else {
4126                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4127                std::fs::write(&new_path, "")?;
4128            }
4129            record_event(new_path);
4130        } else if rng.gen_bool(0.05) {
4131            let ignore_dir_path = dirs.choose(rng).unwrap();
4132            let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4133
4134            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4135            let files_to_ignore = {
4136                let len = rng.gen_range(0..=subfiles.len());
4137                subfiles.choose_multiple(rng, len)
4138            };
4139            let dirs_to_ignore = {
4140                let len = rng.gen_range(0..subdirs.len());
4141                subdirs.choose_multiple(rng, len)
4142            };
4143
4144            let mut ignore_contents = String::new();
4145            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4146                write!(
4147                    ignore_contents,
4148                    "{}\n",
4149                    path_to_ignore
4150                        .strip_prefix(&ignore_dir_path)?
4151                        .to_str()
4152                        .unwrap()
4153                )
4154                .unwrap();
4155            }
4156            log::info!(
4157                "Creating {:?} with contents:\n{}",
4158                ignore_path.strip_prefix(&root_path)?,
4159                ignore_contents
4160            );
4161            std::fs::write(&ignore_path, ignore_contents).unwrap();
4162            record_event(ignore_path);
4163        } else {
4164            let old_path = {
4165                let file_path = files.choose(rng);
4166                let dir_path = dirs[1..].choose(rng);
4167                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4168            };
4169
4170            let is_rename = rng.gen();
4171            if is_rename {
4172                let new_path_parent = dirs
4173                    .iter()
4174                    .filter(|d| !d.starts_with(old_path))
4175                    .choose(rng)
4176                    .unwrap();
4177
4178                let overwrite_existing_dir =
4179                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4180                let new_path = if overwrite_existing_dir {
4181                    std::fs::remove_dir_all(&new_path_parent).ok();
4182                    new_path_parent.to_path_buf()
4183                } else {
4184                    new_path_parent.join(gen_name(rng))
4185                };
4186
4187                log::info!(
4188                    "Renaming {:?} to {}{:?}",
4189                    old_path.strip_prefix(&root_path)?,
4190                    if overwrite_existing_dir {
4191                        "overwrite "
4192                    } else {
4193                        ""
4194                    },
4195                    new_path.strip_prefix(&root_path)?
4196                );
4197                std::fs::rename(&old_path, &new_path)?;
4198                record_event(old_path.clone());
4199                record_event(new_path);
4200            } else if old_path.is_dir() {
4201                let (dirs, files) = read_dir_recursive(old_path.clone());
4202
4203                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4204                std::fs::remove_dir_all(&old_path).unwrap();
4205                for file in files {
4206                    record_event(file);
4207                }
4208                for dir in dirs {
4209                    record_event(dir);
4210                }
4211            } else {
4212                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4213                std::fs::remove_file(old_path).unwrap();
4214                record_event(old_path.clone());
4215            }
4216        }
4217
4218        Ok(events)
4219    }
4220
4221    fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4222        let child_entries = std::fs::read_dir(&path).unwrap();
4223        let mut dirs = vec![path];
4224        let mut files = Vec::new();
4225        for child_entry in child_entries {
4226            let child_path = child_entry.unwrap().path();
4227            if child_path.is_dir() {
4228                let (child_dirs, child_files) = read_dir_recursive(child_path);
4229                dirs.extend(child_dirs);
4230                files.extend(child_files);
4231            } else {
4232                files.push(child_path);
4233            }
4234        }
4235        (dirs, files)
4236    }
4237
4238    fn gen_name(rng: &mut impl Rng) -> String {
4239        (0..6)
4240            .map(|_| rng.sample(rand::distributions::Alphanumeric))
4241            .map(char::from)
4242            .collect()
4243    }
4244
4245    impl Snapshot {
4246        fn check_invariants(&self) {
4247            let mut files = self.files(true, 0);
4248            let mut visible_files = self.files(false, 0);
4249            for entry in self.entries_by_path.cursor::<()>() {
4250                if entry.is_file() {
4251                    assert_eq!(files.next().unwrap().inode, entry.inode);
4252                    if !entry.is_ignored {
4253                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4254                    }
4255                }
4256            }
4257            assert!(files.next().is_none());
4258            assert!(visible_files.next().is_none());
4259
4260            let mut bfs_paths = Vec::new();
4261            let mut stack = vec![Path::new("")];
4262            while let Some(path) = stack.pop() {
4263                bfs_paths.push(path);
4264                let ix = stack.len();
4265                for child_entry in self.child_entries(path) {
4266                    stack.insert(ix, &child_entry.path);
4267                }
4268            }
4269
4270            let dfs_paths = self
4271                .entries_by_path
4272                .cursor::<()>()
4273                .map(|e| e.path.as_ref())
4274                .collect::<Vec<_>>();
4275            assert_eq!(bfs_paths, dfs_paths);
4276
4277            for (ignore_parent_path, _) in &self.ignores {
4278                assert!(self.entry_for_path(ignore_parent_path).is_some());
4279                assert!(self
4280                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4281                    .is_some());
4282            }
4283        }
4284
4285        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4286            let mut paths = Vec::new();
4287            for entry in self.entries_by_path.cursor::<()>() {
4288                if include_ignored || !entry.is_ignored {
4289                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4290                }
4291            }
4292            paths.sort_by(|a, b| a.0.cmp(&b.0));
4293            paths
4294        }
4295    }
4296}