1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::BTreeMap;
11use collections::{hash_map, HashMap};
12use futures::{Stream, StreamExt};
13use fuzzy::CharBag;
14use gpui::{
15 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
16 Task, UpgradeModelHandle, WeakModelHandle,
17};
18use language::{
19 Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
20 Operation, PointUtf16, Rope,
21};
22use lazy_static::lazy_static;
23use lsp::LanguageServer;
24use parking_lot::Mutex;
25use postage::{
26 prelude::{Sink as _, Stream as _},
27 watch,
28};
29use serde::Deserialize;
30use smol::channel::{self, Sender};
31use std::{
32 any::Any,
33 cmp::{self, Ordering},
34 convert::{TryFrom, TryInto},
35 ffi::{OsStr, OsString},
36 fmt,
37 future::Future,
38 mem,
39 ops::{Deref, Range},
40 path::{Path, PathBuf},
41 sync::{
42 atomic::{AtomicUsize, Ordering::SeqCst},
43 Arc,
44 },
45 time::{Duration, SystemTime},
46};
47use sum_tree::Bias;
48use sum_tree::{Edit, SeekTarget, SumTree};
49use util::{post_inc, ResultExt, TryFutureExt};
50
lazy_static! {
    // Cached `OsStr` for ".gitignore" so directory scans can compare file
    // names without re-creating the literal on every comparison.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
54
/// State of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    // No scan is currently running.
    Idle,
    // A scan is in progress; the snapshot may be incomplete.
    Scanning,
    // The most recent scan failed with the contained error.
    Err(Arc<anyhow::Error>),
}
61
/// A worktree is either hosted locally (backed by the filesystem) or is a
/// replica of a worktree hosted by a remote collaborator.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
66
/// Events emitted by a [`Worktree`] model.
#[derive(Debug)]
pub enum Event {
    // Diagnostics changed for the file at the given worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
71
72impl Entity for Worktree {
73 type Event = Event;
74
75 fn app_will_quit(
76 &mut self,
77 _: &mut MutableAppContext,
78 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
79 use futures::FutureExt;
80
81 if let Self::Local(worktree) = self {
82 let shutdown_futures = worktree
83 .language_servers
84 .drain()
85 .filter_map(|(_, server)| server.shutdown())
86 .collect::<Vec<_>>();
87 Some(
88 async move {
89 futures::future::join_all(shutdown_futures).await;
90 }
91 .boxed(),
92 )
93 } else {
94 None
95 }
96 }
97}
98
99impl Worktree {
    /// Open a worktree rooted at `path` on the local filesystem.
    ///
    /// Constructs the `LocalWorktree` model, then spawns the background
    /// scanner task that watches `path` for filesystem events and keeps the
    /// shared background snapshot up to date.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // The scanner lives for the lifetime of the worktree; dropping
            // this task cancels the scan.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                // Filesystem events are debounced by 100ms before rescanning.
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
124
    /// Build a worktree model that mirrors a worktree hosted by a remote
    /// collaborator.
    ///
    /// Entry deserialization runs on the background executor. Two tasks are
    /// spawned: one applies incoming `UpdateWorktree` messages to a shared
    /// snapshot, the other re-polls the model whenever that snapshot changes.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        // Lower-cased character bag of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Deserialize the transmitted entries off the main thread.
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Malformed entries are skipped rather than failing
                        // the whole open.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: remote_id as usize,
                    scan_id: 0,
                    // Remote worktrees have no local filesystem root.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Apply protocol updates to the shared snapshot in the
                // background; errors are logged and the update dropped.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    // Re-poll the model whenever the shared snapshot changes,
                    // stopping once the model itself has been dropped.
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    diagnostic_summaries: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                })
            })
        });

        Ok(worktree)
    }
233
234 pub fn as_local(&self) -> Option<&LocalWorktree> {
235 if let Worktree::Local(worktree) = self {
236 Some(worktree)
237 } else {
238 None
239 }
240 }
241
242 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
243 if let Worktree::Remote(worktree) = self {
244 Some(worktree)
245 } else {
246 None
247 }
248 }
249
250 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
251 if let Worktree::Local(worktree) = self {
252 Some(worktree)
253 } else {
254 None
255 }
256 }
257
258 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
259 if let Worktree::Remote(worktree) = self {
260 Some(worktree)
261 } else {
262 None
263 }
264 }
265
266 pub fn snapshot(&self) -> Snapshot {
267 match self {
268 Worktree::Local(worktree) => worktree.snapshot(),
269 Worktree::Remote(worktree) => worktree.snapshot(),
270 }
271 }
272
273 pub fn replica_id(&self) -> ReplicaId {
274 match self {
275 Worktree::Local(_) => 0,
276 Worktree::Remote(worktree) => worktree.replica_id,
277 }
278 }
279
280 pub fn remove_collaborator(
281 &mut self,
282 peer_id: PeerId,
283 replica_id: ReplicaId,
284 cx: &mut ModelContext<Self>,
285 ) {
286 match self {
287 Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
288 Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
289 }
290 }
291
292 pub fn languages(&self) -> &Arc<LanguageRegistry> {
293 match self {
294 Worktree::Local(worktree) => &worktree.languages,
295 Worktree::Remote(worktree) => &worktree.languages,
296 }
297 }
298
299 pub fn user_store(&self) -> &ModelHandle<UserStore> {
300 match self {
301 Worktree::Local(worktree) => &worktree.user_store,
302 Worktree::Remote(worktree) => &worktree.user_store,
303 }
304 }
305
    /// Handle a peer's request to open a buffer in this worktree.
    ///
    /// The response carrying the buffer's serialized state is sent over RPC
    /// from a detached background task once the buffer finishes loading.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        // Panics on a remote worktree — only hosts receive this message.
        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
331
332 pub fn handle_close_buffer(
333 &mut self,
334 envelope: TypedEnvelope<proto::CloseBuffer>,
335 _: Arc<Client>,
336 cx: &mut ModelContext<Self>,
337 ) -> anyhow::Result<()> {
338 self.as_local_mut()
339 .unwrap()
340 .close_remote_buffer(envelope, cx)
341 }
342
343 pub fn diagnostic_summaries<'a>(
344 &'a self,
345 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
346 match self {
347 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
348 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
349 }
350 .iter()
351 .map(|(path, summary)| (path.clone(), summary.clone()))
352 }
353
354 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
355 match self {
356 Worktree::Local(worktree) => &mut worktree.loading_buffers,
357 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
358 }
359 }
360
361 pub fn open_buffer(
362 &mut self,
363 path: impl AsRef<Path>,
364 cx: &mut ModelContext<Self>,
365 ) -> Task<Result<ModelHandle<Buffer>>> {
366 let path = path.as_ref();
367
368 // If there is already a buffer for the given path, then return it.
369 let existing_buffer = match self {
370 Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
371 Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
372 };
373 if let Some(existing_buffer) = existing_buffer {
374 return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
375 }
376
377 let path: Arc<Path> = Arc::from(path);
378 let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
379 // If the given path is already being loaded, then wait for that existing
380 // task to complete and return the same buffer.
381 hash_map::Entry::Occupied(e) => e.get().clone(),
382
383 // Otherwise, record the fact that this path is now being loaded.
384 hash_map::Entry::Vacant(entry) => {
385 let (mut tx, rx) = postage::watch::channel();
386 entry.insert(rx.clone());
387
388 let load_buffer = match self {
389 Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
390 Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
391 };
392 cx.spawn(move |this, mut cx| async move {
393 let result = load_buffer.await;
394
395 // After the buffer loads, record the fact that it is no longer
396 // loading.
397 this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
398 *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
399 })
400 .detach();
401 rx
402 }
403 };
404
405 cx.spawn(|_, _| async move {
406 loop {
407 if let Some(result) = loading_watch.borrow().as_ref() {
408 return result.clone().map_err(|e| anyhow!("{}", e));
409 }
410 loading_watch.recv().await;
411 }
412 })
413 }
414
415 #[cfg(feature = "test-support")]
416 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
417 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
418 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
419 Worktree::Remote(worktree) => {
420 Box::new(worktree.open_buffers.values().filter_map(|buf| {
421 if let RemoteBuffer::Loaded(buf) = buf {
422 Some(buf)
423 } else {
424 None
425 }
426 }))
427 }
428 };
429
430 let path = path.as_ref();
431 open_buffers
432 .find(|buffer| {
433 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
434 file.path().as_ref() == path
435 } else {
436 false
437 }
438 })
439 .is_some()
440 }
441
442 pub fn handle_update_buffer(
443 &mut self,
444 envelope: TypedEnvelope<proto::UpdateBuffer>,
445 cx: &mut ModelContext<Self>,
446 ) -> Result<()> {
447 let payload = envelope.payload.clone();
448 let buffer_id = payload.buffer_id as usize;
449 let ops = payload
450 .operations
451 .into_iter()
452 .map(|op| language::proto::deserialize_operation(op))
453 .collect::<Result<Vec<_>, _>>()?;
454
455 match self {
456 Worktree::Local(worktree) => {
457 let buffer = worktree
458 .open_buffers
459 .get(&buffer_id)
460 .and_then(|buf| buf.upgrade(cx))
461 .ok_or_else(|| {
462 anyhow!("invalid buffer {} in update buffer message", buffer_id)
463 })?;
464 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
465 }
466 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
467 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
468 Some(RemoteBuffer::Loaded(buffer)) => {
469 if let Some(buffer) = buffer.upgrade(cx) {
470 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
471 } else {
472 worktree
473 .open_buffers
474 .insert(buffer_id, RemoteBuffer::Operations(ops));
475 }
476 }
477 None => {
478 worktree
479 .open_buffers
480 .insert(buffer_id, RemoteBuffer::Operations(ops));
481 }
482 },
483 }
484
485 Ok(())
486 }
487
    /// Handle a guest's request to save a shared buffer.
    ///
    /// Only valid on a shared local worktree (panics if remote; errors if not
    /// currently shared). The save runs asynchronously and the guest receives
    /// a `BufferSaved` response with the resulting version and mtime.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        // The buffer must previously have been shared with this peer.
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        // Run the save on the foreground (model) context...
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        // ...and await + respond from a detached background task, logging any
        // failure instead of propagating it.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
540
541 pub fn handle_buffer_saved(
542 &mut self,
543 envelope: TypedEnvelope<proto::BufferSaved>,
544 cx: &mut ModelContext<Self>,
545 ) -> Result<()> {
546 let payload = envelope.payload.clone();
547 let worktree = self.as_remote_mut().unwrap();
548 if let Some(buffer) = worktree
549 .open_buffers
550 .get(&(payload.buffer_id as usize))
551 .and_then(|buf| buf.upgrade(cx))
552 {
553 buffer.update(cx, |buffer, cx| {
554 let version = payload.version.try_into()?;
555 let mtime = payload
556 .mtime
557 .ok_or_else(|| anyhow!("missing mtime"))?
558 .into();
559 buffer.did_save(version, mtime, None, cx);
560 Result::<_, anyhow::Error>::Ok(())
561 })?;
562 }
563 Ok(())
564 }
565
    /// Refresh the foreground snapshot from the background state and notify
    /// observers.
    ///
    /// While a local scan is still running, a single pending task re-polls
    /// after a delay (immediately via `yield_now` under the fake fs used in
    /// tests, 100ms otherwise). Once scanning settles — or on every poll for
    /// remote worktrees — open buffers are reconciled with the new snapshot.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // Only keep one re-poll task in flight at a time.
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
598
    /// Reconcile every open buffer's `File` with the current snapshot.
    ///
    /// Each buffer's file is re-resolved first by entry id, then by path;
    /// when neither matches, the old path/mtime are kept but `entry_id` is
    /// set to `None`, marking the file as absent from the snapshot. Buffers
    /// whose models have been dropped are pruned from the map afterwards.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Remote entries that are still just queued operations have
                // no buffer model to update yet.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        // NOTE(review): `abs_path` is not a field of the enum itself — this
        // presumably resolves through a `Deref` to `Snapshot`; confirm.
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = buffer.file() {
                        // Prefer matching by stable entry id (survives renames).
                        let new_file = if let Some(entry) = old_file
                            .entry_id()
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            // Fall back to matching by path.
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            // File no longer exists in the snapshot.
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
674
675 pub fn update_diagnostics(
676 &mut self,
677 mut params: lsp::PublishDiagnosticsParams,
678 cx: &mut ModelContext<Worktree>,
679 ) -> Result<()> {
680 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
681 let abs_path = params
682 .uri
683 .to_file_path()
684 .map_err(|_| anyhow!("URI is not a file"))?;
685 let worktree_path = Arc::from(
686 abs_path
687 .strip_prefix(&this.abs_path)
688 .context("path is not within worktree")?,
689 );
690
691 let mut group_ids_by_diagnostic_range = HashMap::default();
692 let mut diagnostics_by_group_id = HashMap::default();
693 let mut next_group_id = 0;
694 for diagnostic in &mut params.diagnostics {
695 let source = diagnostic.source.as_ref();
696 let code = diagnostic.code.as_ref();
697 let group_id = diagnostic_ranges(&diagnostic, &abs_path)
698 .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
699 .copied()
700 .unwrap_or_else(|| {
701 let group_id = post_inc(&mut next_group_id);
702 for range in diagnostic_ranges(&diagnostic, &abs_path) {
703 group_ids_by_diagnostic_range.insert((source, code, range), group_id);
704 }
705 group_id
706 });
707
708 diagnostics_by_group_id
709 .entry(group_id)
710 .or_insert(Vec::new())
711 .push(DiagnosticEntry {
712 range: diagnostic.range.start.to_point_utf16()
713 ..diagnostic.range.end.to_point_utf16(),
714 diagnostic: Diagnostic {
715 source: diagnostic.source.clone(),
716 code: diagnostic.code.clone().map(|code| match code {
717 lsp::NumberOrString::Number(code) => code.to_string(),
718 lsp::NumberOrString::String(code) => code,
719 }),
720 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
721 message: mem::take(&mut diagnostic.message),
722 group_id,
723 is_primary: false,
724 },
725 });
726 }
727
728 let diagnostics = diagnostics_by_group_id
729 .into_values()
730 .flat_map(|mut diagnostics| {
731 let primary = diagnostics
732 .iter_mut()
733 .min_by_key(|entry| entry.diagnostic.severity)
734 .unwrap();
735 primary.diagnostic.is_primary = true;
736 diagnostics
737 })
738 .collect::<Vec<_>>();
739
740 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)
741 }
742
    /// Store `diagnostics` for `path` and forward them to the matching open
    /// buffer, if any. Panics if called on a remote worktree.
    ///
    /// NOTE(review): if `buffer.update_diagnostics` returns an error, the `?`
    /// below propagates it *before* the summaries/diagnostics maps are
    /// updated and the event is emitted — confirm that is intended.
    pub fn update_diagnostic_entries(
        &mut self,
        path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        // If a buffer for this path is open, push the diagnostics into it and
        // broadcast the resulting operation to peers.
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        // Re-borrow: `send_buffer_update` above required releasing `this`.
        let this = self.as_local_mut().unwrap();
        this.diagnostic_summaries
            .insert(path.clone(), DiagnosticSummary::new(&diagnostics));
        this.diagnostics.insert(path.clone(), diagnostics);
        cx.emit(Event::DiagnosticsUpdated(path.clone()));
        Ok(())
    }
777
    /// Transmit a buffer operation to the connected peer(s).
    ///
    /// Local worktrees only transmit while shared; remote worktrees always
    /// transmit. If the RPC request fails, the operation is pushed onto
    /// `queued_operations` so it can be retransmitted later.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            Worktree::Local(worktree) => worktree.share.as_ref().map(|share| {
                (
                    share.project_id,
                    worktree.id() as u64,
                    worktree.client.clone(),
                )
            }),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.remote_id,
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    // Keep the operation for retransmission rather than
                    // losing it.
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
821}
822
/// A point-in-time view of a worktree's directory contents.
#[derive(Clone)]
pub struct Snapshot {
    id: usize,
    // Counter for scan passes; entries carry the scan id at which they were
    // last observed.
    scan_id: usize,
    // Absolute path of the worktree root on disk (empty for remote worktrees).
    abs_path: Arc<Path>,
    // File name of the root, used for display and fuzzy matching.
    root_name: String,
    // Lower-cased characters of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed `.gitignore` per directory, paired with a usize — presumably the
    // scan id at which it was last seen; confirm.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path, plus an id-keyed index into them.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    removed_entry_ids: HashMap<u64, usize>,
    // Shared monotonically-increasing source of entry ids.
    next_entry_id: Arc<AtomicUsize>,
}
836
/// A worktree hosted on this machine, backed by the local filesystem.
pub struct LocalWorktree {
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Parsed `.zed.toml` configuration.
    config: WorktreeConfig,
    // Snapshot shared with (and mutated by) the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Watch carrying the most recent scan state reported by the scanner.
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the background scanner alive; dropping it cancels the scan.
    _background_scanner_task: Option<Task<()>>,
    // Pending re-poll task while a scan is in progress.
    poll_task: Option<Task<()>>,
    // `Some` while this worktree is shared with guests.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Weak handles to open buffers, keyed by buffer model id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles retained per peer for buffers shared with guests.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Diagnostics received for files that aren't open yet, handed to the
    // buffer on open.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    // Operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
    languages: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Running language servers, keyed by language name.
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
857
/// State present while a local worktree is shared with remote guests.
struct ShareState {
    project_id: u64,
    // Channel used to submit completed snapshots for transmission to guests.
    snapshots_tx: Sender<Snapshot>,
}
862
/// A replica of a worktree hosted by a remote collaborator.
pub struct RemoteWorktree {
    project_id: u64,
    remote_id: u64,
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Watch over the background snapshot that incoming updates are applied to.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Channel feeding `UpdateWorktree` messages to the background applier.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Loaded buffers, or queued operations for buffers not yet loaded.
    open_buffers: HashMap<usize, RemoteBuffer>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
}
878
/// For each path currently being opened, a watch that yields `None` until the
/// load finishes, then `Some(Ok(buffer))` or `Some(Err(..))` for all waiters.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
883
/// Contents of the optional `.zed.toml` file at the worktree root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins permitted to collaborate on this worktree.
    collaborators: Vec<String>,
}
888
889impl LocalWorktree {
    /// Create the `LocalWorktree` model for `path`, returning the model plus
    /// the sender the background scanner uses to report scan-state changes.
    ///
    /// The background scanner itself is not started here — see
    /// [`Worktree::open_local`], which wires it up after construction.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is empty.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // Optional worktree-level configuration; unreadable or unparsable
        // files fall back to the default config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: cx.model_id(),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry, if the root exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                languages,
                client,
                user_store,
                fs,
                language_servers: Default::default(),
            };

            // Forward scan-state changes into the model: update the scan-state
            // watch, re-poll the snapshot, and — when a scan finishes while
            // the worktree is shared — submit the finished snapshot for
            // transmission to guests.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree model was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
997
998 pub fn authorized_logins(&self) -> Vec<String> {
999 self.config.collaborators.clone()
1000 }
1001
    /// The registry used to detect languages and start language servers.
    pub fn languages(&self) -> &LanguageRegistry {
        &self.languages
    }
1005
    /// Return the language server for `language`, starting one (and wiring up
    /// its diagnostics notifications) if it isn't already running. Returns
    /// `None` when the server fails to start (the error is logged).
    pub fn ensure_language_server(
        &mut self,
        language: &Language,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        // Reuse an already-running server for this language.
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            // Bridge `textDocument/publishDiagnostics` notifications from the
            // server callback into the model via an unbounded channel.
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(diagnostics) = diagnostics_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        handle.update(&mut cx, |this, cx| {
                            this.update_diagnostics(diagnostics, cx).log_err();
                        });
                    } else {
                        // The worktree was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }
1046
    /// Find an already-open buffer for `path` in this worktree.
    ///
    /// Side effect: entries whose buffer models have been dropped are pruned
    /// from `open_buffers` (the `retain` discards handles that fail to
    /// upgrade).
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let worktree_id = self.id();
        let mut result = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    // Match on both worktree and path, since a buffer's file
                    // may belong to a different worktree.
                    if file.worktree_id() == worktree_id && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        result
    }
1068
    /// Load the file at `path` into a fresh buffer model.
    ///
    /// Any diagnostics previously received for the path are moved into the
    /// buffer, and a language (plus language server) is selected from the
    /// registry based on the file's full path.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Hand pending diagnostics off to the buffer (removed from
                // the worktree's map).
                let diagnostics = this.diagnostics.remove(&path);
                let language = this.languages.select_language(file.full_path()).cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.ensure_language_server(language, cx));
                (diagnostics, language, server)
            });

            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                }
                buffer
            });

            // Track the buffer weakly so later opens of the same path reuse it.
            this.update(&mut cx, |this, _| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1107
    /// Open (or reuse) a buffer on behalf of a remote peer.
    ///
    /// A strong handle is retained in `shared_buffers` so the buffer stays
    /// alive while the peer has it open; the response carries the buffer's
    /// serialized state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1133
1134 pub fn close_remote_buffer(
1135 &mut self,
1136 envelope: TypedEnvelope<proto::CloseBuffer>,
1137 cx: &mut ModelContext<Worktree>,
1138 ) -> Result<()> {
1139 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1140 shared_buffers.remove(&envelope.payload.buffer_id);
1141 cx.notify();
1142 }
1143
1144 Ok(())
1145 }
1146
1147 pub fn remove_collaborator(
1148 &mut self,
1149 peer_id: PeerId,
1150 replica_id: ReplicaId,
1151 cx: &mut ModelContext<Worktree>,
1152 ) {
1153 self.shared_buffers.remove(&peer_id);
1154 for (_, buffer) in &self.open_buffers {
1155 if let Some(buffer) = buffer.upgrade(cx) {
1156 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1157 }
1158 }
1159 cx.notify();
1160 }
1161
    /// Returns a future that resolves once the current filesystem scan (if
    /// any) has finished, i.e. once the scan state is no longer `Scanning`.
    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            // Start from the current state, then keep consuming updates while
            // the scanner reports it is still running. `recv` returning `None`
            // (sender dropped) also ends the wait.
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Scanning) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }
1171
1172 fn is_scanning(&self) -> bool {
1173 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1174 true
1175 } else {
1176 false
1177 }
1178 }
1179
    /// Returns a clone of the current foreground snapshot of this worktree.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1183
    /// The absolute path of this worktree's root on the local filesystem.
    pub fn abs_path(&self) -> &Path {
        self.snapshot.abs_path.as_ref()
    }
1187
    /// Whether the given absolute path lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1191
    /// Converts a worktree-relative path into an absolute filesystem path.
    ///
    /// The worktree root is represented by the empty relative path (which has
    /// no file name); in that case the root's absolute path is returned
    /// directly rather than joined.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.snapshot.abs_path.join(path)
        } else {
            self.snapshot.abs_path.to_path_buf()
        }
    }
1199
    /// Loads the file at the given worktree-relative `path` from disk.
    ///
    /// On success, yields a `File` handle (backed by a freshly refreshed
    /// snapshot entry) together with the file's text contents.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Surface the refreshed background snapshot to the foreground model.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1225
    /// Saves `text` to `path` and associates `buffer` with the resulting file.
    ///
    /// Registers the buffer in `open_buffers` (weakly) so worktree-level
    /// events can reach it, and resolves to the `File` handle for the newly
    /// saved path.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1250
    /// Writes `text` to `path` on disk and refreshes the corresponding
    /// snapshot entry, resolving to the refreshed entry (with its new mtime).
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Perform the disk write and entry refresh on the background executor.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Propagate the refreshed background snapshot to the foreground.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1272
    /// Begins sharing this worktree within the given project.
    ///
    /// Spawns a background task that streams snapshot diffs to the server as
    /// new snapshots arrive on the share channel, and sends the initial full
    /// snapshot. The returned task resolves once the initial `ShareWorktree`
    /// request completes.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        // Already shared; nothing to do.
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
        });

        cx.background()
            .spawn({
                let rpc = rpc.clone();
                let snapshot = snapshot.clone();
                async move {
                    // Stream incremental updates: diff each new snapshot
                    // against the last one that was successfully sent.
                    let mut prev_snapshot = snapshot;
                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                        let message =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                        match rpc.send(message).await {
                            // Only advance the baseline when the send succeeded.
                            Ok(()) => prev_snapshot = snapshot,
                            Err(err) => log::error!("error sending snapshot diff {}", err),
                        }
                    }
                }
            })
            .detach();

        // Serialize the initial snapshot off the main thread.
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto()),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1321}
1322
1323fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1324 let contents = smol::block_on(fs.load(&abs_path))?;
1325 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1326 let mut builder = GitignoreBuilder::new(parent);
1327 for line in contents.lines() {
1328 builder.add_line(Some(abs_path.into()), line)?;
1329 }
1330 Ok(builder.build()?)
1331}
1332
impl Deref for Worktree {
    type Target = Snapshot;

    // Both variants expose a snapshot, so a `Worktree` can be used anywhere a
    // `Snapshot` reference is expected.
    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1343
impl Deref for LocalWorktree {
    type Target = Snapshot;

    /// Exposes the underlying snapshot's methods directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1351
1352impl fmt::Debug for LocalWorktree {
1353 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1354 self.snapshot.fmt(f)
1355 }
1356}
1357
impl RemoteWorktree {
    /// The server-assigned id of this worktree.
    pub fn remote_id(&self) -> u64 {
        self.remote_id
    }

    /// Returns the already-open buffer for `path`, if any.
    ///
    /// As a side effect, prunes `open_buffers` entries whose buffers have
    /// been dropped.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                // The buffer has been dropped; forget it.
                false
            }
        });
        existing_buffer
    }

    /// Opens the buffer at `path` by requesting its contents from the host
    /// over RPC, then constructing a local replica of the buffer.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.remote_id;
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        cx.spawn_weak(move |this, mut cx| async move {
            // The worktree may be dropped while the RPC request is in flight;
            // hold it weakly and re-upgrade at each step.
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id as u64,
                    path: path_string,
                })
                .await?;

            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Apply any operations that arrived while the open request
                // was still pending.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Closes every buffer that is still open in this remote worktree.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// Returns a clone of the current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues an `UpdateWorktree` message from the host to be applied to the
    /// local snapshot by the background update consumer.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Strips a departed collaborator's replica from all open buffers.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1485
/// The state of a remotely-opened buffer on this client.
enum RemoteBuffer {
    /// Operations received for a buffer whose open request has not completed
    /// yet; they are applied once the buffer finishes loading.
    Operations(Vec<Operation>),
    /// A weak handle to the loaded buffer.
    Loaded(WeakModelHandle<Buffer>),
}
1490
impl RemoteBuffer {
    /// Upgrades to a strong buffer handle, or `None` if the buffer is still
    /// pending (`Operations`) or has been dropped.
    fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
        match self {
            Self::Operations(_) => None,
            Self::Loaded(buffer) => buffer.upgrade(cx),
        }
    }
}
1499
impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> usize {
        self.id
    }

    /// Serializes the whole snapshot for the initial share message, omitting
    /// ignored entries.
    pub fn to_proto(&self) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id as u64,
            root_name,
            entries: self
                .entries_by_path
                .cursor::<()>()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
        }
    }

    /// Diffs `self` against an older snapshot `other`, producing an
    /// `UpdateWorktree` message with the changed and removed entries.
    ///
    /// Both entry sequences are ordered by id, so a lockstep merge suffices:
    /// ids only in `self` are new, ids only in `other` were removed, and ids
    /// present in both are re-sent only when their `scan_id` differs.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in `self`: a new entry.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: include only if it was rescanned.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: it was removed.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies a received diff (see `build_update`) to this snapshot.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If an entry with this id already exists under another path
            // (e.g. a rename), drop its stale path key first.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal at the `start_offset`-th entry, counting only the
    /// entry kinds selected by the two `include_*` flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over all entry paths, excluding the root entry itself.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Seek just past the parent itself so the traversal starts at its
        // first child.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root (the empty relative path), if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry at exactly `path`, if one exists.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on a later path; require an exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by its id.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if one exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts (or replaces) a single entry in both entry trees.
    ///
    /// If the entry is a `.gitignore` file, its rules are (re)loaded into the
    /// `ignores` map for the containing directory.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Preserve the id of any previous entry for the same inode or path.
        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Records the scanned children of `parent_path`, flipping the parent
    /// from `PendingDir` to `Dir` and registering its `.gitignore` (if any).
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Only a pending directory may be populated; anything else indicates
        // a scanner bookkeeping bug.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id recorded when an
    /// entry with the same inode was removed, otherwise the id of the
    /// existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything beneath it from the snapshot.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree around the removed subtree.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the removed id per inode so `reuse_entry_id` can
            // resurrect it if the same file reappears.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // A removed `.gitignore` marks the parent directory's ignore data
            // as updated in the current scan.
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of `.gitignore` rules governing `path` from its
    /// ancestor directories.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Walk from the root downwards; once any ancestor directory is
        // ignored, everything below it is as well.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1860
1861impl fmt::Debug for Snapshot {
1862 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1863 for entry in self.entries_by_path.cursor::<()>() {
1864 for _ in entry.path.ancestors().skip(1) {
1865 write!(f, " ")?;
1866 }
1867 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
1868 }
1869 Ok(())
1870 }
1871}
1872
/// A handle to a file within a worktree, as seen by a buffer.
#[derive(Clone, PartialEq)]
pub struct File {
    // `None` once the file has been deleted from the worktree.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    // Absolute path of the worktree root containing this file.
    worktree_path: Arc<Path>,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    // `false` for files belonging to remote worktrees.
    is_local: bool,
}
1882
impl language::File for File {
    /// Id of the worktree model containing this file.
    fn worktree_id(&self) -> usize {
        self.worktree.id()
    }

    /// The snapshot entry id, or `None` if the file was deleted.
    fn entry_id(&self) -> Option<usize> {
        self.entry_id
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// Path relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Absolute filesystem path; `None` for files in remote worktrees.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// The file's path prefixed with the worktree root's directory name.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    /// A file with no snapshot entry has been deleted from the worktree.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Saves the buffer's contents.
    ///
    /// For local worktrees this writes to disk (and, when shared, notifies
    /// collaborators via `BufferSaved`); for remote worktrees it asks the host
    /// to save via a `SaveBuffer` request. Resolves to the saved version and
    /// the file's new mtime.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id() as u64;
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Broadcast the save only when the worktree is shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Loads the file's text from disk; only available for local worktrees.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Forwards a buffer operation to the worktree for broadcast.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// Notifies the host that a remote buffer was closed locally; a no-op for
    /// local worktrees.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.remote_id;
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    fn boxed_clone(&self) -> Box<dyn language::File> {
        Box::new(self.clone())
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
2028
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    // Unique id, kept stable across rescans (see `Snapshot::reuse_entry_id`).
    pub id: usize,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // Whether the entry is excluded by a `.gitignore` rule.
    pub is_ignored: bool,
}
2039
/// The kind of a worktree entry.
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not been
    /// scanned yet (see `Snapshot::populate_dir`).
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying the character bag of its path for fuzzy matching.
    File(CharBag),
}
2046
2047impl Entry {
2048 fn new(
2049 path: Arc<Path>,
2050 metadata: &fs::Metadata,
2051 next_entry_id: &AtomicUsize,
2052 root_char_bag: CharBag,
2053 ) -> Self {
2054 Self {
2055 id: next_entry_id.fetch_add(1, SeqCst),
2056 kind: if metadata.is_dir {
2057 EntryKind::PendingDir
2058 } else {
2059 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2060 },
2061 path,
2062 inode: metadata.inode,
2063 mtime: metadata.mtime,
2064 is_symlink: metadata.is_symlink,
2065 is_ignored: false,
2066 }
2067 }
2068
2069 pub fn is_dir(&self) -> bool {
2070 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2071 }
2072
2073 pub fn is_file(&self) -> bool {
2074 matches!(self.kind, EntryKind::File(_))
2075 }
2076}
2077
2078impl sum_tree::Item for Entry {
2079 type Summary = EntrySummary;
2080
2081 fn summary(&self) -> Self::Summary {
2082 let visible_count = if self.is_ignored { 0 } else { 1 };
2083 let file_count;
2084 let visible_file_count;
2085 if self.is_file() {
2086 file_count = 1;
2087 visible_file_count = visible_count;
2088 } else {
2089 file_count = 0;
2090 visible_file_count = 0;
2091 }
2092
2093 EntrySummary {
2094 max_path: self.path.clone(),
2095 count: 1,
2096 visible_count,
2097 file_count,
2098 visible_file_count,
2099 }
2100 }
2101}
2102
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2110
/// Aggregated statistics over a range of `Entry` items in the sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest (rightmost) path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries not excluded by gitignore rules.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // File entries not excluded by gitignore rules.
    visible_file_count: usize,
}
2119
impl Default for EntrySummary {
    /// The empty summary: zero counts, with the empty path as maximum.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2131
2132impl sum_tree::Summary for EntrySummary {
2133 type Context = ();
2134
2135 fn add_summary(&mut self, rhs: &Self, _: &()) {
2136 self.max_path = rhs.max_path.clone();
2137 self.visible_count += rhs.visible_count;
2138 self.file_count += rhs.file_count;
2139 self.visible_file_count += rhs.visible_file_count;
2140 }
2141}
2142
/// An item in the id-ordered tree, mapping an entry id back to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan during which this entry was last updated; used by
    // `Snapshot::build_update` to detect changed entries.
    scan_id: usize,
}
2150
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single item's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2158
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    /// Path entries are keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2166
/// Summary over `PathEntry` items: the largest entry id in the range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2171
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // Items are ordered by id, so the right-hand summary always carries
        // the maximum.
        self.max_id = summary.max_id;
    }
}
2179
// Seeking the id-ordered tree by `usize` targets the entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2185
/// Newtype over an entry path, used as the ordering key of `entries_by_path`.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2188
impl Default for PathKey {
    /// The empty path, which is the key of the worktree root entry.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2194
// Seeking the path-ordered tree by `PathKey` targets the entry path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2200
/// Scans the worktree off the main thread, keeping the shared snapshot up to
/// date and reporting progress over the `notify` channel.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Shared with the foreground worktree (see `poll_snapshot` usage above).
    snapshot: Arc<Mutex<Snapshot>>,
    // Emits `Scanning`/`Idle`/`Err` state transitions.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2207
2208impl BackgroundScanner {
2209 fn new(
2210 snapshot: Arc<Mutex<Snapshot>>,
2211 notify: Sender<ScanState>,
2212 fs: Arc<dyn Fs>,
2213 executor: Arc<executor::Background>,
2214 ) -> Self {
2215 Self {
2216 fs,
2217 snapshot,
2218 notify,
2219 executor,
2220 }
2221 }
2222
    /// The worktree root's absolute path, read from the shared snapshot.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }
2226
    /// Returns a clone of the current shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }
2230
    /// Drives the scanner: performs the initial recursive scan, then applies
    /// filesystem events as they arrive, bracketing each phase with
    /// `Scanning`/`Idle` (or `Err`) notifications.
    ///
    /// Returns as soon as the notification receiver is dropped.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        // Incremental phase: re-enter `Scanning` for each batch of events.
        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }
2266
    /// Performs the initial scan of the worktree root.
    ///
    /// If the root is a directory, seeds a work queue with a job for the root
    /// and fans the traversal out across one task per CPU; each discovered
    /// subdirectory pushes a new `ScanJob` onto the same queue.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            // Copy what we need out of the snapshot so the lock is not held
            // while scanning.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Drop our sender so the channel closes once all in-flight jobs
            // (each holding its own clone) have completed.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }
2312
    /// Scans the single directory described by `job`, recording its children in
    /// the snapshot and enqueueing a new `ScanJob` for each subdirectory found.
    ///
    /// If the directory contains a `.gitignore`, it is parsed and appended to
    /// the ignore stack so that sibling entries and descendant jobs observe it.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    // Skip unreadable entries rather than aborting the scan.
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // An entry may disappear between `read_dir` and here; skip it.
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // sorts near the beginning of the listing, so there should rarely be many earlier
                // entries to revisit. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        // `new_jobs` parallels the directory entries within
                        // `new_entries`, so advancing this iterator keeps the
                        // two collections in lock step.
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Once a directory is ignored, everything below it is too.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }
2410
    /// Processes a batch of file-system events: removes affected paths from the
    /// snapshot, re-inserts those that still exist, rescans any directories that
    /// appeared, and refreshes ignore statuses.
    ///
    /// Returns `false` if the worktree root can no longer be canonicalized
    /// (e.g. it was deleted), signalling that event processing should stop.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Work on a private copy of the snapshot; it is published below.
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort by path so that an event for a directory precedes events for
        // its descendants, which the dedup then drops as redundant.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First remove every affected path. Paths that still exist are
        // re-inserted below; keeping the removed ids around until the end of
        // the batch is what allows renames to be detected.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    // New directories need a full scan of their contents.
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // Path no longer exists; the removal pass above handled it.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }
2512
    /// Refreshes `is_ignored` on entries affected by `.gitignore` files that
    /// were added, changed, or deleted during the current scan.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // A matching scan id means this ignore file was (re)loaded during
            // the current scan, so its subtree needs its statuses recomputed.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            // Drop cached ignores whose .gitignore file no longer exists.
            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip paths nested beneath `parent_path`: processing the
            // ancestor visits its descendants recursively anyway.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Close the channel so workers exit once all jobs are processed.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2569
    /// Recomputes `is_ignored` for the direct children of `job.path`, enqueues
    /// a follow-up job for each child directory, and applies any resulting
    /// changes to the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own .gitignore, if it has one.
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Record edits only for entries whose ignore status actually
            // changed, keeping both trees consistent.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2610}
2611
2612async fn refresh_entry(
2613 fs: &dyn Fs,
2614 snapshot: &Mutex<Snapshot>,
2615 path: Arc<Path>,
2616 abs_path: &Path,
2617) -> Result<Entry> {
2618 let root_char_bag;
2619 let next_entry_id;
2620 {
2621 let snapshot = snapshot.lock();
2622 root_char_bag = snapshot.root_char_bag;
2623 next_entry_id = snapshot.next_entry_id.clone();
2624 }
2625 let entry = Entry::new(
2626 path,
2627 &fs.metadata(abs_path)
2628 .await?
2629 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2630 &next_entry_id,
2631 root_char_bag,
2632 );
2633 Ok(snapshot.lock().insert_entry(entry, fs))
2634}
2635
2636fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2637 let mut result = root_char_bag;
2638 result.extend(
2639 path.to_string_lossy()
2640 .chars()
2641 .map(|c| c.to_ascii_lowercase()),
2642 );
2643 result
2644}
2645
/// A unit of work for the directory scanners: one directory to read.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: PathBuf,
    /// Path of the directory relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
}
2652
/// A unit of work for refreshing ignore statuses: one directory whose
/// children need their `is_ignored` flags recomputed.
struct UpdateIgnoreStatusJob {
    /// Directory (relative to the worktree root) to process.
    path: Arc<Path>,
    /// Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for child directories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2658
/// Test-only helpers available on a `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    /// Waits until all file-system events that predate this call have been
    /// observed and processed by the worktree.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2666
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create a sentinel file and wait for the worktree to notice it...
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for that to be observed as well.
            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any scans triggered by the above to finish.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2699
/// Running totals accumulated while seeking through the entry tree; used as a
/// `sum_tree` dimension to support both count-based and path-based seeks.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// Rightmost entry path covered so far.
    max_path: &'a Path,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file (non-directory) entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2708
2709impl<'a> TraversalProgress<'a> {
2710 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2711 match (include_ignored, include_dirs) {
2712 (true, true) => self.count,
2713 (true, false) => self.file_count,
2714 (false, true) => self.visible_count,
2715 (false, false) => self.visible_file_count,
2716 }
2717 }
2718}
2719
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Accumulate the counts; `max_path` tracks the rightmost path seen.
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2729
impl<'a> Default for TraversalProgress<'a> {
    /// Progress at the very start of a traversal: nothing counted yet.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2741
/// A filtered, cursor-based iterator over worktree entries in path order.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether ignored entries are yielded.
    include_ignored: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
2747
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward so that `offset()` becomes `offset`, counting only
    /// entries matching the `include_dirs`/`include_ignored` filters.
    /// Returns whether the cursor landed on an entry.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past all descendants of the current entry, stopping at the next
    /// filter-matching entry that is not beneath it. Returns whether such an
    /// entry was found.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek to the first entry that is not inside `entry.path`.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
            // Landed on a filtered-out entry; loop and skip past it too.
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filter-matching entries preceding the cursor.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2793
2794impl<'a> Iterator for Traversal<'a> {
2795 type Item = &'a Entry;
2796
2797 fn next(&mut self) -> Option<Self::Item> {
2798 if let Some(item) = self.entry() {
2799 self.advance();
2800 Some(item)
2801 } else {
2802 None
2803 }
2804 }
2805}
2806
/// Seek targets used to position a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the entry at this exact path.
    Path(&'a Path),
    /// Seek to the first entry that is not a descendant of this path.
    PathSuccessor(&'a Path),
    /// Seek to the given filtered entry count.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2817
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Order by path.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            // Report `Greater` while the cursor is still within `path`'s
            // subtree, so the seek stops at the first entry outside of it.
            TraversalTarget::PathSuccessor(path) => {
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Order by filtered entry count.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
2840
/// Iterator over the direct children of `parent_path`, implemented by
/// repeatedly advancing a `Traversal` to the next sibling.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2845
2846impl<'a> Iterator for ChildEntriesIter<'a> {
2847 type Item = &'a Entry;
2848
2849 fn next(&mut self) -> Option<Self::Item> {
2850 if let Some(item) = self.traversal.entry() {
2851 if item.path.starts_with(&self.parent_path) {
2852 self.traversal.advance_to_sibling();
2853 return Some(item);
2854 }
2855 }
2856 None
2857 }
2858}
2859
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local worktree entry into its protobuf representation for
    /// transmission to remote peers.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            // Paths are sent as (lossy) UTF-8 strings over the wire.
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
2873
2874impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2875 type Error = anyhow::Error;
2876
2877 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2878 if let Some(mtime) = entry.mtime {
2879 let kind = if entry.is_dir {
2880 EntryKind::Dir
2881 } else {
2882 let mut char_bag = root_char_bag.clone();
2883 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2884 EntryKind::File(char_bag)
2885 };
2886 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2887 Ok(Entry {
2888 id: entry.id as usize,
2889 kind,
2890 path: path.clone(),
2891 inode: entry.inode,
2892 mtime: mtime.into(),
2893 is_symlink: entry.is_symlink,
2894 is_ignored: entry.is_ignored,
2895 })
2896 } else {
2897 Err(anyhow!(
2898 "missing mtime in remote worktree entry {:?}",
2899 entry.path
2900 ))
2901 }
2902 }
2903}
2904
/// Conversion into a UTF-16 based buffer coordinate.
trait ToPointUtf16 {
    fn to_point_utf16(self) -> PointUtf16;
}
2908
impl ToPointUtf16 for lsp::Position {
    /// An LSP position's zero-based line/character pair maps directly onto
    /// `PointUtf16`'s row/column.
    fn to_point_utf16(self) -> PointUtf16 {
        PointUtf16::new(self.line, self.character)
    }
}
2914
2915fn diagnostic_ranges<'a>(
2916 diagnostic: &'a lsp::Diagnostic,
2917 abs_path: &'a Path,
2918) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
2919 diagnostic
2920 .related_information
2921 .iter()
2922 .flatten()
2923 .filter_map(move |info| {
2924 if info.location.uri.to_file_path().ok()? == abs_path {
2925 let info_start = PointUtf16::new(
2926 info.location.range.start.line,
2927 info.location.range.start.character,
2928 );
2929 let info_end = PointUtf16::new(
2930 info.location.range.end.line,
2931 info.location.range.end.character,
2932 );
2933 Some(info_start..info_end)
2934 } else {
2935 None
2936 }
2937 })
2938 .chain(Some(
2939 diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
2940 ))
2941}
2942
2943#[cfg(test)]
2944mod tests {
2945 use super::*;
2946 use crate::fs::FakeFs;
2947 use anyhow::Result;
2948 use client::test::{FakeHttpClient, FakeServer};
2949 use fs::RealFs;
2950 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
2951 use language::{Diagnostic, LanguageConfig};
2952 use lsp::Url;
2953 use rand::prelude::*;
2954 use serde_json::json;
2955 use std::{cell::RefCell, rc::Rc};
2956 use std::{
2957 env,
2958 fmt::Write,
2959 time::{SystemTime, UNIX_EPOCH},
2960 };
2961 use text::Point;
2962 use unindent::Unindent as _;
2963 use util::test::temp_tree;
2964
    // Entries are yielded in path order, and ignored entries ("a/b" per the
    // .gitignore) are excluded when traversing with `entries(false)`.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3011
    // Editing a buffer and saving it writes the new contents back to disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            // Large edit so the save is non-trivial.
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3045
    // A worktree rooted at a single file exposes that file under the empty
    // relative path and can save it like any other buffer.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        // The root file is addressed by the empty path.
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3084
    // Renames and deletions on disk preserve entry ids and keep open buffers
    // pointing at the right paths; a remote worktree replica can then be
    // brought up to date by applying a snapshot diff.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let mut client = Client::new();
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(),
            Client::new(),
            user_store,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Renamed entries keep their original ids.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers follow their files to the new paths.
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3239
    // Files created after the initial scan inherit the correct ignore status
    // from the existing .gitignore rules.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Create files after the initial scan and verify their ignore status.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3291
    // Opening the same path multiple times — concurrently or sequentially —
    // always yields the same buffer instance.
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        let user_id = 100;
        let mut client = Client::new();
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap();
        let buffer_a_2 = buffer_a_2.await.unwrap();
        let buffer_b = buffer_b.await.unwrap();
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap();

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3349
    // Exercises the buffer dirty-state machine: editing marks a buffer dirty
    // and emits Dirtied; saving clears it and emits Saved; deleting a file on
    // disk dirties a clean buffer but not an already-dirty one.
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3489
    // Verifies a buffer's reaction to out-of-band changes to its file on
    // disk: a clean buffer is silently reloaded with the new contents, while
    // a buffer with unsaved edits keeps them and is marked as conflicted.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan so the worktree knows about "the-file"
        // before we open a buffer for it.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            // A disk-driven reload must not leave the buffer dirty or
            // conflicted.
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3592
    // Verifies that diagnostics published by a (fake) language server are
    // recorded by the worktree and show up on a buffer — even when the
    // diagnostics arrive before that buffer is opened.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        // Register a "Rust" language wired to the fake server so opening any
        // .rs file starts it.
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        // Publish a diagnostic for `a.rs`, which is not open yet.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // The newly-opened buffer carries the previously-published diagnostic,
        // with the LSP range converted to buffer points.
        buffer.read_with(&cx, |buffer, _| {
            let diagnostics = buffer
                .snapshot()
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
3674
    // Verifies that diagnostics linked through LSP `related_information` are
    // grouped: each primary diagnostic and its hint diagnostics share a
    // `group_id`, the primary is flagged `is_primary`, and
    // `diagnostic_group` returns each group's entries.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Five diagnostics forming two logical groups: "error 1" with one
        // hint, and "error 2" with two hints. Primaries reference their hints
        // via `related_information`, and each hint references its primary
        // back with an "original diagnostic" entry.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| tree.update_diagnostics(message, cx))
            .unwrap();
        // Shadow the model handle with an immutable snapshot for the asserts.
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All diagnostics, ordered by position: group 0 is "error 1" + hint,
        // group 1 is "error 2" + its two hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched individually by its id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3937
    // Randomized test: mutate a real temp directory while delivering fs
    // events to a `BackgroundScanner` in random-sized batches, checking
    // snapshot invariants after every batch. Afterwards, verify that (a) a
    // fresh scan of the final tree matches the incrementally-maintained
    // snapshot, and (b) earlier snapshots can be brought up to date by
    // applying a built update. Mutation/event counts are tunable via the
    // OPERATIONS and INITIAL_ENTRIES env vars.
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // An empty snapshot containing only the root entry; cloned below so
        // the same starting point can be rescanned by a second scanner.
        let mut initial_snapshot = Snapshot {
            id: 0,
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave further mutations with randomly-sized event deliveries,
        // so the scanner observes changes out of lockstep with when they
        // happened. Occasionally capture a snapshot for the update-replay
        // checks at the end.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush any still-undelivered events so the scanner reaches the
        // final state of the tree.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A brand-new scan of the final tree must agree with the snapshot
        // that was maintained incrementally through events.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Each captured intermediate snapshot must converge to the final
        // state when a built update is applied — with or without ignored
        // entries included.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries first, since the update being
                // applied will not mention them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4054
    /// Performs one random filesystem mutation beneath `root_path` — creating
    /// a file or directory, writing a `.gitignore`, renaming, or deleting —
    /// and returns synthetic `fsevent::Event`s for the affected paths so a
    /// caller can replay them through a `BackgroundScanner`.
    ///
    /// `insertion_probability` biases the choice toward creation; at 1.0 the
    /// call always creates something, which is how the initial tree is grown.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        // Fabricates an event for `path`; only the path matters to the
        // consumer, so the event id is just a coarse timestamp and no flags
        // are set.
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Creation branch — forced whenever the tree is empty (only the root
        // dir, no files) so it can grow from nothing.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Rarely, write a `.gitignore` into a random directory covering a
            // random subset of its descendant files and dirs.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One relative pattern per line, as git expects.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Otherwise pick an existing entry (any file, or any dir except
            // the root) and either rename or delete it.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Pick a destination that is not inside the thing being
                // moved, optionally replacing an existing directory there.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                let (dirs, files) = read_dir_recursive(old_path.clone());

                // Record an event for every path the recursive delete
                // removes, not just the top-level dir.
                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
4177
4178 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4179 let child_entries = std::fs::read_dir(&path).unwrap();
4180 let mut dirs = vec![path];
4181 let mut files = Vec::new();
4182 for child_entry in child_entries {
4183 let child_path = child_entry.unwrap().path();
4184 if child_path.is_dir() {
4185 let (child_dirs, child_files) = read_dir_recursive(child_path);
4186 dirs.extend(child_dirs);
4187 files.extend(child_files);
4188 } else {
4189 files.push(child_path);
4190 }
4191 }
4192 (dirs, files)
4193 }
4194
4195 fn gen_name(rng: &mut impl Rng) -> String {
4196 (0..6)
4197 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4198 .map(char::from)
4199 .collect()
4200 }
4201
    impl Snapshot {
        /// Test-only consistency checks, run after every event batch in
        /// `test_random`. Panics if any invariant is violated.
        fn check_invariants(&self) {
            // The `files(...)` iterators must yield exactly the file entries
            // of `entries_by_path`, in the same order; the second iterator
            // additionally skips ignored files.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            // Neither iterator may yield entries beyond those in the tree.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walking the tree via `child_entries` must visit exactly the
            // same paths, in the same order, as a flat scan of
            // `entries_by_path`.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    // Insert at `ix` so this node's children are visited in
                    // order, before its remaining siblings.
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every tracked ignore must correspond to an existing directory
            // entry that still contains a `.gitignore` file.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` triples
        /// sorted by path, optionally filtering out ignored entries —
        /// used to compare snapshots from different scanners.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4253}