1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap};
11use collections::{BTreeMap, HashSet};
12use futures::{Stream, StreamExt};
13use fuzzy::CharBag;
14use gpui::{
15 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
16 Task, UpgradeModelHandle, WeakModelHandle,
17};
18use language::{
19 Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Language, LanguageRegistry,
20 Operation, PointUtf16, Rope,
21};
22use lazy_static::lazy_static;
23use lsp::LanguageServer;
24use parking_lot::Mutex;
25use postage::{
26 prelude::{Sink as _, Stream as _},
27 watch,
28};
29use serde::Deserialize;
30use smol::channel::{self, Sender};
31use std::{
32 any::Any,
33 cmp::{self, Ordering},
34 convert::{TryFrom, TryInto},
35 ffi::{OsStr, OsString},
36 fmt,
37 future::Future,
38 mem,
39 ops::{Deref, Range},
40 path::{Path, PathBuf},
41 sync::{
42 atomic::{AtomicUsize, Ordering::SeqCst},
43 Arc,
44 },
45 time::{Duration, SystemTime},
46};
47use sum_tree::Bias;
48use sum_tree::{Edit, SeekTarget, SumTree};
49use util::{post_inc, ResultExt, TryFutureExt};
50
lazy_static! {
    // Cached `OsStr` for ".gitignore" so directory scans can compare file
    // names without constructing the string repeatedly.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
54
/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    // No scan in progress.
    Idle,
    // A scan is currently running.
    Scanning,
    // The last scan attempt failed with the contained error.
    Err(Arc<anyhow::Error>),
}
61
/// A worktree rooted at some path: either backed by the local filesystem,
/// or a replica of a collaborator's worktree synchronized over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
66
/// Events emitted by a [`Worktree`] model.
#[derive(Debug)]
pub enum Event {
    // Diagnostics changed for the file at the given worktree-relative path.
    DiagnosticsUpdated(Arc<Path>),
}
71
72impl Entity for Worktree {
73 type Event = Event;
74
75 fn app_will_quit(
76 &mut self,
77 _: &mut MutableAppContext,
78 ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
79 use futures::FutureExt;
80
81 if let Self::Local(worktree) = self {
82 let shutdown_futures = worktree
83 .language_servers
84 .drain()
85 .filter_map(|(_, server)| server.shutdown())
86 .collect::<Vec<_>>();
87 Some(
88 async move {
89 futures::future::join_all(shutdown_futures).await;
90 }
91 .boxed(),
92 )
93 } else {
94 None
95 }
96 }
97}
98
99impl Worktree {
100 pub async fn open_local(
101 client: Arc<Client>,
102 user_store: ModelHandle<UserStore>,
103 path: impl Into<Arc<Path>>,
104 fs: Arc<dyn Fs>,
105 languages: Arc<LanguageRegistry>,
106 cx: &mut AsyncAppContext,
107 ) -> Result<ModelHandle<Self>> {
108 let (tree, scan_states_tx) =
109 LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
110 tree.update(cx, |tree, cx| {
111 let tree = tree.as_local_mut().unwrap();
112 let abs_path = tree.snapshot.abs_path.clone();
113 let background_snapshot = tree.background_snapshot.clone();
114 let background = cx.background().clone();
115 tree._background_scanner_task = Some(cx.background().spawn(async move {
116 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
117 let scanner =
118 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
119 scanner.run(events).await;
120 }));
121 });
122 Ok(tree)
123 }
124
    /// Build a read-only replica of a host's worktree from the initial
    /// `proto::Worktree` received over RPC, and wire up the channels/tasks
    /// that keep its snapshot in sync with subsequent updates.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        // Lowercased character bag of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Deserializing every entry can be expensive for large trees, so it
        // runs on the background executor.
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // A malformed entry is logged and skipped rather than
                        // failing the whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: remote_id as usize,
                    scan_id: 0,
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                // Worktree updates arrive on `updates_tx`; the background task
                // below applies each one to a copy of the snapshot and
                // publishes the result through the watch channel.
                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    // Whenever a new snapshot is published, poll it on the
                    // foreground so the model and its open buffers observe it.
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                // The worktree was dropped; stop polling.
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    diagnostic_summaries: Default::default(),
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                })
            })
        });

        Ok(worktree)
    }
233
234 pub fn as_local(&self) -> Option<&LocalWorktree> {
235 if let Worktree::Local(worktree) = self {
236 Some(worktree)
237 } else {
238 None
239 }
240 }
241
242 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
243 if let Worktree::Remote(worktree) = self {
244 Some(worktree)
245 } else {
246 None
247 }
248 }
249
250 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
251 if let Worktree::Local(worktree) = self {
252 Some(worktree)
253 } else {
254 None
255 }
256 }
257
258 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
259 if let Worktree::Remote(worktree) = self {
260 Some(worktree)
261 } else {
262 None
263 }
264 }
265
266 pub fn snapshot(&self) -> Snapshot {
267 match self {
268 Worktree::Local(worktree) => worktree.snapshot(),
269 Worktree::Remote(worktree) => worktree.snapshot(),
270 }
271 }
272
273 pub fn replica_id(&self) -> ReplicaId {
274 match self {
275 Worktree::Local(_) => 0,
276 Worktree::Remote(worktree) => worktree.replica_id,
277 }
278 }
279
280 pub fn remove_collaborator(
281 &mut self,
282 peer_id: PeerId,
283 replica_id: ReplicaId,
284 cx: &mut ModelContext<Self>,
285 ) {
286 match self {
287 Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
288 Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
289 }
290 }
291
292 pub fn languages(&self) -> &Arc<LanguageRegistry> {
293 match self {
294 Worktree::Local(worktree) => &worktree.language_registry,
295 Worktree::Remote(worktree) => &worktree.languages,
296 }
297 }
298
299 pub fn user_store(&self) -> &ModelHandle<UserStore> {
300 match self {
301 Worktree::Local(worktree) => &worktree.user_store,
302 Worktree::Remote(worktree) => &worktree.user_store,
303 }
304 }
305
    /// RPC handler: a guest asked to open a buffer. Open it locally, then
    /// send the serialized buffer back in the background.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        // Only hosts (local worktrees) receive this message.
        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
331
332 pub fn handle_close_buffer(
333 &mut self,
334 envelope: TypedEnvelope<proto::CloseBuffer>,
335 _: Arc<Client>,
336 cx: &mut ModelContext<Self>,
337 ) -> anyhow::Result<()> {
338 self.as_local_mut()
339 .unwrap()
340 .close_remote_buffer(envelope, cx)
341 }
342
343 pub fn diagnostic_summaries<'a>(
344 &'a self,
345 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
346 match self {
347 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
348 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
349 }
350 .iter()
351 .map(|(path, summary)| (path.clone(), summary.clone()))
352 }
353
354 pub fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
355 match self {
356 Worktree::Local(worktree) => &mut worktree.loading_buffers,
357 Worktree::Remote(worktree) => &mut worktree.loading_buffers,
358 }
359 }
360
    /// Open the buffer for `path`, reusing an already-open buffer or an
    /// in-flight load when possible so that each path has at most one buffer.
    pub fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok(existing_buffer) });
        }

        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    *tx.borrow_mut() = Some(result.map_err(|e| Arc::new(e)));
                })
                .detach();
                rx
            }
        };

        // Wait until the load (ours or a pre-existing one) publishes a result
        // through the watch channel.
        cx.spawn(|_, _| async move {
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return result.clone().map_err(|e| anyhow!("{}", e));
                }
                loading_watch.recv().await;
            }
        })
    }
414
415 #[cfg(feature = "test-support")]
416 pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
417 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
418 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
419 Worktree::Remote(worktree) => {
420 Box::new(worktree.open_buffers.values().filter_map(|buf| {
421 if let RemoteBuffer::Loaded(buf) = buf {
422 Some(buf)
423 } else {
424 None
425 }
426 }))
427 }
428 };
429
430 let path = path.as_ref();
431 open_buffers
432 .find(|buffer| {
433 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
434 file.path().as_ref() == path
435 } else {
436 false
437 }
438 })
439 .is_some()
440 }
441
442 pub fn handle_update_buffer(
443 &mut self,
444 envelope: TypedEnvelope<proto::UpdateBuffer>,
445 cx: &mut ModelContext<Self>,
446 ) -> Result<()> {
447 let payload = envelope.payload.clone();
448 let buffer_id = payload.buffer_id as usize;
449 let ops = payload
450 .operations
451 .into_iter()
452 .map(|op| language::proto::deserialize_operation(op))
453 .collect::<Result<Vec<_>, _>>()?;
454
455 match self {
456 Worktree::Local(worktree) => {
457 let buffer = worktree
458 .open_buffers
459 .get(&buffer_id)
460 .and_then(|buf| buf.upgrade(cx))
461 .ok_or_else(|| {
462 anyhow!("invalid buffer {} in update buffer message", buffer_id)
463 })?;
464 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
465 }
466 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
467 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
468 Some(RemoteBuffer::Loaded(buffer)) => {
469 if let Some(buffer) = buffer.upgrade(cx) {
470 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
471 } else {
472 worktree
473 .open_buffers
474 .insert(buffer_id, RemoteBuffer::Operations(ops));
475 }
476 }
477 None => {
478 worktree
479 .open_buffers
480 .insert(buffer_id, RemoteBuffer::Operations(ops));
481 }
482 },
483 }
484
485 Ok(())
486 }
487
    /// RPC handler: a guest asked us (the host) to save a buffer we shared
    /// with them; save it and respond with the saved version and mtime.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        // Saving on behalf of a guest only makes sense while sharing.
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        // The save itself runs on the foreground (it mutates the buffer
        // model); only the RPC response completes in the background.
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
540
541 pub fn handle_buffer_saved(
542 &mut self,
543 envelope: TypedEnvelope<proto::BufferSaved>,
544 cx: &mut ModelContext<Self>,
545 ) -> Result<()> {
546 let payload = envelope.payload.clone();
547 let worktree = self.as_remote_mut().unwrap();
548 if let Some(buffer) = worktree
549 .open_buffers
550 .get(&(payload.buffer_id as usize))
551 .and_then(|buf| buf.upgrade(cx))
552 {
553 buffer.update(cx, |buffer, cx| {
554 let version = payload.version.try_into()?;
555 let mtime = payload
556 .mtime
557 .ok_or_else(|| anyhow!("missing mtime"))?
558 .into();
559 buffer.did_save(version, mtime, None, cx);
560 Result::<_, anyhow::Error>::Ok(())
561 })?;
562 }
563 Ok(())
564 }
565
    /// Refresh `self.snapshot` from the authoritative source (the background
    /// scanner's snapshot for local worktrees, the watch channel for remote
    /// ones) and propagate file changes to open buffers when appropriate.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        // While a scan is running, re-poll on a timer instead
                        // of after every change. The fake fs (tests) yields
                        // instead of sleeping to keep tests deterministic.
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    // Scan finished: stop the poll timer and reconcile open
                    // buffers against the final snapshot.
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
598
    /// Reconcile every open buffer's `File` with the latest snapshot:
    /// re-resolve each buffer's entry (first by id, then by path), fall back
    /// to an entry-less `File` when the file is gone from the snapshot, and
    /// prune buffers whose models have been dropped.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                // Remote entries may just be queued operations for buffers
                // still loading; only loaded buffers have a file to update.
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = buffer.file() {
                        // Prefer matching by entry id (stable across renames)...
                        let new_file = if let Some(entry) = old_file
                            .entry_id()
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        // ...then by path (e.g. the file was re-created)...
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        // ...otherwise the file no longer exists in the
                        // snapshot: keep the old path but clear the entry id.
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                // The buffer model has been dropped; forget its weak handle.
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
674
    /// Ingest an LSP `textDocument/publishDiagnostics` notification for a
    /// local worktree: group diagnostics that share a (source, code) and an
    /// overlapping range, pick the most severe entry of each group as the
    /// primary, then store the result via `update_diagnostic_entries`.
    pub fn update_diagnostics(
        &mut self,
        mut params: lsp::PublishDiagnosticsParams,
        disk_based_sources: &HashSet<String>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
        let abs_path = params
            .uri
            .to_file_path()
            .map_err(|_| anyhow!("URI is not a file"))?;
        let worktree_path = Arc::from(
            abs_path
                .strip_prefix(&this.abs_path)
                .context("path is not within worktree")?,
        );

        // Diagnostics with the same source/code whose ranges touch a common
        // range are treated as one group (e.g. an error plus related notes).
        let mut group_ids_by_diagnostic_range = HashMap::default();
        let mut diagnostics_by_group_id = HashMap::default();
        let mut next_group_id = 0;
        for diagnostic in &mut params.diagnostics {
            let source = diagnostic.source.as_ref();
            let code = diagnostic.code.as_ref();
            let group_id = diagnostic_ranges(&diagnostic, &abs_path)
                .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
                .copied()
                .unwrap_or_else(|| {
                    // First diagnostic of its group: allocate a fresh id and
                    // claim all of this diagnostic's ranges for it.
                    let group_id = post_inc(&mut next_group_id);
                    for range in diagnostic_ranges(&diagnostic, &abs_path) {
                        group_ids_by_diagnostic_range.insert((source, code, range), group_id);
                    }
                    group_id
                });

            diagnostics_by_group_id
                .entry(group_id)
                .or_insert(Vec::new())
                .push(DiagnosticEntry {
                    range: diagnostic.range.start.to_point_utf16()
                        ..diagnostic.range.end.to_point_utf16(),
                    diagnostic: Diagnostic {
                        code: diagnostic.code.clone().map(|code| match code {
                            lsp::NumberOrString::Number(code) => code.to_string(),
                            lsp::NumberOrString::String(code) => code,
                        }),
                        // LSP allows severity to be omitted; treat as ERROR.
                        severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
                        message: mem::take(&mut diagnostic.message),
                        group_id,
                        is_primary: false,
                        is_valid: true,
                        is_disk_based: diagnostic
                            .source
                            .as_ref()
                            .map_or(false, |source| disk_based_sources.contains(source)),
                    },
                });
        }

        // Within each group, the most severe entry becomes the primary.
        let diagnostics = diagnostics_by_group_id
            .into_values()
            .flat_map(|mut diagnostics| {
                let primary = diagnostics
                    .iter_mut()
                    .min_by_key(|entry| entry.diagnostic.severity)
                    .unwrap();
                primary.diagnostic.is_primary = true;
                diagnostics
            })
            .collect::<Vec<_>>();

        self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)
    }
747
    /// Record new diagnostics for `path` on a local worktree: push them into
    /// the matching open buffer (broadcasting the resulting operation), then
    /// update the stored diagnostics and per-path summary and emit
    /// `DiagnosticsUpdated`.
    pub fn update_diagnostic_entries(
        &mut self,
        path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let this = self.as_local_mut().unwrap();
        for buffer in this.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    // At most one open buffer is updated per path.
                    break;
                }
            }
        }

        // Re-borrow as local: `send_buffer_update` above required `&mut self`.
        let this = self.as_local_mut().unwrap();
        this.diagnostic_summaries
            .insert(path.clone(), DiagnosticSummary::new(&diagnostics));
        this.diagnostics.insert(path.clone(), diagnostics);
        cx.emit(Event::DiagnosticsUpdated(path.clone()));
        Ok(())
    }
782
    /// Send a buffer operation to the other side of the collaboration. Local
    /// worktrees only send while actively shared; on RPC failure the
    /// operation is queued in `queued_operations` for a later retry.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            // `share` is `None` when not sharing, which skips the send.
            Worktree::Local(worktree) => worktree.share.as_ref().map(|share| {
                (
                    share.project_id,
                    worktree.id() as u64,
                    worktree.client.clone(),
                )
            }),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.remote_id,
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    // Keep the operation around so it can be re-sent.
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
826}
827
/// A point-in-time view of a worktree's file hierarchy.
#[derive(Clone)]
pub struct Snapshot {
    id: usize,
    // Bumped per scan pass; entries and ignores carry the scan id that last
    // touched them. (NOTE(review): exact semantics live in the scanner.)
    scan_id: usize,
    // Absolute path of the worktree root on disk.
    abs_path: Arc<Path>,
    // File name of the root, used for display and fuzzy matching.
    root_name: String,
    // Lowercased characters of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    // Parsed `.gitignore`s keyed by containing directory, paired with a
    // scan id.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Entries ordered by path, plus an id-keyed index into them.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Ids of removed entries mapped to a scan id; bookkeeping used by the
    // scanner when entries disappear.
    removed_entry_ids: HashMap<u64, usize>,
    // Source of fresh entry ids, shared with the background scanner.
    next_entry_id: Arc<AtomicUsize>,
}
841
pub struct LocalWorktree {
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Parsed `.zed.toml` configuration from the worktree root.
    config: WorktreeConfig,
    // Snapshot mutated directly by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Most recent scan state reported by the background scanner.
    last_scan_state_rx: watch::Receiver<ScanState>,
    // Keeps the background scanner task alive; never read.
    _background_scanner_task: Option<Task<()>>,
    // Pending timer task that re-polls the snapshot while scanning.
    poll_task: Option<Task<()>>,
    // Present while this worktree is shared with guests.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Weak handles to open buffers, keyed by buffer model id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers opened on behalf of guests, per peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Latest diagnostics per path; handed off to a buffer when it is opened.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    // Buffer operations that failed to send, kept for a later retry.
    queued_operations: Vec<(u64, Operation)>,
    language_registry: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Languages registered for this worktree, and running servers by name.
    languages: Vec<Arc<Language>>,
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
863
/// State held while a local worktree is shared with guests.
struct ShareState {
    project_id: u64,
    // Channel over which fresh snapshots are submitted for sending.
    snapshots_tx: Sender<Snapshot>,
}
868
pub struct RemoteWorktree {
    project_id: u64,
    // The host's id for this worktree.
    remote_id: u64,
    // Foreground copy of the snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Publishes snapshots produced by the background update-applying task.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Incoming `UpdateWorktree` messages are queued here for application.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Loaded buffers, or operations queued for buffers not yet loaded.
    open_buffers: HashMap<usize, RemoteBuffer>,
    diagnostic_summaries: BTreeMap<Arc<Path>, DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    // Buffer operations that failed to send, kept for a later retry.
    queued_operations: Vec<(u64, Operation)>,
}
884
/// Per-path watch receivers for in-flight buffer loads; the value is `None`
/// until the load finishes, then holds its result.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<Option<Result<ModelHandle<Buffer>, Arc<anyhow::Error>>>>,
>;
889
/// Contents of an optional `.zed.toml` at the worktree root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins authorized to collaborate on this worktree.
    collaborators: Vec<String>,
}
894
895impl LocalWorktree {
    /// Construct the `LocalWorktree` model for `path`: read the root's
    /// metadata and optional `.zed.toml`, seed the initial snapshot, and
    /// spawn the task that relays scan-state changes. The returned sender is
    /// handed to the background scanner by the caller (`open_local`).
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is the empty path.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // `.zed.toml` is optional; load/parse failures leave the defaults.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: cx.model_id(),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            // Seed the snapshot with the root entry, if it exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                language_registry: languages,
                client,
                user_store,
                fs,
                languages: Default::default(),
                language_servers: Default::default(),
            };

            // Relay scan-state changes: update the latest-state watch,
            // re-poll the snapshot, and — when a scan completes while
            // sharing — forward the fresh snapshot to the share's channel.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // Worktree was dropped; stop relaying.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1004
1005 pub fn authorized_logins(&self) -> Vec<String> {
1006 self.config.collaborators.clone()
1007 }
1008
1009 pub fn language_registry(&self) -> &LanguageRegistry {
1010 &self.language_registry
1011 }
1012
1013 pub fn languages(&self) -> &[Arc<Language>] {
1014 &self.languages
1015 }
1016
    /// Remember `language` for this worktree and return its language server,
    /// starting one (and wiring diagnostics forwarding) if none is running.
    pub fn register_language(
        &mut self,
        language: &Arc<Language>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        if !self.languages.iter().any(|l| Arc::ptr_eq(l, language)) {
            self.languages.push(language.clone());
        }

        // Reuse an already-running server for this language name.
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            let disk_based_sources = language
                .disk_based_diagnostic_sources()
                .cloned()
                .unwrap_or_default();
            // Forward LSP publishDiagnostics notifications through a channel
            // so they can be applied to the worktree on the foreground.
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(diagnostics) = diagnostics_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        handle.update(&mut cx, |this, cx| {
                            this.update_diagnostics(diagnostics, &disk_based_sources, cx)
                                .log_err();
                        });
                    } else {
                        // Worktree dropped; stop listening for diagnostics.
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }
1066
    /// Find an already-open buffer whose file matches `path`, pruning any
    /// buffers whose models have been dropped along the way.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let worktree_id = self.id();
        let mut result = None;
        // `retain` does double duty here: it scans for the matching buffer
        // and drops entries whose weak handles no longer upgrade.
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == worktree_id && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });
        result
    }
1088
    /// Load `path` from disk and build a `Buffer` model for it, attaching any
    /// previously stored diagnostics and the language (and language server)
    /// selected for the file.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            let (diagnostics, language, language_server) = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Diagnostics recorded before the buffer existed are handed
                // off to the buffer now (hence `remove`).
                let diagnostics = this.diagnostics.remove(&path);
                let language = this
                    .language_registry
                    .select_language(file.full_path())
                    .cloned();
                let server = language
                    .as_ref()
                    .and_then(|language| this.register_language(language, cx));
                (diagnostics, language, server)
            });

            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                buffer.set_language(language, language_server, cx);
                if let Some(diagnostics) = diagnostics {
                    buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                }
                buffer
            });

            // Track the new buffer weakly so later opens can deduplicate.
            this.update(&mut cx, |this, _| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
1130
    /// Open a buffer on behalf of a guest, retain a strong handle for them in
    /// `shared_buffers`, and serialize the buffer for the RPC response.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        cx.spawn(|this, mut cx| async move {
            let peer_id = envelope.original_sender_id();
            let path = Path::new(&envelope.payload.path);
            let buffer = this
                .update(&mut cx, |this, cx| this.open_buffer(path, cx))
                .await?;
            this.update(&mut cx, |this, cx| {
                // Keep a strong handle so the buffer outlives the guest's use.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1156
    /// Handles a peer's `CloseBuffer` message by dropping the strong buffer
    /// handle that was held on that peer's behalf.
    pub fn close_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
            shared_buffers.remove(&envelope.payload.buffer_id);
            cx.notify();
        }

        Ok(())
    }
1169
1170 pub fn remove_collaborator(
1171 &mut self,
1172 peer_id: PeerId,
1173 replica_id: ReplicaId,
1174 cx: &mut ModelContext<Worktree>,
1175 ) {
1176 self.shared_buffers.remove(&peer_id);
1177 for (_, buffer) in &self.open_buffers {
1178 if let Some(buffer) = buffer.upgrade(cx) {
1179 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1180 }
1181 }
1182 cx.notify();
1183 }
1184
1185 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1186 let mut scan_state_rx = self.last_scan_state_rx.clone();
1187 async move {
1188 let mut scan_state = Some(scan_state_rx.borrow().clone());
1189 while let Some(ScanState::Scanning) = scan_state {
1190 scan_state = scan_state_rx.recv().await;
1191 }
1192 }
1193 }
1194
1195 fn is_scanning(&self) -> bool {
1196 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1197 true
1198 } else {
1199 false
1200 }
1201 }
1202
    /// Returns a clone of the worktree's current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1206
    /// The absolute path of the worktree's root directory.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1210
    /// Whether the given absolute path lies under this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1214
1215 fn absolutize(&self, path: &Path) -> PathBuf {
1216 if path.file_name().is_some() {
1217 self.snapshot.abs_path.join(path)
1218 } else {
1219 self.snapshot.abs_path.to_path_buf()
1220 }
1221 }
1222
    /// Reads the file at `path` from disk, eagerly refreshing its entry in
    /// the background snapshot, and yields a `File` handle plus the text.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Surface the refreshed background snapshot to the foreground.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1248
    /// Saves `text` to `path` (which may differ from the buffer's current
    /// path), registers the buffer under this worktree, and returns the
    /// `File` handle describing the newly saved entry.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer weakly, keyed by its model id.
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1273
    /// Writes `text` to disk at the worktree-relative `path` on a background
    /// thread, refreshes the corresponding snapshot entry, and returns it.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // File I/O and snapshot refresh run off the main thread.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Propagate the refreshed background snapshot to the foreground.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1295
    /// Starts sharing this worktree under `project_id`: sends the full
    /// snapshot to the server, then streams incremental snapshot diffs from
    /// a background task for as long as the share lasts.
    pub fn share(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        // Already shared; nothing to do.
        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
        });

        // Background task: diff each queued snapshot against the last one
        // successfully sent and push the delta to the server. Exits when
        // the sending side (ShareState) is dropped.
        cx.background()
            .spawn({
                let rpc = rpc.clone();
                let snapshot = snapshot.clone();
                async move {
                    let mut prev_snapshot = snapshot;
                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                        let message =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                        match rpc.send(message).await {
                            // Only advance the baseline on a successful send.
                            Ok(()) => prev_snapshot = snapshot,
                            Err(err) => log::error!("error sending snapshot diff {}", err),
                        }
                    }
                }
            })
            .detach();

        // Serialize the full initial snapshot off the main thread.
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto()),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1344}
1345
1346fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1347 let contents = smol::block_on(fs.load(&abs_path))?;
1348 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1349 let mut builder = GitignoreBuilder::new(parent);
1350 for line in contents.lines() {
1351 builder.add_line(Some(abs_path.into()), line)?;
1352 }
1353 Ok(builder.build()?)
1354}
1355
1356impl Deref for Worktree {
1357 type Target = Snapshot;
1358
1359 fn deref(&self) -> &Self::Target {
1360 match self {
1361 Worktree::Local(worktree) => &worktree.snapshot,
1362 Worktree::Remote(worktree) => &worktree.snapshot,
1363 }
1364 }
1365}
1366
1367impl Deref for LocalWorktree {
1368 type Target = Snapshot;
1369
1370 fn deref(&self) -> &Self::Target {
1371 &self.snapshot
1372 }
1373}
1374
1375impl fmt::Debug for LocalWorktree {
1376 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1377 self.snapshot.fmt(f)
1378 }
1379}
1380
impl RemoteWorktree {
    /// The server-assigned id of this worktree.
    pub fn remote_id(&self) -> u64 {
        self.remote_id
    }

    /// Returns the already-open buffer for `path`, if any, pruning weak
    /// handles to dropped buffers along the way.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                // Buffer model was dropped; forget the stale weak handle.
                false
            }
        });
        existing_buffer
    }

    /// Requests the buffer at `path` from the host over RPC and constructs
    /// a local replica model for it, replaying any operations that arrived
    /// while the request was in flight.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.remote_id;
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        // spawn_weak: the in-flight request must not keep the worktree alive.
        cx.spawn_weak(move |this, mut cx| async move {
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id as u64,
                    path: path_string,
                })
                .await?;

            // The worktree may have been dropped while awaiting the RPC.
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let language = this.read_with(&cx, |this, _| {
                use language::File;
                this.languages().select_language(file.full_path()).cloned()
            });
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                    .unwrap()
                    .with_language(language, None, cx)
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Swap in the loaded buffer; if operations were queued for
                // it while loading, apply them now so no edits are lost.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Closes every loaded open buffer and clears the open-buffer map.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// Returns a clone of the worktree's current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues a worktree update from the host; the task that owns the
    /// receiving end of `updates_tx` applies it asynchronously.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Clears a departed collaborator's replica state from all open buffers.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1508
/// State of a buffer in a remote worktree: either operations received
/// before the buffer finished loading (replayed once it does), or a weak
/// handle to the loaded buffer model.
enum RemoteBuffer {
    Operations(Vec<Operation>),
    Loaded(WeakModelHandle<Buffer>),
}
1513
1514impl RemoteBuffer {
1515 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1516 match self {
1517 Self::Operations(_) => None,
1518 Self::Loaded(buffer) => buffer.upgrade(cx),
1519 }
1520 }
1521}
1522
impl Snapshot {
    /// The worktree's id.
    pub fn id(&self) -> usize {
        self.id
    }

    /// Serializes the snapshot for the wire, omitting ignored entries.
    pub fn to_proto(&self) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id as u64,
            root_name,
            entries: self
                .entries_by_path
                .cursor::<()>()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
        }
    }

    /// Diffs `self` against an older snapshot `other`, producing an
    /// `UpdateWorktree` message listing entries that were added or changed
    /// and the ids of entries that disappeared.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        // Both cursors yield entries ordered by id, so the diff below is a
        // standard sorted-merge walk over the two sequences.
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Only in the new snapshot: added entry.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // In both: include only if rescanned since.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Only in the old snapshot: removed entry.
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies a remote `UpdateWorktree` diff, bumping the scan id and
    /// editing both entry trees in a single batch each.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If this id already exists under a different path (a rename),
            // drop the stale path entry before inserting the new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of file entries, ignored files included.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal positioned at the `start_offset`-th entry,
    /// counting only entries selected by the two include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at file index `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over every entry's path, skipping the root (empty) path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Bias::Right positions the cursor just past the parent itself.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The root entry, stored under the empty path.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry at exactly `path`, if any.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on a later entry; require an exact
                // path match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id via the id-to-path index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts (or replaces) a single entry in both trees, reusing a stable
    /// id when possible. A `.gitignore` file additionally (re)loads its
    /// rules into the ignore map.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A malformed ignore file must not fail the insert.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Records the scanned children of `parent_path`, promoting the parent
    /// from `PendingDir` to `Dir` and installing its `.gitignore` rules,
    /// if present.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Only directories still awaiting their scan may be populated.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id previously used
    /// for this inode, else the id of the entry already at this path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything beneath it from both trees,
    /// remembering removed ids by inode so they can be reused if the same
    /// file reappears.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before path], [subtree at path], and
            // [after], then stitch the outer pieces back together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Track the largest id seen for each removed inode.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore invalidates its directory's cached rules.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of `.gitignore` rules applying at `path`, walking
    /// from the root down and collapsing to "ignore all" as soon as any
    /// ancestor (or the path itself) is ignored.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1883
1884impl fmt::Debug for Snapshot {
1885 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1886 for entry in self.entries_by_path.cursor::<()>() {
1887 for _ in entry.path.ancestors().skip(1) {
1888 write!(f, " ")?;
1889 }
1890 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
1891 }
1892 Ok(())
1893 }
1894}
1895
/// A handle to a file within a worktree (local or remote), as exposed to
/// the `language` crate via the `language::File` trait.
#[derive(Clone, PartialEq)]
pub struct File {
    /// Id of the worktree entry backing this file; `None` once deleted.
    entry_id: Option<usize>,
    /// The worktree containing this file.
    worktree: ModelHandle<Worktree>,
    /// Absolute path of the worktree root.
    worktree_path: Arc<Path>,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    /// Whether the file lives in a local (vs. remote) worktree.
    is_local: bool,
}
1905
impl language::File for File {
    fn worktree_id(&self) -> usize {
        self.worktree.id()
    }

    fn entry_id(&self) -> Option<usize> {
        self.entry_id
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// Path relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Absolute filesystem path; `None` for files in remote worktrees,
    /// which have no presence on the local disk.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// The path prefixed with the worktree root's directory name.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    /// A file with no backing worktree entry has been deleted.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Persists the buffer's contents. In a local worktree this writes to
    /// disk and, if the worktree is shared, notifies collaborators with a
    /// `BufferSaved` message; in a remote worktree it asks the host to
    /// perform the save over RPC.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        // Read the id up front; the closure below borrows the worktree
        // mutably.
        let worktree_id = self.worktree.read(cx).id() as u64;
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Only broadcast when the worktree is shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    // The host reports the version and mtime it saved.
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Loads the file's contents from the local filesystem; `None` for
    /// remote worktrees.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Forwards a buffer operation to the worktree for broadcast.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// For remote worktrees, notifies the host (best-effort) that this
    /// buffer was closed; local worktrees need no action.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.remote_id;
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        // Fire-and-forget: a failure is only logged.
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    fn boxed_clone(&self) -> Box<dyn language::File> {
        Box::new(self.clone())
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
2051
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    /// Stable id, preserved across rescans where possible.
    pub id: usize,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    pub is_ignored: bool,
}
2062
/// What an entry is: a directory not yet scanned, a scanned directory, or a
/// file carrying a character bag for fuzzy matching.
#[derive(Clone, Debug)]
pub enum EntryKind {
    PendingDir,
    Dir,
    File(CharBag),
}
2069
2070impl Entry {
2071 fn new(
2072 path: Arc<Path>,
2073 metadata: &fs::Metadata,
2074 next_entry_id: &AtomicUsize,
2075 root_char_bag: CharBag,
2076 ) -> Self {
2077 Self {
2078 id: next_entry_id.fetch_add(1, SeqCst),
2079 kind: if metadata.is_dir {
2080 EntryKind::PendingDir
2081 } else {
2082 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2083 },
2084 path,
2085 inode: metadata.inode,
2086 mtime: metadata.mtime,
2087 is_symlink: metadata.is_symlink,
2088 is_ignored: false,
2089 }
2090 }
2091
2092 pub fn is_dir(&self) -> bool {
2093 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2094 }
2095
2096 pub fn is_file(&self) -> bool {
2097 matches!(self.kind, EntryKind::File(_))
2098 }
2099}
2100
2101impl sum_tree::Item for Entry {
2102 type Summary = EntrySummary;
2103
2104 fn summary(&self) -> Self::Summary {
2105 let visible_count = if self.is_ignored { 0 } else { 1 };
2106 let file_count;
2107 let visible_file_count;
2108 if self.is_file() {
2109 file_count = 1;
2110 visible_file_count = visible_count;
2111 } else {
2112 file_count = 0;
2113 visible_file_count = 0;
2114 }
2115
2116 EntrySummary {
2117 max_path: self.path.clone(),
2118 count: 1,
2119 visible_count,
2120 file_count,
2121 visible_file_count,
2122 }
2123 }
2124}
2125
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and hence ordered) by their path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2133
/// Aggregate statistics for a subtree of `entries_by_path`.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (last) path in the subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are not ignored.
    visible_count: usize,
    /// Entries that are files.
    file_count: usize,
    /// Entries that are files and not ignored.
    visible_file_count: usize,
}
2142
impl Default for EntrySummary {
    /// The identity summary: empty path, all counts zero.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2154
2155impl sum_tree::Summary for EntrySummary {
2156 type Context = ();
2157
2158 fn add_summary(&mut self, rhs: &Self, _: &()) {
2159 self.max_path = rhs.max_path.clone();
2160 self.visible_count += rhs.visible_count;
2161 self.file_count += rhs.file_count;
2162 self.visible_file_count += rhs.visible_file_count;
2163 }
2164}
2165
/// The id-keyed view of an entry, stored in `entries_by_id` so entries can
/// be found by id and diffed by scan generation.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan generation in which this entry was last touched.
    scan_id: usize,
}
2173
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// Each item's summary is simply its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2181
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    /// `entries_by_id` items are keyed (and ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2189
/// Summary for `entries_by_id`: the largest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2194
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// Ids are ordered, so the right-hand summary holds the maximum.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2202
/// Lets `usize` be used as a seek dimension over `entries_by_id` (seeking
/// by entry id).
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2208
/// Ordering key for `entries_by_path`: the entry's worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2211
2212impl Default for PathKey {
2213 fn default() -> Self {
2214 Self(Path::new("").into())
2215 }
2216}
2217
/// Lets `PathKey` be used as a seek dimension over `entries_by_path`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2223
/// Owns the background task that mirrors the filesystem into a shared
/// snapshot and reports scan progress over a channel.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    /// Snapshot shared with the foreground worktree.
    snapshot: Arc<Mutex<Snapshot>>,
    /// Channel on which `ScanState` transitions are reported.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2230
2231impl BackgroundScanner {
2232 fn new(
2233 snapshot: Arc<Mutex<Snapshot>>,
2234 notify: Sender<ScanState>,
2235 fs: Arc<dyn Fs>,
2236 executor: Arc<executor::Background>,
2237 ) -> Self {
2238 Self {
2239 fs,
2240 snapshot,
2241 notify,
2242 executor,
2243 }
2244 }
2245
    /// The absolute root path of the worktree being scanned.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }
2249
    /// A clone of the current shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }
2253
    /// Drives the scanner: performs the initial recursive scan, then
    /// processes filesystem event batches indefinitely, bracketing each
    /// unit of work with `Scanning`/`Idle` notifications. Returns when the
    /// notification receiver is dropped.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            // Report the failure; the scanner still proceeds to watch for
            // events below.
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }
2289
    /// Performs the initial scan: seeds a work queue with the root
    /// directory, then fans the queue out over one task per CPU. Each job
    /// can enqueue further jobs via its `scan_queue` sender.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            // Copy what we need out of the snapshot, holding the lock only
            // briefly.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Drop the root sender so the channel closes once every job
            // (each holding its own clone) has completed.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                // Log and continue; one bad directory must
                                // not abort the whole scan.
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }
2335
    /// Scans a single directory (described by `job`), recording its children
    /// in the snapshot and enqueueing a new `ScanJob` for every subdirectory.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            // Skip children whose directory entry can't be read, rather than
            // aborting the scan of the whole directory.
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // A `None` metadata means the child vanished between listing and
            // statting; skip it.
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it is typically encountered early in the listing, so
                // there should rarely be many entries to fix up. Update the ignore stack
                // associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        // `new_jobs` parallels the directory entries within
                        // `new_entries`: each dir entry consumes one job.
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Everything inside an ignored directory is ignored.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        // Publish this directory's children, then enqueue the subdirectory
        // jobs for the worker pool.
        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }
2433
    /// Handles one batch of file-system events: removes the affected paths
    /// from the snapshot, re-inserts whatever currently exists on disk,
    /// rescans any directories among them, and refreshes ignore statuses.
    ///
    /// Returns `false` if the worktree root can no longer be canonicalized
    /// (e.g. it was deleted), signalling the caller to stop processing.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sort and dedup so an event for a directory subsumes events for paths
        // inside it; descendants are rediscovered by the rescan below.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove every affected path from the snapshot.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // Second pass: re-insert entries that still exist on disk, queueing a
        // scan job for each directory so its contents are re-discovered.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists; the removal above stands.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }
2535
    /// Recomputes ignore statuses for directories whose `.gitignore` files
    /// changed during the current scan, and prunes ignore entries whose
    /// `.gitignore` files no longer exist on disk.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // Only directories whose ignore files were touched in this scan
            // (and which still exist) need their subtrees re-evaluated.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            // Remove from both the local copy and the live snapshot.
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants of a directory that's already queued: the
            // recursive `update_ignore_status` jobs will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Close the channel so the workers' `recv` loops terminate once the
        // queue (including recursively enqueued jobs) drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2592
    /// Recomputes the ignore status of every direct child of `job.path`,
    /// enqueueing a follow-up job for each child directory, and writes any
    /// changed entries back into the live snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                // Everything inside an ignored directory is ignored.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only entries whose ignore status actually changed get edited.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2633}
2634
/// Re-reads the metadata for `path` (located at `abs_path` on disk) and
/// inserts a fresh entry for it into `snapshot`, returning the inserted entry.
///
/// Errors if the file's metadata cannot be read or the file no longer exists.
async fn refresh_entry(
    fs: &dyn Fs,
    snapshot: &Mutex<Snapshot>,
    path: Arc<Path>,
    abs_path: &Path,
) -> Result<Entry> {
    let root_char_bag;
    let next_entry_id;
    {
        // Copy what we need out of the snapshot so the lock is not held
        // across the metadata read below.
        let snapshot = snapshot.lock();
        root_char_bag = snapshot.root_char_bag;
        next_entry_id = snapshot.next_entry_id.clone();
    }
    let entry = Entry::new(
        path,
        &fs.metadata(abs_path)
            .await?
            .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
        &next_entry_id,
        root_char_bag,
    );
    Ok(snapshot.lock().insert_entry(entry, fs))
}
2658
2659fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2660 let mut result = root_char_bag;
2661 result.extend(
2662 path.to_string_lossy()
2663 .chars()
2664 .map(|c| c.to_ascii_lowercase()),
2665 );
2666 result
2667}
2668
/// A unit of work for the directory-scanning workers: scan the directory at
/// `abs_path` (relative path `path` within the worktree) under `ignore_stack`,
/// enqueueing jobs for its subdirectories onto `scan_queue`.
struct ScanJob {
    abs_path: PathBuf,
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    scan_queue: Sender<ScanJob>,
}
2675
/// A unit of work for the ignore-status workers: re-evaluate the ignore
/// status of the children of `path` using `ignore_stack`, enqueueing
/// follow-up jobs for subdirectories onto `ignore_queue`.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2681
/// Test-only helpers available on worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all file-system events predating this call have been
    /// processed by the worktree. See the impl for the mechanism.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2689
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        // Create and then delete a sentinel file, waiting for each change to
        // be observed; once both are seen, earlier events must be drained too.
        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2722
/// Running totals accumulated while seeking through the entry tree; serves as
/// the cursor dimension for `Traversal`, letting seeks target either paths or
/// filtered entry counts (see `count` for how the flags select a total).
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest path covered so far.
    max_path: &'a Path,
    // All entries.
    count: usize,
    // Entries that are not ignored.
    visible_count: usize,
    // File entries only.
    file_count: usize,
    // Non-ignored file entries.
    visible_file_count: usize,
}
2731
2732impl<'a> TraversalProgress<'a> {
2733 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2734 match (include_ignored, include_dirs) {
2735 (true, true) => self.count,
2736 (true, false) => self.file_count,
2737 (false, true) => self.visible_count,
2738 (false, false) => self.visible_file_count,
2739 }
2740 }
2741}
2742
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds in a subtree summary: the counts are additive, while `max_path`
    // is simply replaced by the summary's maximum.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2752
2753impl<'a> Default for TraversalProgress<'a> {
2754 fn default() -> Self {
2755 Self {
2756 max_path: Path::new(""),
2757 count: 0,
2758 visible_count: 0,
2759 file_count: 0,
2760 visible_file_count: 0,
2761 }
2762 }
2763}
2764
/// A cursor-based iterator over worktree entries in path order, optionally
/// filtering out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
2770
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the entry at `offset`, counted in this traversal's
    /// filtered coordinate space.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past every descendant of the current entry, stopping at the next
    /// entry (matching the filters) that is not beneath it. Returns `false`
    /// if no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // If the landed-on entry is filtered out, loop and seek again
            // from the new position.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the current cursor position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position in this traversal's filtered coordinate space.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2816
2817impl<'a> Iterator for Traversal<'a> {
2818 type Item = &'a Entry;
2819
2820 fn next(&mut self) -> Option<Self::Item> {
2821 if let Some(item) = self.entry() {
2822 self.advance();
2823 Some(item)
2824 } else {
2825 None
2826 }
2827 }
2828}
2829
/// Seek targets for positioning a `Traversal` cursor within the entry tree.
#[derive(Debug)]
enum TraversalTarget {
    // Seek to the entry with the given path.
    Path(&'a Path),
    // Seek to the first entry that is not a descendant of the given path.
    PathSuccessor(&'a Path),
    // Seek to an entry count in the filtered coordinate space.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2840
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // While the cursor is still within `path`'s subtree the target
                // compares greater, so the seek keeps moving forward; the
                // first position past the subtree compares equal.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
2863
/// Iterator over the direct children of `parent_path`, implemented by
/// advancing a `Traversal` from sibling to sibling.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2868
2869impl<'a> Iterator for ChildEntriesIter<'a> {
2870 type Item = &'a Entry;
2871
2872 fn next(&mut self) -> Option<Self::Item> {
2873 if let Some(item) = self.traversal.entry() {
2874 if item.path.starts_with(&self.parent_path) {
2875 self.traversal.advance_to_sibling();
2876 return Some(item);
2877 }
2878 }
2879 None
2880 }
2881}
2882
2883impl<'a> From<&'a Entry> for proto::Entry {
2884 fn from(entry: &'a Entry) -> Self {
2885 Self {
2886 id: entry.id as u64,
2887 is_dir: entry.is_dir(),
2888 path: entry.path.to_string_lossy().to_string(),
2889 inode: entry.inode,
2890 mtime: Some(entry.mtime.into()),
2891 is_symlink: entry.is_symlink,
2892 is_ignored: entry.is_ignored,
2893 }
2894 }
2895}
2896
2897impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2898 type Error = anyhow::Error;
2899
2900 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2901 if let Some(mtime) = entry.mtime {
2902 let kind = if entry.is_dir {
2903 EntryKind::Dir
2904 } else {
2905 let mut char_bag = root_char_bag.clone();
2906 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2907 EntryKind::File(char_bag)
2908 };
2909 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
2910 Ok(Entry {
2911 id: entry.id as usize,
2912 kind,
2913 path: path.clone(),
2914 inode: entry.inode,
2915 mtime: mtime.into(),
2916 is_symlink: entry.is_symlink,
2917 is_ignored: entry.is_ignored,
2918 })
2919 } else {
2920 Err(anyhow!(
2921 "missing mtime in remote worktree entry {:?}",
2922 entry.path
2923 ))
2924 }
2925 }
2926}
2927
/// Conversion from LSP positions to this crate's UTF-16 point representation.
trait ToPointUtf16 {
    fn to_point_utf16(self) -> PointUtf16;
}
2931
impl ToPointUtf16 for lsp::Position {
    // LSP positions carry zero-based (line, UTF-16 code-unit) coordinates,
    // which map directly onto `PointUtf16`'s fields.
    fn to_point_utf16(self) -> PointUtf16 {
        PointUtf16::new(self.line, self.character)
    }
}
2937
2938fn diagnostic_ranges<'a>(
2939 diagnostic: &'a lsp::Diagnostic,
2940 abs_path: &'a Path,
2941) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
2942 diagnostic
2943 .related_information
2944 .iter()
2945 .flatten()
2946 .filter_map(move |info| {
2947 if info.location.uri.to_file_path().ok()? == abs_path {
2948 let info_start = PointUtf16::new(
2949 info.location.range.start.line,
2950 info.location.range.start.character,
2951 );
2952 let info_end = PointUtf16::new(
2953 info.location.range.end.line,
2954 info.location.range.end.character,
2955 );
2956 Some(info_start..info_end)
2957 } else {
2958 None
2959 }
2960 })
2961 .chain(Some(
2962 diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
2963 ))
2964}
2965
2966#[cfg(test)]
2967mod tests {
2968 use super::*;
2969 use crate::fs::FakeFs;
2970 use anyhow::Result;
2971 use client::test::{FakeHttpClient, FakeServer};
2972 use fs::RealFs;
2973 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
2974 use language::{Diagnostic, LanguageConfig};
2975 use lsp::Url;
2976 use rand::prelude::*;
2977 use serde_json::json;
2978 use std::{cell::RefCell, rc::Rc};
2979 use std::{
2980 env,
2981 fmt::Write,
2982 time::{SystemTime, UNIX_EPOCH},
2983 };
2984 use text::Point;
2985 use unindent::Unindent as _;
2986 use util::test::temp_tree;
2987
    // Verifies that a non-ignored traversal lists entries in path order and
    // omits the entry matched by the `.gitignore` ("a/b").
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3034
    // Verifies that saving a buffer writes its contents back to disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Make the buffer large enough that the write isn't trivially small.
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3068
    // Verifies saving when the worktree root is a single file rather than a
    // directory (the buffer is opened with an empty relative path).
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // A single-file worktree contains exactly one file.
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3107
    // Verifies that renames and deletions on disk preserve entry identity
    // (entry ids survive renames), that open buffers track their files'
    // new paths or deletion, and that a remote worktree converges to the
    // same state after applying an update built from snapshot diffs.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let mut client = Client::new();
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(),
            Client::new(),
            user_store,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Entry ids survive the renames (rename detection).
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers follow their files' new paths.
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            // Only the removed file's buffer is marked deleted.
            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3262
    // Verifies that ignore statuses are applied on the initial scan and are
    // recomputed for files created after the scan, and that `.git` is always
    // treated as ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Files created after the initial scan should inherit the ignore
        // status of their parent directories.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3314
    // Verifies that opening the same path multiple times — concurrently or
    // sequentially — yields the same buffer instance.
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        let user_id = 100;
        let mut client = Client::new();
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap();
        let buffer_a_2 = buffer_a_2.await.unwrap();
        let buffer_b = buffer_b.await.unwrap();
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap();

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3372
    // Verifies the buffer dirty-state machine: editing dirties a buffer and
    // emits `Dirtied`, saving cleans it and emits `Saved`, and deleting the
    // underlying file dirties a clean buffer (but a buffer that was already
    // dirty only gets `FileHandleChanged`).
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Collects every event the buffer emits, for assertion below.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3512
    // Verifies how a buffer responds to its underlying file changing on disk:
    // a clean buffer is reloaded to match the new file contents, while a
    // buffer with unsaved edits keeps them and is marked as conflicted.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait until the initial worktree scan completes before opening buffers.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            // Reloading from disk must not leave the buffer dirty or conflicted.
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3615
    // Verifies that diagnostics published by a (fake) language server are
    // attached to the corresponding buffer — including when the buffer for
    // the diagnosed file is opened only after the diagnostics arrive.
    #[gpui::test]
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        // Register a language whose server is the in-process fake, so the test
        // can drive LSP notifications directly.
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        // Publish a diagnostic for `a.rs` before any buffer for it exists.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // The previously-published diagnostic must show up in the newly-opened
        // buffer, as its own primary group.
        buffer.read_with(&cx, |buffer, _| {
            let diagnostics = buffer
                .snapshot()
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
3697
    // Verifies that LSP diagnostics connected via `related_information` are
    // grouped: every member of a group shares a `group_id`, exactly one member
    // is the primary (`is_primary: true`), and `diagnostic_group` returns all
    // members of a given group.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Two diagnostic groups, as a real server would publish them: each
        // primary ("error 1", "error 2") cross-references its hints via
        // `related_information`, and each hint points back at its primary.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        worktree
            .update(&mut cx, |tree, cx| {
                tree.update_diagnostics(message, &Default::default(), cx)
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All entries, ordered by position: group 0 is "error 1" plus its
        // hint; group 1 is "error 2" plus its two hints.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Each group can also be fetched on its own by group id.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3962
    // Randomized test: repeatedly mutates a real temporary directory while a
    // `BackgroundScanner` consumes batches of the resulting fs events, and
    // checks that (a) the scanner's snapshot stays internally consistent,
    // (b) it converges to the same state as a fresh scan of the final tree,
    // and (c) `build_update`/`apply_update` can replay changes onto stale
    // snapshots. Iteration counts are tunable via the OPERATIONS and
    // INITIAL_ENTRIES env vars.
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // Start from an empty snapshot containing only the root entry; this is
        // also kept aside to seed a second scanner at the end.
        let mut initial_snapshot = Snapshot {
            id: 0,
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave mutations with (randomly-sized, possibly delayed) event
        // deliveries, snapshotting the scanner's state at random points.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Deliver any remaining events so the scanner reflects the final tree.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan of the final tree must agree with the incrementally
        // maintained snapshot.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every stale snapshot must be updatable to the final state, with or
        // without ignored entries included in the update.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the stale snapshot, since updates
                // built without ignored entries won't mention them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4079
4080 fn randomly_mutate_tree(
4081 root_path: &Path,
4082 insertion_probability: f64,
4083 rng: &mut impl Rng,
4084 ) -> Result<Vec<fsevent::Event>> {
4085 let root_path = root_path.canonicalize().unwrap();
4086 let (dirs, files) = read_dir_recursive(root_path.clone());
4087
4088 let mut events = Vec::new();
4089 let mut record_event = |path: PathBuf| {
4090 events.push(fsevent::Event {
4091 event_id: SystemTime::now()
4092 .duration_since(UNIX_EPOCH)
4093 .unwrap()
4094 .as_secs(),
4095 flags: fsevent::StreamFlags::empty(),
4096 path,
4097 });
4098 };
4099
4100 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
4101 let path = dirs.choose(rng).unwrap();
4102 let new_path = path.join(gen_name(rng));
4103
4104 if rng.gen() {
4105 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
4106 std::fs::create_dir(&new_path)?;
4107 } else {
4108 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
4109 std::fs::write(&new_path, "")?;
4110 }
4111 record_event(new_path);
4112 } else if rng.gen_bool(0.05) {
4113 let ignore_dir_path = dirs.choose(rng).unwrap();
4114 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
4115
4116 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
4117 let files_to_ignore = {
4118 let len = rng.gen_range(0..=subfiles.len());
4119 subfiles.choose_multiple(rng, len)
4120 };
4121 let dirs_to_ignore = {
4122 let len = rng.gen_range(0..subdirs.len());
4123 subdirs.choose_multiple(rng, len)
4124 };
4125
4126 let mut ignore_contents = String::new();
4127 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
4128 write!(
4129 ignore_contents,
4130 "{}\n",
4131 path_to_ignore
4132 .strip_prefix(&ignore_dir_path)?
4133 .to_str()
4134 .unwrap()
4135 )
4136 .unwrap();
4137 }
4138 log::info!(
4139 "Creating {:?} with contents:\n{}",
4140 ignore_path.strip_prefix(&root_path)?,
4141 ignore_contents
4142 );
4143 std::fs::write(&ignore_path, ignore_contents).unwrap();
4144 record_event(ignore_path);
4145 } else {
4146 let old_path = {
4147 let file_path = files.choose(rng);
4148 let dir_path = dirs[1..].choose(rng);
4149 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
4150 };
4151
4152 let is_rename = rng.gen();
4153 if is_rename {
4154 let new_path_parent = dirs
4155 .iter()
4156 .filter(|d| !d.starts_with(old_path))
4157 .choose(rng)
4158 .unwrap();
4159
4160 let overwrite_existing_dir =
4161 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
4162 let new_path = if overwrite_existing_dir {
4163 std::fs::remove_dir_all(&new_path_parent).ok();
4164 new_path_parent.to_path_buf()
4165 } else {
4166 new_path_parent.join(gen_name(rng))
4167 };
4168
4169 log::info!(
4170 "Renaming {:?} to {}{:?}",
4171 old_path.strip_prefix(&root_path)?,
4172 if overwrite_existing_dir {
4173 "overwrite "
4174 } else {
4175 ""
4176 },
4177 new_path.strip_prefix(&root_path)?
4178 );
4179 std::fs::rename(&old_path, &new_path)?;
4180 record_event(old_path.clone());
4181 record_event(new_path);
4182 } else if old_path.is_dir() {
4183 let (dirs, files) = read_dir_recursive(old_path.clone());
4184
4185 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
4186 std::fs::remove_dir_all(&old_path).unwrap();
4187 for file in files {
4188 record_event(file);
4189 }
4190 for dir in dirs {
4191 record_event(dir);
4192 }
4193 } else {
4194 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
4195 std::fs::remove_file(old_path).unwrap();
4196 record_event(old_path.clone());
4197 }
4198 }
4199
4200 Ok(events)
4201 }
4202
4203 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4204 let child_entries = std::fs::read_dir(&path).unwrap();
4205 let mut dirs = vec![path];
4206 let mut files = Vec::new();
4207 for child_entry in child_entries {
4208 let child_path = child_entry.unwrap().path();
4209 if child_path.is_dir() {
4210 let (child_dirs, child_files) = read_dir_recursive(child_path);
4211 dirs.extend(child_dirs);
4212 files.extend(child_files);
4213 } else {
4214 files.push(child_path);
4215 }
4216 }
4217 (dirs, files)
4218 }
4219
4220 fn gen_name(rng: &mut impl Rng) -> String {
4221 (0..6)
4222 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4223 .map(char::from)
4224 .collect()
4225 }
4226
    // Test-only helpers for validating and flattening snapshots.
    impl Snapshot {
        // Asserts the internal consistency of this snapshot; panics on any
        // violation.
        fn check_invariants(&self) {
            // The `files` iterators must yield exactly the file entries found
            // by walking `entries_by_path`, in the same order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Traversing from the root via `child_entries` must visit the same
            // paths, in the same order, as iterating `entries_by_path` directly.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every tracked ignore must correspond to an existing directory
            // entry that actually contains a `.gitignore` entry.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flattens the snapshot into (path, inode, is_ignored) tuples sorted
        // by path, optionally filtering out ignored entries, for comparison
        // in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4278}