1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Result};
8use client::{proto, Client, PeerId, TypedEnvelope};
9use clock::ReplicaId;
10use collections::{hash_map, HashMap, HashSet};
11use futures::{Stream, StreamExt};
12use fuzzy::CharBag;
13use gpui::{
14 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
15 Task, UpgradeModelHandle, WeakModelHandle,
16};
17use language::{
18 range_from_lsp, Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, File as _, Operation,
19 PointUtf16, Rope,
20};
21use lazy_static::lazy_static;
22use parking_lot::Mutex;
23use postage::{
24 prelude::{Sink as _, Stream as _},
25 watch,
26};
27use serde::Deserialize;
28use smol::channel::{self, Sender};
29use std::{
30 any::Any,
31 cmp::{self, Ordering},
32 convert::{TryFrom, TryInto},
33 ffi::{OsStr, OsString},
34 fmt,
35 future::Future,
36 ops::Deref,
37 path::{Path, PathBuf},
38 sync::{
39 atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
40 Arc,
41 },
42 time::{Duration, SystemTime},
43};
44use sum_tree::{Bias, TreeMap};
45use sum_tree::{Edit, SeekTarget, SumTree};
46use util::{post_inc, ResultExt, TryFutureExt};
47
lazy_static! {
    // File name compared against during directory scans to detect gitignore files.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}

/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// The scanner has caught up with the filesystem.
    Idle,
    /// A scan is currently in progress.
    Scanning,
    /// The scan failed; the error is wrapped in `Arc` so the state stays `Clone`.
    Err(Arc<anyhow::Error>),
}

/// Identifies a worktree within the application. Local worktrees derive it
/// from their model id (see `LocalWorktree::new`); remote worktrees receive
/// it from the server.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);

/// A directory tree being edited: either rooted on the local filesystem or
/// mirrored from a remote collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
66
impl Entity for Worktree {
    type Event = ();

    fn release(&mut self, cx: &mut MutableAppContext) {
        // When a local worktree that completed server registration is
        // dropped, tell the server to unregister it. Worktrees still in
        // `Registration::None`/`Pending` have nothing to undo.
        if let Some(worktree) = self.as_local_mut() {
            if let Registration::Done { project_id } = worktree.registration {
                let client = worktree.client.clone();
                let unregister_message = proto::UnregisterWorktree {
                    project_id,
                    worktree_id: worktree.id().to_proto(),
                };
                // Fire-and-forget: failures are only logged since the model
                // is already being released.
                cx.foreground()
                    .spawn(async move {
                        client.send(unregister_message).await?;
                        Ok::<_, anyhow::Error>(())
                    })
                    .detach_and_log_err(cx);
            }
        }
    }
}
88
89impl Worktree {
    /// Opens a worktree rooted at `path` on the local filesystem.
    ///
    /// Constructs the `LocalWorktree` model, then spawns the background
    /// scanner task that watches the filesystem and keeps the background
    /// snapshot up to date, reporting progress through `scan_states_tx`.
    pub async fn open_local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        weak: bool,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) = LocalWorktree::new(client, path, weak, fs.clone(), cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                // Filesystem events are debounced by 100ms before rescanning.
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
112
    /// Builds a worktree that mirrors a collaborator's worktree over RPC.
    ///
    /// The serialized entries and diagnostic summaries in `worktree` are
    /// deserialized on a background thread; the resulting model applies
    /// incoming `UpdateWorktree` messages to a background snapshot and polls
    /// changes into the foreground snapshot.
    pub async fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::Worktree,
        client: Arc<Client>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let remote_id = worktree.id;
        // Lowercased character bag of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let weak = worktree.weak;
        // Deserializing every entry can be expensive for large trees, so do
        // it off the main thread.
        let (entries_by_path, entries_by_id, diagnostic_summaries) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Skip entries that fail to deserialize rather than
                        // rejecting the whole worktree.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());

                let diagnostic_summaries = TreeMap::from_ordered_entries(
                    worktree.diagnostic_summaries.into_iter().map(|summary| {
                        (
                            PathKey(PathBuf::from(summary.path).into()),
                            DiagnosticSummary {
                                error_count: summary.error_count as usize,
                                warning_count: summary.warning_count as usize,
                                info_count: summary.info_count as usize,
                                hint_count: summary.hint_count as usize,
                            },
                        )
                    }),
                );

                (entries_by_path, entries_by_id, diagnostic_summaries)
            })
            .await;

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: WorktreeId(remote_id as usize),
                    scan_id: 0,
                    // Remote worktrees have no local filesystem path.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Apply host updates to the shared snapshot off the main
                // thread; failed updates are logged and skipped.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                {
                    // Whenever the background snapshot changes, poll it into
                    // the model; stop once the model has been dropped.
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                Worktree::Remote(RemoteWorktree {
                    project_id: project_remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    loading_buffers: Default::default(),
                    open_buffers: Default::default(),
                    queued_operations: Default::default(),
                    diagnostic_summaries,
                    weak,
                })
            })
        });

        Ok(worktree)
    }
233
234 pub fn as_local(&self) -> Option<&LocalWorktree> {
235 if let Worktree::Local(worktree) = self {
236 Some(worktree)
237 } else {
238 None
239 }
240 }
241
242 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
243 if let Worktree::Remote(worktree) = self {
244 Some(worktree)
245 } else {
246 None
247 }
248 }
249
250 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
251 if let Worktree::Local(worktree) = self {
252 Some(worktree)
253 } else {
254 None
255 }
256 }
257
258 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
259 if let Worktree::Remote(worktree) = self {
260 Some(worktree)
261 } else {
262 None
263 }
264 }
265
266 pub fn is_local(&self) -> bool {
267 matches!(self, Worktree::Local(_))
268 }
269
270 pub fn snapshot(&self) -> Snapshot {
271 match self {
272 Worktree::Local(worktree) => worktree.snapshot(),
273 Worktree::Remote(worktree) => worktree.snapshot(),
274 }
275 }
276
277 pub fn is_weak(&self) -> bool {
278 match self {
279 Worktree::Local(worktree) => worktree.weak,
280 Worktree::Remote(worktree) => worktree.weak,
281 }
282 }
283
284 pub fn replica_id(&self) -> ReplicaId {
285 match self {
286 Worktree::Local(_) => 0,
287 Worktree::Remote(worktree) => worktree.replica_id,
288 }
289 }
290
    /// Reacts to a collaborator leaving by delegating to the underlying
    /// worktree. The local variant also needs the peer id so it can drop the
    /// buffers shared with that peer.
    pub fn remove_collaborator(
        &mut self,
        peer_id: PeerId,
        replica_id: ReplicaId,
        cx: &mut ModelContext<Self>,
    ) {
        match self {
            Worktree::Local(worktree) => worktree.remove_collaborator(peer_id, replica_id, cx),
            Worktree::Remote(worktree) => worktree.remove_collaborator(replica_id, cx),
        }
    }
302
    /// Iterates over the diagnostic summary recorded for each path in this
    /// worktree.
    pub fn diagnostic_summaries<'a>(
        &'a self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), summary.clone()))
    }
313
    /// Gives mutable access to the map of buffer loads currently in flight.
    pub(crate) fn loading_buffers<'a>(&'a mut self) -> &'a mut LoadingBuffers {
        match self {
            Worktree::Local(worktree) => &mut worktree.loading_buffers,
            Worktree::Remote(worktree) => &mut worktree.loading_buffers,
        }
    }
320
    /// Opens the buffer at `path`, reusing an already-open or currently
    /// loading buffer when possible. The returned `bool` is `true` for
    /// exactly one caller — the first to observe a newly-loaded buffer.
    pub(crate) fn open_buffer(
        &mut self,
        path: impl AsRef<Path>,
        cx: &mut ModelContext<Self>,
    ) -> Task<Result<(ModelHandle<Buffer>, bool)>> {
        let path = path.as_ref();

        // If there is already a buffer for the given path, then return it.
        let existing_buffer = match self {
            Worktree::Local(worktree) => worktree.get_open_buffer(path, cx),
            Worktree::Remote(worktree) => worktree.get_open_buffer(path, cx),
        };
        if let Some(existing_buffer) = existing_buffer {
            return cx.spawn(move |_, _| async move { Ok((existing_buffer, false)) });
        }

        let is_new = Arc::new(AtomicBool::new(true));
        let path: Arc<Path> = Arc::from(path);
        let mut loading_watch = match self.loading_buffers().entry(path.clone()) {
            // If the given path is already being loaded, then wait for that existing
            // task to complete and return the same buffer.
            hash_map::Entry::Occupied(e) => e.get().clone(),

            // Otherwise, record the fact that this path is now being loaded.
            hash_map::Entry::Vacant(entry) => {
                let (mut tx, rx) = postage::watch::channel();
                entry.insert(rx.clone());

                let load_buffer = match self {
                    Worktree::Local(worktree) => worktree.open_buffer(&path, cx),
                    Worktree::Remote(worktree) => worktree.open_buffer(&path, cx),
                };
                cx.spawn(move |this, mut cx| async move {
                    let result = load_buffer.await;

                    // After the buffer loads, record the fact that it is no longer
                    // loading.
                    this.update(&mut cx, |this, _| this.loading_buffers().remove(&path));
                    // Publish the outcome to every waiter via the watch.
                    *tx.borrow_mut() = Some(match result {
                        Ok(buffer) => Ok((buffer, is_new)),
                        Err(error) => Err(Arc::new(error)),
                    });
                })
                .detach();
                rx
            }
        };

        cx.spawn(|_, _| async move {
            // Wait until the load task publishes a result on the watch.
            loop {
                if let Some(result) = loading_watch.borrow().as_ref() {
                    return match result {
                        // `fetch_and(false)` returns the previous value, so
                        // only the first waiter observes `true` here.
                        Ok((buf, is_new)) => Ok((buf.clone(), is_new.fetch_and(false, SeqCst))),
                        Err(error) => Err(anyhow!("{}", error)),
                    };
                }
                loading_watch.recv().await;
            }
        })
    }
381
382 #[cfg(any(test, feature = "test-support"))]
383 pub(crate) fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
384 let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
385 Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
386 Worktree::Remote(worktree) => {
387 Box::new(worktree.open_buffers.values().filter_map(|buf| {
388 if let RemoteBuffer::Loaded(buf) = buf {
389 Some(buf)
390 } else {
391 None
392 }
393 }))
394 }
395 };
396
397 let path = path.as_ref();
398 open_buffers
399 .find(|buffer| {
400 if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
401 file.path().as_ref() == path
402 } else {
403 false
404 }
405 })
406 .is_some()
407 }
408
409 pub fn handle_update_buffer(
410 &mut self,
411 envelope: TypedEnvelope<proto::UpdateBuffer>,
412 cx: &mut ModelContext<Self>,
413 ) -> Result<()> {
414 let payload = envelope.payload.clone();
415 let buffer_id = payload.buffer_id as usize;
416 let ops = payload
417 .operations
418 .into_iter()
419 .map(|op| language::proto::deserialize_operation(op))
420 .collect::<Result<Vec<_>, _>>()?;
421
422 match self {
423 Worktree::Local(worktree) => {
424 let buffer = worktree
425 .open_buffers
426 .get(&buffer_id)
427 .and_then(|buf| buf.upgrade(cx))
428 .ok_or_else(|| {
429 anyhow!("invalid buffer {} in update buffer message", buffer_id)
430 })?;
431 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
432 }
433 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
434 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
435 Some(RemoteBuffer::Loaded(buffer)) => {
436 if let Some(buffer) = buffer.upgrade(cx) {
437 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
438 } else {
439 worktree
440 .open_buffers
441 .insert(buffer_id, RemoteBuffer::Operations(ops));
442 }
443 }
444 None => {
445 worktree
446 .open_buffers
447 .insert(buffer_id, RemoteBuffer::Operations(ops));
448 }
449 },
450 }
451
452 Ok(())
453 }
454
    /// Handles a guest's request to save a buffer the host shared with them:
    /// saves it on the host, then responds with the saved version and mtime.
    ///
    /// Only valid on a local worktree (panics otherwise) and errors if the
    /// worktree is not currently shared.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        let project_id = this
            .share
            .as_ref()
            .ok_or_else(|| anyhow!("can't save buffer while disconnected"))?
            .project_id;

        // The buffer must have been previously shared with this peer.
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx)).await
        });

        // Respond once the save completes; failures are logged rather than
        // propagated to the caller.
        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
507
508 pub fn handle_buffer_saved(
509 &mut self,
510 envelope: TypedEnvelope<proto::BufferSaved>,
511 cx: &mut ModelContext<Self>,
512 ) -> Result<()> {
513 let payload = envelope.payload.clone();
514 let worktree = self.as_remote_mut().unwrap();
515 if let Some(buffer) = worktree
516 .open_buffers
517 .get(&(payload.buffer_id as usize))
518 .and_then(|buf| buf.upgrade(cx))
519 {
520 buffer.update(cx, |buffer, cx| {
521 let version = payload.version.try_into()?;
522 let mtime = payload
523 .mtime
524 .ok_or_else(|| anyhow!("missing mtime"))?
525 .into();
526 buffer.did_save(version, mtime, None, cx);
527 Result::<_, anyhow::Error>::Ok(())
528 })?;
529 }
530 Ok(())
531 }
532
    /// Handles a guest's request to format a shared buffer on the host,
    /// replying with `Ack` on success or an error message on failure.
    ///
    /// Only valid on a local worktree (panics otherwise).
    pub fn handle_format_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::FormatBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        let this = self.as_local().unwrap();
        // The buffer must have been previously shared with this peer.
        let buffer = this
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        cx.spawn(|_, mut cx| async move {
            let format = buffer.update(&mut cx, |buffer, cx| buffer.format(cx)).await;
            // We spawn here in order to enqueue the sending of `Ack` *after* transmission of edits
            // associated with formatting.
            cx.spawn(|_| async move {
                match format {
                    Ok(()) => rpc.respond(receipt, proto::Ack {}).await?,
                    Err(error) => {
                        rpc.respond_with_error(
                            receipt,
                            proto::Error {
                                message: error.to_string(),
                            },
                        )
                        .await?
                    }
                }
                Ok::<_, anyhow::Error>(())
            })
            .await
            .log_err();
        })
        .detach();

        Ok(())
    }
574
    /// Pulls the latest background snapshot into the model and refreshes the
    /// files of any open buffers.
    ///
    /// While a local scan is still in progress, schedules another poll
    /// (immediately for the fake test filesystem, after 100ms otherwise)
    /// instead of touching open buffers.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    // Keep at most one pending re-poll scheduled at a time.
                    if worktree.poll_task.is_none() {
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                // Yield so the fake fs makes deterministic
                                // progress in tests, instead of sleeping.
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
607
    /// Reconciles every open buffer's `File` with the current snapshot.
    ///
    /// For each buffer, the entry is resolved by stable entry id first, then
    /// by path; if neither resolves, the file keeps its old path and mtime
    /// but loses its entry id (it no longer exists in the snapshot). Buffers
    /// whose weak handles can no longer be upgraded are pruned from the map.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        // Remote worktrees only track loaded buffers here; queued-operation
        // placeholders have no file to update.
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = File::from_dyn(buffer.file()) {
                        // Resolution order: entry id, then path, then
                        // "deleted" (no entry id).
                        let new_file = if let Some(entry) = old_file
                            .entry_id
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                buffers_to_delete.push(*buffer_id);
            }
        }

        // Drop map entries for buffers that no longer exist.
        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
683
    /// Transmits a buffer operation to the server, if this worktree is
    /// connected (a shared local worktree, or any remote worktree).
    ///
    /// On failure the operation is pushed onto `queued_operations` so it can
    /// be retransmitted later instead of being lost.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((project_id, worktree_id, rpc)) = match self {
            // A local worktree only transmits operations while it is shared.
            Worktree::Local(worktree) => worktree
                .share
                .as_ref()
                .map(|share| (share.project_id, worktree.id(), worktree.client.clone())),
            Worktree::Remote(worktree) => Some((
                worktree.project_id,
                worktree.snapshot.id(),
                worktree.client.clone(),
            )),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        project_id,
                        worktree_id: worktree_id.0 as u64,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        // Queue for retransmission on either variant.
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
724}
725
726impl WorktreeId {
727 pub fn from_usize(handle_id: usize) -> Self {
728 Self(handle_id)
729 }
730
731 pub(crate) fn from_proto(id: u64) -> Self {
732 Self(id as usize)
733 }
734
735 pub fn to_proto(&self) -> u64 {
736 self.0 as u64
737 }
738
739 pub fn to_usize(&self) -> usize {
740 self.0
741 }
742}
743
744impl fmt::Display for WorktreeId {
745 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
746 self.0.fmt(f)
747 }
748}
749
/// A point-in-time view of a worktree's entries. Cheap to clone and shared
/// between the foreground model and the background scanner.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Bumped per background scan; `ignores` entries record the scan id at
    // which they were last observed.
    scan_id: usize,
    // Absolute root path on disk (empty for remote worktrees).
    abs_path: Arc<Path>,
    // Final component of `abs_path`; used for display and fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    // Parsed `.gitignore` files keyed by containing directory, paired with
    // the scan id at which each was last seen.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // NOTE(review): appears to track ids of entries removed during the
    // current scan so they can be reused — confirm against the scanner.
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}

/// A worktree backed by the local filesystem, kept current by a background
/// scanner task.
pub struct LocalWorktree {
    snapshot: Snapshot,
    // Settings parsed from `.zed.toml` at the worktree root.
    config: WorktreeConfig,
    // Mutated by the background scanner; polled into `snapshot` by
    // `Worktree::poll_snapshot`.
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    // Pending re-poll scheduled while a scan is still in progress.
    poll_task: Option<Task<()>>,
    registration: Registration,
    // Present only while this worktree is shared with collaborators.
    share: Option<ShareState>,
    loading_buffers: LoadingBuffers,
    // Open buffers keyed by buffer id; weak handles, so closing a buffer
    // releases it.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles to buffers shared with each peer, keyed by peer id and
    // then by buffer id.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Latest diagnostics received per worktree-relative path.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    // Buffer operations that failed to send and await retransmission.
    queued_operations: Vec<(u64, Operation)>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    weak: bool,
}

/// Progress of registering this worktree with the server.
#[derive(Debug, Eq, PartialEq)]
enum Registration {
    None,
    Pending,
    Done { project_id: u64 },
}

/// State that exists only while a local worktree is shared with guests.
struct ShareState {
    project_id: u64,
    // Channel over which fresh snapshots are submitted for transmission.
    snapshots_tx: Sender<Snapshot>,
    _maintain_remote_snapshot: Option<Task<()>>,
}

/// A worktree mirrored from a remote host over RPC.
pub struct RemoteWorktree {
    project_id: u64,
    snapshot: Snapshot,
    // Receives snapshots produced by applying host updates in the background.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    // Feeds incoming `UpdateWorktree` messages to the background applier.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    replica_id: ReplicaId,
    loading_buffers: LoadingBuffers,
    // Loaded buffers, or queued operations for buffers not yet loaded.
    open_buffers: HashMap<usize, RemoteBuffer>,
    queued_operations: Vec<(u64, Operation)>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    weak: bool,
}

/// In-flight buffer loads, keyed by path. Each waiter clones the watch
/// receiver and resolves once the load task publishes a result.
type LoadingBuffers = HashMap<
    Arc<Path>,
    postage::watch::Receiver<
        Option<Result<(ModelHandle<Buffer>, Arc<AtomicBool>), Arc<anyhow::Error>>>,
    >,
>;

/// Contents of the optional `.zed.toml` file at the worktree root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins permitted to collaborate on this worktree.
    collaborators: Vec<String>,
}
822
823impl LocalWorktree {
    /// Builds a `LocalWorktree` model rooted at `path`, returning it along
    /// with the sender the background scanner should use to report scan
    /// states.
    ///
    /// Reads the root metadata and optional `.zed.toml` config up front. The
    /// returned tree has only its root entry populated; the caller is
    /// responsible for starting the background scanner (see
    /// `Worktree::open_local`).
    async fn new(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        weak: bool,
        fs: Arc<dyn Fs>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // Entry paths are relative to the root, so the root entry's own
        // path is empty.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // `.zed.toml` is optional; load or parse failures silently fall back
        // to the default config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: WorktreeId::from_usize(cx.model_id()),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            if let Some(metadata) = metadata {
                // Seed the snapshot with the root entry; the scanner fills
                // in the rest.
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                registration: Registration::None,
                share: None,
                poll_task: None,
                loading_buffers: Default::default(),
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                queued_operations: Default::default(),
                client,
                fs,
                weak,
            };

            // Forward scan states from the background scanner into the model:
            // refresh the snapshot on every state change, and once scanning
            // settles, submit the final snapshot for sharing (when shared).
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree model was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
929
930 pub fn authorized_logins(&self) -> Vec<String> {
931 self.config.collaborators.clone()
932 }
933
    /// Returns the open buffer for `path` in this worktree, if one exists.
    ///
    /// As a side effect, prunes any open-buffer entries whose weak handles
    /// can no longer be upgraded.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut result = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    // Only a buffer belonging to this worktree at the
                    // requested path counts as a match.
                    if file.worktree == handle && file.path().as_ref() == path {
                        result = Some(buffer);
                    }
                }
                true
            } else {
                // The buffer was dropped; remove its stale entry.
                false
            }
        });
        result
    }
955
    /// Loads the file at `path` from disk and materializes it as a buffer,
    /// applying any diagnostics previously received for that path.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;

            // Diagnostics may have arrived before the buffer was opened.
            let diagnostics = this.update(&mut cx, |this, _| {
                this.as_local_mut().unwrap().diagnostics.get(&path).cloned()
            });

            let mut buffer_operations = Vec::new();
            let buffer = cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                if let Some(diagnostics) = diagnostics {
                    let op = buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    buffer_operations.push(op);
                }
                buffer
            });

            this.update(&mut cx, |this, cx| {
                // Broadcast any diagnostic operations to collaborators and
                // track the new buffer with a weak handle.
                for op in buffer_operations {
                    this.send_buffer_update(buffer.read(cx).remote_id(), op, cx);
                }
                let this = this.as_local_mut().unwrap();
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
            });

            Ok(buffer)
        })
    }
992
    /// Shares `buffer` with `peer_id`, retaining a strong handle on the
    /// peer's behalf, and returns the buffer's serialized state.
    pub fn open_remote_buffer(
        &mut self,
        peer_id: PeerId,
        buffer: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> proto::OpenBufferResponse {
        self.shared_buffers
            .entry(peer_id)
            .or_default()
            .insert(buffer.id() as u64, buffer.clone());
        proto::OpenBufferResponse {
            buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
        }
    }
1007
    /// Handles a guest closing a buffer: drops the strong handle retained on
    /// that peer's behalf.
    pub fn close_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::CloseBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
            shared_buffers.remove(&envelope.payload.buffer_id);
            cx.notify();
        }

        Ok(())
    }
1020
1021 pub fn remove_collaborator(
1022 &mut self,
1023 peer_id: PeerId,
1024 replica_id: ReplicaId,
1025 cx: &mut ModelContext<Worktree>,
1026 ) {
1027 self.shared_buffers.remove(&peer_id);
1028 for (_, buffer) in &self.open_buffers {
1029 if let Some(buffer) = buffer.upgrade(cx) {
1030 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1031 }
1032 }
1033 cx.notify();
1034 }
1035
1036 pub fn update_diagnostics(
1037 &mut self,
1038 worktree_path: Arc<Path>,
1039 params: lsp::PublishDiagnosticsParams,
1040 disk_based_sources: &HashSet<String>,
1041 cx: &mut ModelContext<Worktree>,
1042 ) -> Result<()> {
1043 let mut next_group_id = 0;
1044 let mut diagnostics = Vec::default();
1045 let mut primary_diagnostic_group_ids = HashMap::default();
1046 let mut sources_by_group_id = HashMap::default();
1047 let mut supporting_diagnostic_severities = HashMap::default();
1048 for diagnostic in ¶ms.diagnostics {
1049 let source = diagnostic.source.as_ref();
1050 let code = diagnostic.code.as_ref().map(|code| match code {
1051 lsp::NumberOrString::Number(code) => code.to_string(),
1052 lsp::NumberOrString::String(code) => code.clone(),
1053 });
1054 let range = range_from_lsp(diagnostic.range);
1055 let is_supporting = diagnostic
1056 .related_information
1057 .as_ref()
1058 .map_or(false, |infos| {
1059 infos.iter().any(|info| {
1060 primary_diagnostic_group_ids.contains_key(&(
1061 source,
1062 code.clone(),
1063 range_from_lsp(info.location.range),
1064 ))
1065 })
1066 });
1067
1068 if is_supporting {
1069 if let Some(severity) = diagnostic.severity {
1070 supporting_diagnostic_severities
1071 .insert((source, code.clone(), range), severity);
1072 }
1073 } else {
1074 let group_id = post_inc(&mut next_group_id);
1075 let is_disk_based =
1076 source.map_or(false, |source| disk_based_sources.contains(source));
1077
1078 sources_by_group_id.insert(group_id, source);
1079 primary_diagnostic_group_ids
1080 .insert((source, code.clone(), range.clone()), group_id);
1081
1082 diagnostics.push(DiagnosticEntry {
1083 range,
1084 diagnostic: Diagnostic {
1085 code: code.clone(),
1086 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
1087 message: diagnostic.message.clone(),
1088 group_id,
1089 is_primary: true,
1090 is_valid: true,
1091 is_disk_based,
1092 },
1093 });
1094 if let Some(infos) = &diagnostic.related_information {
1095 for info in infos {
1096 if info.location.uri == params.uri {
1097 let range = range_from_lsp(info.location.range);
1098 diagnostics.push(DiagnosticEntry {
1099 range,
1100 diagnostic: Diagnostic {
1101 code: code.clone(),
1102 severity: DiagnosticSeverity::INFORMATION,
1103 message: info.message.clone(),
1104 group_id,
1105 is_primary: false,
1106 is_valid: true,
1107 is_disk_based,
1108 },
1109 });
1110 }
1111 }
1112 }
1113 }
1114 }
1115
1116 for entry in &mut diagnostics {
1117 let diagnostic = &mut entry.diagnostic;
1118 if !diagnostic.is_primary {
1119 let source = *sources_by_group_id.get(&diagnostic.group_id).unwrap();
1120 if let Some(&severity) = supporting_diagnostic_severities.get(&(
1121 source,
1122 diagnostic.code.clone(),
1123 entry.range.clone(),
1124 )) {
1125 diagnostic.severity = severity;
1126 }
1127 }
1128 }
1129
1130 self.update_diagnostic_entries(worktree_path, params.version, diagnostics, cx)?;
1131 Ok(())
1132 }
1133
    /// Stores `diagnostics` for `worktree_path`, applies them to the
    /// matching open buffer (if any), and broadcasts an updated summary to
    /// collaborators when the worktree is shared.
    pub fn update_diagnostic_entries(
        &mut self,
        worktree_path: Arc<Path>,
        version: Option<i32>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        // At most one open buffer can match the path, so stop at the first.
        for buffer in self.open_buffers.values() {
            if let Some(buffer) = buffer.upgrade(cx) {
                if buffer
                    .read(cx)
                    .file()
                    .map_or(false, |file| *file.path() == worktree_path)
                {
                    let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
                        (
                            buffer.remote_id(),
                            buffer.update_diagnostics(version, diagnostics.clone(), cx),
                        )
                    });
                    self.send_buffer_update(remote_id, operation?, cx);
                    break;
                }
            }
        }

        let summary = DiagnosticSummary::new(&diagnostics);
        self.diagnostic_summaries
            .insert(PathKey(worktree_path.clone()), summary.clone());
        self.diagnostics.insert(worktree_path.clone(), diagnostics);

        // If shared, notify the server so guests see the updated summary;
        // send errors are logged, not propagated.
        if let Some(share) = self.share.as_ref() {
            cx.foreground()
                .spawn({
                    let client = self.client.clone();
                    let project_id = share.project_id;
                    let worktree_id = self.id().to_proto();
                    let path = worktree_path.to_string_lossy().to_string();
                    async move {
                        client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path,
                                    error_count: summary.error_count as u32,
                                    warning_count: summary.warning_count as u32,
                                    info_count: summary.info_count as u32,
                                    hint_count: summary.hint_count as u32,
                                }),
                            })
                            .await
                            .log_err()
                    }
                })
                .detach();
        }

        Ok(())
    }
1194
    /// Sends a single buffer operation to the server for a shared worktree.
    ///
    /// Returns `None` (a no-op) when the worktree is not shared. The RPC request is
    /// issued on a detached task; on failure the operation is re-queued in
    /// `queued_operations` so it can be retried later rather than lost.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<()> {
        let share = self.share.as_ref()?;
        let project_id = share.project_id;
        let worktree_id = self.id();
        let rpc = self.client.clone();
        cx.spawn(|worktree, mut cx| async move {
            if let Err(error) = rpc
                .request(proto::UpdateBuffer {
                    project_id,
                    worktree_id: worktree_id.0 as u64,
                    buffer_id,
                    operations: vec![language::proto::serialize_operation(&operation)],
                })
                .await
            {
                worktree.update(&mut cx, |worktree, _| {
                    log::error!("error sending buffer operation: {}", error);
                    // Keep the operation around for a later retry.
                    worktree
                        .as_local_mut()
                        .unwrap()
                        .queued_operations
                        .push((buffer_id, operation));
                });
            }
        })
        .detach();
        None
    }
1228
1229 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1230 let mut scan_state_rx = self.last_scan_state_rx.clone();
1231 async move {
1232 let mut scan_state = Some(scan_state_rx.borrow().clone());
1233 while let Some(ScanState::Scanning) = scan_state {
1234 scan_state = scan_state_rx.recv().await;
1235 }
1236 }
1237 }
1238
1239 fn is_scanning(&self) -> bool {
1240 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1241 true
1242 } else {
1243 false
1244 }
1245 }
1246
    /// Returns an owned clone of the worktree's current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1250
    /// Returns the absolute filesystem path of the worktree root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.snapshot.abs_path
    }
1254
    /// Returns true if the given absolute path lies inside this worktree's root
    /// (component-wise prefix check, not a string prefix).
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1258
1259 fn absolutize(&self, path: &Path) -> PathBuf {
1260 if path.file_name().is_some() {
1261 self.snapshot.abs_path.join(path)
1262 } else {
1263 self.snapshot.abs_path.to_path_buf()
1264 }
1265 }
1266
    /// Loads the file at the given worktree-relative path, returning its `File`
    /// handle and contents. Also refreshes the background snapshot's entry for the
    /// path so the snapshot reflects the file's current metadata.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Pull the refreshed background snapshot into the foreground copy.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1292
    /// Saves a buffer's contents to a (possibly new) path inside this worktree,
    /// registers the buffer in `open_buffers`, and notifies it of the save with a
    /// fresh `File` pointing at the saved entry.
    pub fn save_buffer_as(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        // Capture the text and version up front so the save reflects the buffer
        // state at the time this method was called.
        let text = buffer.as_rope().clone();
        let version = buffer.version();
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            let file = this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                this.open_buffers
                    .insert(buffer_handle.id(), buffer_handle.downgrade());
                File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                }
            });

            // Inform the buffer of the successful save so it can clear its dirty
            // state and adopt the new file identity.
            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version, file.mtime, Some(Box::new(file)), cx);
            });

            Ok(())
        })
    }
1326
    /// Writes `text` to the given worktree-relative path on disk, refreshes the
    /// background snapshot's entry for that path, and resolves with the updated
    /// entry once the foreground snapshot has been polled.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Disk I/O and entry refresh happen on the background executor.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1348
    /// Registers this worktree with the server under the given project.
    ///
    /// Registration is a three-state machine (`None` → `Pending` → `Done`); a
    /// worktree that is already pending or registered returns immediately. On RPC
    /// failure the state is rolled back to `None` so registration can be retried.
    pub fn register(
        &mut self,
        project_id: u64,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<anyhow::Result<()>> {
        if self.registration != Registration::None {
            return Task::ready(Ok(()));
        }

        self.registration = Registration::Pending;
        let client = self.client.clone();
        let register_message = proto::RegisterWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            authorized_logins: self.authorized_logins(),
        };
        cx.spawn(|this, mut cx| async move {
            let response = client.request(register_message).await;
            this.update(&mut cx, |this, _| {
                let worktree = this.as_local_mut().unwrap();
                match response {
                    Ok(_) => {
                        worktree.registration = Registration::Done { project_id };
                        Ok(())
                    }
                    Err(error) => {
                        // Roll back so a subsequent `register` call can retry.
                        worktree.registration = Registration::None;
                        Err(error)
                    }
                }
            })
        })
    }
1383
    /// Shares this (already registered) worktree with collaborators.
    ///
    /// Sends the full snapshot to the server and starts a background task that
    /// streams incremental snapshot diffs for as long as the `ShareState` (and its
    /// sender) is alive. Sharing an already-shared worktree is a no-op.
    pub fn share(&mut self, cx: &mut ModelContext<Worktree>) -> Task<anyhow::Result<()>> {
        let project_id = if let Registration::Done { project_id } = self.registration {
            project_id
        } else {
            return Task::ready(Err(anyhow!("cannot share worktree before registering it")));
        };

        if self.share.is_some() {
            return Task::ready(Ok(()));
        }

        let snapshot = self.snapshot();
        let rpc = self.client.clone();
        let worktree_id = cx.model_id() as u64;
        let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
        // Long-running task: diff each queued snapshot against the last one that
        // was successfully sent, and push the delta to the server. The loop ends
        // when the sender in `ShareState` is dropped (see `unshare`).
        let maintain_remote_snapshot = cx.background().spawn({
            let rpc = rpc.clone();
            let snapshot = snapshot.clone();
            async move {
                let mut prev_snapshot = snapshot;
                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                    let message =
                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
                    match rpc.send(message).await {
                        // Only advance the baseline when the diff actually went out.
                        Ok(()) => prev_snapshot = snapshot,
                        Err(err) => log::error!("error sending snapshot diff {}", err),
                    }
                }
            }
        });
        self.share = Some(ShareState {
            project_id,
            snapshots_tx: snapshots_to_send_tx,
            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
        });

        // Serializing the full snapshot can be expensive, so build the message on
        // the background executor before issuing the request.
        let diagnostic_summaries = self.diagnostic_summaries.clone();
        let weak = self.weak;
        let share_message = cx.background().spawn(async move {
            proto::ShareWorktree {
                project_id,
                worktree: Some(snapshot.to_proto(&diagnostic_summaries, weak)),
            }
        });

        cx.foreground().spawn(async move {
            rpc.request(share_message.await).await?;
            Ok(())
        })
    }
1434
    /// Stops sharing: dropping the `ShareState` closes the snapshot channel and
    /// cancels the background diff-streaming task.
    pub fn unshare(&mut self) {
        self.share.take();
    }
1438
    /// Returns true while this worktree is being shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1442}
1443
1444fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1445 let contents = smol::block_on(fs.load(&abs_path))?;
1446 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1447 let mut builder = GitignoreBuilder::new(parent);
1448 for line in contents.lines() {
1449 builder.add_line(Some(abs_path.into()), line)?;
1450 }
1451 Ok(builder.build()?)
1452}
1453
// Both worktree variants expose their `Snapshot` directly via deref.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
1464
// Allows snapshot methods to be called directly on a `LocalWorktree`.
impl Deref for LocalWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1472
// Allows snapshot methods to be called directly on a `RemoteWorktree`.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1480
// Debug output delegates to the snapshot's entry-tree dump.
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1486
impl RemoteWorktree {
    /// Returns the already-open buffer for `path` in this worktree, if any.
    /// As a side effect, prunes `open_buffers` entries whose buffers have been
    /// dropped.
    fn get_open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<ModelHandle<Buffer>> {
        let handle = cx.handle();
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = File::from_dyn(buffer.read(cx).file()) {
                    if file.worktree == handle && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                // Buffer was dropped; remove the stale weak handle.
                false
            }
        });
        existing_buffer
    }

    /// Opens the buffer at `path` by requesting its contents from the host over
    /// RPC, then constructs a local replica of it. Any operations that arrived
    /// for this buffer before it finished loading are applied afterwards.
    fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let project_id = self.project_id;
        let remote_worktree_id = self.id();
        let root_path = self.snapshot.abs_path.clone();
        let path: Arc<Path> = Arc::from(path);
        let path_string = path.to_string_lossy().to_string();
        // `spawn_weak` so the request doesn't keep the worktree alive; each
        // upgrade failure maps to a "worktree was closed" error.
        cx.spawn_weak(move |this, mut cx| async move {
            let entry = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?
                .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                .ok_or_else(|| anyhow!("file does not exist"))?;
            let response = rpc
                .request(proto::OpenBuffer {
                    project_id,
                    worktree_id: remote_worktree_id.to_proto(),
                    path: path_string,
                })
                .await?;

            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was closed"))?;
            let file = File {
                entry_id: Some(entry.id),
                worktree: this.clone(),
                worktree_path: root_path,
                path: entry.path,
                mtime: entry.mtime,
                is_local: false,
            };
            let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
            let buffer_id = remote_buffer.id as usize;
            let buffer = cx.add_model(|cx| {
                Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx).unwrap()
            });
            this.update(&mut cx, move |this, cx| {
                let this = this.as_remote_mut().unwrap();
                // Replace any queued operations with the loaded buffer, and
                // apply those pending operations to it.
                if let Some(RemoteBuffer::Operations(pending_ops)) = this
                    .open_buffers
                    .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                {
                    buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                }
                Result::<_, anyhow::Error>::Ok(buffer)
            })
        })
    }

    /// Returns an owned clone of the worktree's current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues a snapshot update received from the host; the update is applied
    /// asynchronously by whatever task consumes `updates_tx`'s receiver.
    pub fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Records the diagnostic counts for a path, as reported by the host.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        self.diagnostic_summaries.insert(
            PathKey(path.clone()),
            DiagnosticSummary {
                error_count: summary.error_count as usize,
                warning_count: summary.warning_count as usize,
                info_count: summary.info_count as usize,
                hint_count: summary.hint_count as usize,
            },
        );
    }

    /// Removes a departed collaborator's replica from every open buffer.
    pub fn remove_collaborator(&mut self, replica_id: ReplicaId, cx: &mut ModelContext<Worktree>) {
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
    }
}
1610
/// The state of a buffer on a remote worktree: either operations that arrived
/// before the buffer finished loading, or a weak handle to the loaded buffer.
enum RemoteBuffer {
    // Operations received for a buffer that is still being opened.
    Operations(Vec<Operation>),
    // The buffer is open; held weakly so closing it drops the model.
    Loaded(WeakModelHandle<Buffer>),
}
1615
1616impl RemoteBuffer {
1617 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1618 match self {
1619 Self::Operations(_) => None,
1620 Self::Loaded(buffer) => buffer.upgrade(cx),
1621 }
1622 }
1623}
1624
impl Snapshot {
    /// Returns this worktree's stable identifier.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Serializes the snapshot for transmission to collaborators. Ignored
    /// entries are excluded from the wire format.
    pub fn to_proto(
        &self,
        diagnostic_summaries: &TreeMap<PathKey, DiagnosticSummary>,
        weak: bool,
    ) -> proto::Worktree {
        let root_name = self.root_name.clone();
        proto::Worktree {
            id: self.id.0 as u64,
            root_name,
            entries: self
                .entries_by_path
                .iter()
                .filter(|e| !e.is_ignored)
                .map(Into::into)
                .collect(),
            diagnostic_summaries: diagnostic_summaries
                .iter()
                .map(|(path, summary)| summary.to_proto(path.0.clone()))
                .collect(),
            weak,
        }
    }

    /// Computes the delta between `other` (the previously sent snapshot) and
    /// `self`, as an `UpdateWorktree` message listing updated and removed
    /// entries. Relies on both `entries_by_id` trees being ordered by entry id,
    /// which allows a single merge-style pass over the two cursors.
    pub fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Present only in self: a new entry.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // Present in both: changed only if rescanned since.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Present only in other: removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
        }
    }

    /// Applies an incremental update received from the host, keeping both the
    /// by-path and by-id trees in sync. Fails if a removed entry id is unknown.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, drop the record at its old path first.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of file entries (including ignored files).
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal positioned at the `start_offset`-th entry that matches
    /// the inclusion flags, using the tree's count dimensions to seek in O(log n).
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at the entry for `path` (or the first entry
    /// after it, if absent).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th matching file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories) from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over all entry paths, skipping the root entry (empty path).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of `parent_path`, including ignored
    /// entries and directories.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Bias::Right positions the cursor just past the parent entry itself.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The root entry of the worktree, keyed by the empty path.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up an entry by its worktree-relative path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on a later entry when `path` is absent.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id, going through the id → path index.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// Returns the inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts (or replaces) a single entry in both trees. If the entry is a
    /// `.gitignore` file, its rules are (re)loaded into the ignore map for the
    /// containing directory; load failures are logged and otherwise ignored.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Records the scanned contents of a directory, transitioning the parent
    /// from `PendingDir` to `Dir` and inserting all child entries in one batch
    /// edit. Panics if the parent was not pending — callers must only populate a
    /// directory once.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Preserves entry-id stability across rescans: a freshly scanned entry
    /// reuses the id previously assigned to the same inode (if that entry was
    /// removed) or to the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything beneath it from both trees. Removed ids
    /// are remembered per-inode (keeping the largest id) so `reuse_entry_id` can
    /// reassign them if the same inode reappears, e.g. after a rename.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the by-path tree into [before path], [path..successor], [after].
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Deleting a .gitignore invalidates the cached rules for its directory;
        // bumping the scan id marks them for reprocessing.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of gitignore rules that applies to `path` by walking its
    /// ancestors root-ward, then replaying them top-down. Short-circuits to an
    /// "ignore everything" stack as soon as any ancestor (or the path itself)
    /// is ignored.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1994
1995impl fmt::Debug for Snapshot {
1996 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1997 for entry in self.entries_by_path.cursor::<()>() {
1998 for _ in entry.path.ancestors().skip(1) {
1999 write!(f, " ")?;
2000 }
2001 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2002 }
2003 Ok(())
2004 }
2005}
2006
/// A handle to a file within a worktree, usable for both local and remote
/// worktrees.
#[derive(Clone, PartialEq)]
pub struct File {
    // `None` once the underlying entry has been deleted (see `is_deleted`).
    entry_id: Option<usize>,
    pub worktree: ModelHandle<Worktree>,
    // Absolute path of the worktree root this file belongs to.
    worktree_path: Arc<Path>,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    // True when the file lives on this machine's filesystem.
    is_local: bool,
}
2016
impl language::File for File {
    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The absolute path on disk; `None` for files on remote worktrees.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// The path prefixed with the worktree root's name, for display purposes.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
    }

    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Persists the buffer's contents. For a local worktree this writes to disk
    /// (and notifies the server when shared); for a remote worktree it asks the
    /// host to perform the save via RPC. Resolves with the saved version and the
    /// resulting mtime.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        let worktree_id = self.worktree.read(cx).id().to_proto();
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                // Only notify the server when the worktree is currently shared.
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Reads the file's contents from the local filesystem; `None` for remote
    /// worktrees.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Asks the host to format the buffer; `None` for local worktrees.
    fn format_remote(
        &self,
        buffer_id: u64,
        cx: &mut MutableAppContext,
    ) -> Option<Task<Result<()>>> {
        let worktree = self.worktree.read(cx);
        let worktree_id = worktree.id().to_proto();
        let worktree = worktree.as_remote()?;
        let rpc = worktree.client.clone();
        let project_id = worktree.project_id;
        Some(cx.foreground().spawn(async move {
            rpc.request(proto::FormatBuffer {
                project_id,
                worktree_id,
                buffer_id,
            })
            .await?;
            Ok(())
        }))
    }

    /// Forwards a local buffer edit to collaborators (no-op when not shared).
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// Notifies the host that this client closed a remote buffer; send errors
    /// are logged but not propagated.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let project_id = worktree.project_id;
                let worktree_id = worktree.id().to_proto();
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                project_id,
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    // Enables downcasting back to the concrete `File` (see `File::from_dyn`).
    fn as_any(&self) -> &dyn Any {
        self
    }
}
2171
2172impl File {
2173 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2174 file.and_then(|f| f.as_any().downcast_ref())
2175 }
2176
2177 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2178 self.worktree.read(cx).id()
2179 }
2180}
2181
/// A single filesystem entry (file or directory) within a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    // Stable identifier; preserved across rescans (see `reuse_entry_id`).
    pub id: usize,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True if matched by an applicable .gitignore rule.
    pub is_ignored: bool,
}
2192
/// What an `Entry` represents.
#[derive(Clone, Debug)]
pub enum EntryKind {
    // A directory whose contents have not been scanned yet.
    PendingDir,
    // A fully scanned directory.
    Dir,
    // A file, with a precomputed character bag for fuzzy matching.
    File(CharBag),
}
2199
2200impl Entry {
2201 fn new(
2202 path: Arc<Path>,
2203 metadata: &fs::Metadata,
2204 next_entry_id: &AtomicUsize,
2205 root_char_bag: CharBag,
2206 ) -> Self {
2207 Self {
2208 id: next_entry_id.fetch_add(1, SeqCst),
2209 kind: if metadata.is_dir {
2210 EntryKind::PendingDir
2211 } else {
2212 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2213 },
2214 path,
2215 inode: metadata.inode,
2216 mtime: metadata.mtime,
2217 is_symlink: metadata.is_symlink,
2218 is_ignored: false,
2219 }
2220 }
2221
2222 pub fn is_dir(&self) -> bool {
2223 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2224 }
2225
2226 pub fn is_file(&self) -> bool {
2227 matches!(self.kind, EntryKind::File(_))
2228 }
2229}
2230
2231impl sum_tree::Item for Entry {
2232 type Summary = EntrySummary;
2233
2234 fn summary(&self) -> Self::Summary {
2235 let visible_count = if self.is_ignored { 0 } else { 1 };
2236 let file_count;
2237 let visible_file_count;
2238 if self.is_file() {
2239 file_count = 1;
2240 visible_file_count = visible_count;
2241 } else {
2242 file_count = 0;
2243 visible_file_count = 0;
2244 }
2245
2246 EntrySummary {
2247 max_path: self.path.clone(),
2248 count: 1,
2249 visible_count,
2250 file_count,
2251 visible_file_count,
2252 }
2253 }
2254}
2255
// Entries are keyed (and therefore ordered) by their worktree-relative path.
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2263
/// Aggregated statistics over a subtree of entries, used for O(log n) seeks.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest path in the subtree (entries are path-ordered).
    max_path: Arc<Path>,
    // Total entries in the subtree.
    count: usize,
    // Entries not matched by a .gitignore rule.
    visible_count: usize,
    // File entries (including ignored ones).
    file_count: usize,
    // File entries not matched by a .gitignore rule.
    visible_file_count: usize,
}
2272
// The identity summary: empty path, all counts zero.
impl Default for EntrySummary {
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2284
2285impl sum_tree::Summary for EntrySummary {
2286 type Context = ();
2287
2288 fn add_summary(&mut self, rhs: &Self, _: &()) {
2289 self.max_path = rhs.max_path.clone();
2290 self.visible_count += rhs.visible_count;
2291 self.file_count += rhs.file_count;
2292 self.visible_file_count += rhs.visible_file_count;
2293 }
2294}
2295
/// Secondary index record: maps an entry id to its path, plus the metadata
/// needed for diffing snapshots (`scan_id`) and filtering (`is_ignored`).
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this record was last written; used by `build_update`.
    scan_id: usize,
}
2303
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The summary tracks only the maximum id, enabling id-based seeks.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2311
// `PathEntry` records are keyed (and ordered) by entry id.
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    fn key(&self) -> Self::Key {
        self.id
    }
}
2319
/// Summary for the id-keyed tree: just the largest id in the subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2324
2325impl sum_tree::Summary for PathEntrySummary {
2326 type Context = ();
2327
2328 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2329 self.max_id = summary.max_id;
2330 }
2331}
2332
2333impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
2334 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2335 *self = summary.max_id;
2336 }
2337}
2338
/// Sort key for `Entry` items in the path-keyed tree: the entry's
/// worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2341
2342impl Default for PathKey {
2343 fn default() -> Self {
2344 Self(Path::new("").into())
2345 }
2346}
2347
2348impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2349 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2350 self.0 = summary.max_path.clone();
2351 }
2352}
2353
/// Keeps a worktree `Snapshot` in sync with the file system from a
/// background task: performs the initial recursive scan and then applies
/// batches of FS events, reporting progress on `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    /// Shared, mutex-guarded snapshot that scanning mutates in place.
    snapshot: Arc<Mutex<Snapshot>>,
    /// Channel on which `ScanState` transitions are reported.
    notify: Sender<ScanState>,
    /// Executor used to fan directory scans out across CPUs.
    executor: Arc<executor::Background>,
}
2360
impl BackgroundScanner {
    /// Creates a scanner that keeps `snapshot` up to date with the file
    /// system, reporting scan-state transitions on `notify`.
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }

    /// The absolute path of the worktree root being scanned.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// A point-in-time copy of the shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Main loop: performs the initial scan, then processes batches of FS
    /// events until `events_rx` ends or the notify receiver is dropped
    /// (every send failure causes an early exit).
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        // Each batch of events is bracketed by Scanning/Idle notifications.
        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }

    /// Performs the initial recursive scan of the worktree root, fanning the
    /// work out over one worker per CPU via a shared job queue. A single-file
    /// worktree (root is not a directory) needs no scan.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            // Copy what we need out of the snapshot and release the lock
            // before any awaits.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        if is_dir {
            // Seed the queue with the root directory; each worker both
            // consumes jobs and enqueues child directories.
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Drop our sender so the channel closes once all in-flight jobs
            // (which hold clones) are done.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }

    /// Scans a single directory: reads its children, applies gitignore
    /// rules, records the new entries in the snapshot, and enqueues child
    /// directories for other workers.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            // Skip unreadable children rather than failing the whole directory.
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current directory.
                // Because `.gitignore` starts with a `.`, it sorts near the
                // beginning of the listing, so there are rarely many such
                // entries. Update the ignore stack associated with any new
                // jobs as well. NOTE(review): this relies on `new_jobs` holding
                // exactly one job per directory entry in `new_entries`, in order.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                // An ignored directory's children are all ignored, so its job
                // carries the "everything ignored" stack.
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }

    /// Applies a batch of FS events to a private copy of the snapshot,
    /// rescans any created directories, then publishes the updated snapshot.
    /// Returns `false` if the worktree root can no longer be canonicalized
    /// (e.g. it was deleted), which ends the scanner's run loop.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sorting puts a parent before its descendants, so dedup drops any
        // event whose path is covered by a preceding ancestor's event.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First remove every affected path; re-inserting the ones that still
        // exist below lets renames be detected within this batch.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path no longer exists; the removal above already
                // handled it.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Publish the updated snapshot before the (potentially long) rescans.
        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }

    /// Re-evaluates gitignore state after a batch of events: drops cached
    /// ignores whose `.gitignore` file disappeared and re-applies any ignore
    /// touched in this scan to the subtree beneath it, in parallel.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // Only ignores touched in the current scan need re-application.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip descendants: updating `parent_path` recursively covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes the ignored flag for the direct children of `job.path`,
    /// enqueues child directories for recursive processing, and writes any
    /// changed entries back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only entries whose ignored flag actually changed are rewritten.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
}
2764
2765async fn refresh_entry(
2766 fs: &dyn Fs,
2767 snapshot: &Mutex<Snapshot>,
2768 path: Arc<Path>,
2769 abs_path: &Path,
2770) -> Result<Entry> {
2771 let root_char_bag;
2772 let next_entry_id;
2773 {
2774 let snapshot = snapshot.lock();
2775 root_char_bag = snapshot.root_char_bag;
2776 next_entry_id = snapshot.next_entry_id.clone();
2777 }
2778 let entry = Entry::new(
2779 path,
2780 &fs.metadata(abs_path)
2781 .await?
2782 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2783 &next_entry_id,
2784 root_char_bag,
2785 );
2786 Ok(snapshot.lock().insert_entry(entry, fs))
2787}
2788
2789fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2790 let mut result = root_char_bag;
2791 result.extend(
2792 path.to_string_lossy()
2793 .chars()
2794 .map(|c| c.to_ascii_lowercase()),
2795 );
2796 result
2797}
2798
/// Unit of work for the parallel directory scan: one directory to read,
/// together with the gitignore stack in effect there.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: PathBuf,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Sender cloned into child jobs; the queue closes once every job
    /// (and its clone of the sender) has been dropped.
    scan_queue: Sender<ScanJob>,
}
2805
/// Unit of work for re-applying gitignore rules: one directory whose
/// children need their ignored flags recomputed.
struct UpdateIgnoreStatusJob {
    /// Worktree-relative directory whose children to update.
    path: Arc<Path>,
    /// Gitignore rules in effect at `path` (excluding `path`'s own ignore).
    ignore_stack: Arc<IgnoreStack>,
    /// Sender cloned into child jobs so the queue stays open until all
    /// recursive work is finished.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2811
/// Test-only helpers available on a `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    // Waits until all pending file-system events for the worktree have been
    // observed and processed (see the impl below for how).
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2819
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        // Create and remove a sentinel file; once both changes are visible
        // in the worktree, every earlier event must have been processed.
        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2852
/// Running totals accumulated while seeking through the entry tree; serves
/// as the `Dimension` that `Traversal` uses to seek by path or by count.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// Greatest path covered so far.
    max_path: &'a Path,
    /// Total entries passed.
    count: usize,
    /// Non-ignored entries passed.
    visible_count: usize,
    /// File entries passed.
    file_count: usize,
    /// Non-ignored file entries passed.
    visible_file_count: usize,
}
2861
2862impl<'a> TraversalProgress<'a> {
2863 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2864 match (include_ignored, include_dirs) {
2865 (true, true) => self.count,
2866 (true, false) => self.file_count,
2867 (false, true) => self.visible_count,
2868 (false, false) => self.visible_file_count,
2869 }
2870 }
2871}
2872
2873impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2874 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2875 self.max_path = summary.max_path.as_ref();
2876 self.count += summary.count;
2877 self.visible_count += summary.visible_count;
2878 self.file_count += summary.file_count;
2879 self.visible_file_count += summary.visible_file_count;
2880 }
2881}
2882
2883impl<'a> Default for TraversalProgress<'a> {
2884 fn default() -> Self {
2885 Self {
2886 max_path: Path::new(""),
2887 count: 0,
2888 visible_count: 0,
2889 file_count: 0,
2890 visible_file_count: 0,
2891 }
2892 }
2893}
2894
/// A cursor-based iterator over worktree entries in path order, optionally
/// filtering out directories and/or gitignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// When false, gitignored entries are skipped.
    include_ignored: bool,
    /// When false, directory entries are skipped (files only).
    include_dirs: bool,
}
2900
impl<'a> Traversal<'a> {
    /// Steps to the next entry matching the traversal's filters.
    /// Returns `false` once the traversal is exhausted.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward so that `offset` filtered entries precede the cursor.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, landing on the next
    /// entry (in path order) that is not a descendant of it and matches the
    /// filters. Returns `false` if no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filtered entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2946
2947impl<'a> Iterator for Traversal<'a> {
2948 type Item = &'a Entry;
2949
2950 fn next(&mut self) -> Option<Self::Item> {
2951 if let Some(item) = self.entry() {
2952 self.advance();
2953 Some(item)
2954 } else {
2955 None
2956 }
2957 }
2958}
2959
/// A position within the entry tree that a `Traversal` cursor can seek to.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// The entry with exactly this path.
    Path(&'a Path),
    /// The first entry that is not this path and not a descendant of it.
    PathSuccessor(&'a Path),
    /// The position after `count` entries, under the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2970
2971impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2972 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2973 match self {
2974 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2975 TraversalTarget::PathSuccessor(path) => {
2976 if !cursor_location.max_path.starts_with(path) {
2977 Ordering::Equal
2978 } else {
2979 Ordering::Greater
2980 }
2981 }
2982 TraversalTarget::Count {
2983 count,
2984 include_dirs,
2985 include_ignored,
2986 } => Ord::cmp(
2987 count,
2988 &cursor_location.count(*include_dirs, *include_ignored),
2989 ),
2990 }
2991 }
2992}
2993
/// Iterates over the *direct* children of a directory by repeatedly skipping
/// each child's subtree via `Traversal::advance_to_sibling`.
struct ChildEntriesIter<'a> {
    /// Directory whose children are yielded; iteration stops once the
    /// traversal leaves this subtree.
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2998
2999impl<'a> Iterator for ChildEntriesIter<'a> {
3000 type Item = &'a Entry;
3001
3002 fn next(&mut self) -> Option<Self::Item> {
3003 if let Some(item) = self.traversal.entry() {
3004 if item.path.starts_with(&self.parent_path) {
3005 self.traversal.advance_to_sibling();
3006 return Some(item);
3007 }
3008 }
3009 None
3010 }
3011}
3012
3013impl<'a> From<&'a Entry> for proto::Entry {
3014 fn from(entry: &'a Entry) -> Self {
3015 Self {
3016 id: entry.id as u64,
3017 is_dir: entry.is_dir(),
3018 path: entry.path.to_string_lossy().to_string(),
3019 inode: entry.inode,
3020 mtime: Some(entry.mtime.into()),
3021 is_symlink: entry.is_symlink,
3022 is_ignored: entry.is_ignored,
3023 }
3024 }
3025}
3026
3027impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3028 type Error = anyhow::Error;
3029
3030 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3031 if let Some(mtime) = entry.mtime {
3032 let kind = if entry.is_dir {
3033 EntryKind::Dir
3034 } else {
3035 let mut char_bag = root_char_bag.clone();
3036 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3037 EntryKind::File(char_bag)
3038 };
3039 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3040 Ok(Entry {
3041 id: entry.id as usize,
3042 kind,
3043 path: path.clone(),
3044 inode: entry.inode,
3045 mtime: mtime.into(),
3046 is_symlink: entry.is_symlink,
3047 is_ignored: entry.is_ignored,
3048 })
3049 } else {
3050 Err(anyhow!(
3051 "missing mtime in remote worktree entry {:?}",
3052 entry.path
3053 ))
3054 }
3055 }
3056}
3057
3058#[cfg(test)]
3059mod tests {
3060 use super::*;
3061 use crate::fs::FakeFs;
3062 use anyhow::Result;
3063 use client::test::FakeHttpClient;
3064 use fs::RealFs;
3065 use language::{Diagnostic, DiagnosticEntry};
3066 use lsp::Url;
3067 use rand::prelude::*;
3068 use serde_json::json;
3069 use std::{cell::RefCell, rc::Rc};
3070 use std::{
3071 env,
3072 fmt::Write,
3073 time::{SystemTime, UNIX_EPOCH},
3074 };
3075 use text::Point;
3076 use unindent::Unindent as _;
3077 use util::test::temp_tree;
3078
    // Verifies that traversing a scanned worktree with `include_ignored: false`
    // omits gitignored entries ("a/b" is ignored via the root .gitignore).
    #[gpui::test]
    async fn test_traversal(cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client);

        let tree = Worktree::open_local(
            client,
            Arc::from(Path::new("/root")),
            false,
            Arc::new(fs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3123
    // Saving an edited buffer writes its contents back to the file on disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client);

        let tree = Worktree::open_local(
            client,
            dir.path(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx)
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3155
    // Saving also works when the worktree's root is a single file rather than
    // a directory (the buffer is opened at the empty relative path).
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client);

        let tree = Worktree::open_local(
            client,
            file_path.clone(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx)
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3192
    // After files are renamed/deleted on disk: entry ids survive renames,
    // open buffers track their files' new paths (or become deleted), and a
    // remote worktree replica converges after applying the update message.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());
        let tree = Worktree::open_local(
            client,
            dir.path(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap().0 }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let remote = Worktree::remote(
            1,
            1,
            initial_snapshot.to_proto(&Default::default(), Default::default()),
            Client::new(http_client.clone()),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Renamed entries keep their ids.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, 1, 1, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3342
    // Gitignore rules are applied both during the initial scan and when new
    // files appear afterwards; `.git` is always treated as ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());

        let tree = Worktree::open_local(
            client,
            dir.path(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Files created after the initial scan inherit their directory's
        // ignore status.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3392
    // Opening the same path several times (even concurrently, or after
    // dropping a handle while the buffer is still alive) yields a single
    // shared buffer per path.
    #[gpui::test]
    async fn test_buffer_deduping(mut cx: gpui::TestAppContext) {
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client);

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/the-dir",
            json!({
                "a.txt": "a-contents",
                "b.txt": "b-contents",
            }),
        )
        .await;

        let worktree =
            Worktree::open_local(client, "/the-dir".as_ref(), false, fs, &mut cx.to_async())
                .await
                .unwrap();

        // Spawn multiple tasks to open paths, repeating some paths.
        let (buffer_a_1, buffer_b, buffer_a_2) = worktree.update(&mut cx, |worktree, cx| {
            (
                worktree.open_buffer("a.txt", cx),
                worktree.open_buffer("b.txt", cx),
                worktree.open_buffer("a.txt", cx),
            )
        });

        let buffer_a_1 = buffer_a_1.await.unwrap().0;
        let buffer_a_2 = buffer_a_2.await.unwrap().0;
        let buffer_b = buffer_b.await.unwrap().0;
        assert_eq!(buffer_a_1.read_with(&cx, |b, _| b.text()), "a-contents");
        assert_eq!(buffer_b.read_with(&cx, |b, _| b.text()), "b-contents");

        // There is only one buffer per path.
        let buffer_a_id = buffer_a_1.id();
        assert_eq!(buffer_a_2.id(), buffer_a_id);

        // Open the same path again while it is still open.
        drop(buffer_a_1);
        let buffer_a_3 = worktree
            .update(&mut cx, |worktree, cx| worktree.open_buffer("a.txt", cx))
            .await
            .unwrap()
            .0;

        // There's still only one buffer per path.
        assert_eq!(buffer_a_3.id(), buffer_a_id);
    }
3443
    /// Exercises the buffer dirty-state lifecycle against a real worktree:
    /// a freshly-opened buffer starts clean, becomes dirty on edit (emitting
    /// `Dirtied`), becomes clean again after `did_save` (emitting `Saved`),
    /// and is dirtied when its backing file is deleted from disk — without a
    /// duplicate `Dirtied` event if it was already dirty at deletion time.
    #[gpui::test]
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());

        let tree = Worktree::open_local(
            client,
            dir.path(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Drain pending fs events and wait for the initial scan so the
        // assertions below aren't racing the background scanner.
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let (buffer1, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Record every event the buffer emits so each phase can assert the
        // exact emitted sequence.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let (buffer2, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let (buffer3, _) = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        // Make the buffer dirty *before* deleting its file; only a
        // FileHandleChanged event is then expected from the deletion.
        tree.flush_fs_events(&cx).await;
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3581
    /// Verifies how a buffer reacts to its file changing on disk: a buffer
    /// with no unsaved edits is silently reloaded (its contents edited to
    /// match the new file), while a buffer with unsaved edits keeps its
    /// contents and is instead marked as having a conflict.
    #[gpui::test]
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client);

        let tree = Worktree::open_local(
            client,
            dir.path(),
            false,
            Arc::new(RealFs),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan so the file entry exists before opening.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let (buffer, _) = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            // A reload is not a user edit: the buffer stays clean.
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3682
    /// Verifies that LSP diagnostics published with `related_information`
    /// are grouped: a warning with one related hint forms group 0, and an
    /// error with two related hints forms group 1. Checks both range-based
    /// iteration (`diagnostics_in_range`) and per-group lookup
    /// (`diagnostic_group`), including primary/secondary classification.
    #[gpui::test]
    async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
        let fs = Arc::new(FakeFs::new());
        let http_client = FakeHttpClient::with_404_response();
        let client = Client::new(http_client.clone());

        fs.insert_tree(
            "/the-dir",
            json!({
                "a.rs": "
                    fn foo(mut v: Vec<usize>) {
                        for x in &v {
                            v.push(1);
                        }
                    }
                "
                .unindent(),
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            "/the-dir".as_ref(),
            false,
            fs,
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let (buffer, _) = worktree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // Build an LSP publish-diagnostics message in which the primary
        // diagnostics and their hints cross-reference each other via
        // `related_information`; the grouping logic must link them together.
        let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
        let message = lsp::PublishDiagnosticsParams {
            uri: buffer_uri.clone(),
            diagnostics: vec![
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::WARNING),
                    message: "error 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "error 1 hint 1".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 1 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(1, 8),
                                lsp::Position::new(1, 9),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
                    severity: Some(DiagnosticSeverity::ERROR),
                    message: "error 2".to_string(),
                    related_information: Some(vec![
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 1".to_string(),
                        },
                        lsp::DiagnosticRelatedInformation {
                            location: lsp::Location {
                                uri: buffer_uri.clone(),
                                range: lsp::Range::new(
                                    lsp::Position::new(1, 13),
                                    lsp::Position::new(1, 15),
                                ),
                            },
                            message: "error 2 hint 2".to_string(),
                        },
                    ]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 1".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
                lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
                    severity: Some(DiagnosticSeverity::HINT),
                    message: "error 2 hint 2".to_string(),
                    related_information: Some(vec![lsp::DiagnosticRelatedInformation {
                        location: lsp::Location {
                            uri: buffer_uri.clone(),
                            range: lsp::Range::new(
                                lsp::Position::new(2, 8),
                                lsp::Position::new(2, 17),
                            ),
                        },
                        message: "original diagnostic".to_string(),
                    }]),
                    ..Default::default()
                },
            ],
            version: None,
        };

        // Feed the message through the worktree's diagnostic-update path,
        // then snapshot the buffer for the assertions below.
        worktree
            .update(&mut cx, |tree, cx| {
                tree.as_local_mut().unwrap().update_diagnostics(
                    Arc::from("a.rs".as_ref()),
                    message,
                    &Default::default(),
                    cx,
                )
            })
            .unwrap();
        let buffer = buffer.read_with(&cx, |buffer, _| buffer.snapshot());

        // All diagnostics, in range order, carrying their group ids and
        // primary/secondary flags.
        assert_eq!(
            buffer
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );

        // Group 0: the warning plus its single hint.
        assert_eq!(
            buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::WARNING,
                        message: "error 1".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 8)..Point::new(1, 9),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 1 hint 1".to_string(),
                        group_id: 0,
                        is_primary: false,
                        ..Default::default()
                    }
                },
            ]
        );
        // Group 1: the error plus its two hints, in range order.
        assert_eq!(
            buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
            &[
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 1".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(1, 13)..Point::new(1, 15),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::HINT,
                        message: "error 2 hint 2".to_string(),
                        group_id: 1,
                        is_primary: false,
                        ..Default::default()
                    }
                },
                DiagnosticEntry {
                    range: Point::new(2, 8)..Point::new(2, 17),
                    diagnostic: Diagnostic {
                        severity: DiagnosticSeverity::ERROR,
                        message: "error 2".to_string(),
                        group_id: 1,
                        is_primary: true,
                        ..Default::default()
                    }
                }
            ]
        );
    }
3950
    /// Randomized test of the background scanner. Builds a random file tree,
    /// then interleaves random filesystem mutations with random-sized
    /// batches of event deliveries, checking after each delivery that the
    /// scanner's snapshot is internally consistent. Finally verifies that
    /// (a) a fresh scan of the final tree yields an identical snapshot, and
    /// (b) snapshots captured mid-run can be caught up to the final state
    /// via `build_update`/`apply_update`.
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Mutation count and initial tree size are tunable via env vars for
        // longer local runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // Seed a snapshot containing only the root entry; the scanner fills
        // in the rest. A clone of this initial state is reused later for the
        // fresh-scan comparison.
        let mut initial_snapshot = Snapshot {
            id: WorktreeId::from_usize(0),
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Main loop: randomly either deliver a prefix of the pending events
        // to the scanner, or perform another mutation; occasionally capture
        // a snapshot for the update-replay check at the end.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Deliver any leftover events so the scanner reflects the final tree.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A brand-new scan of the final tree must match the incrementally
        // maintained snapshot exactly.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Each mid-run snapshot must be able to catch up to the final state
        // by applying an update, with or without ignored entries included.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot to simulate a
                // peer that never received them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4067
    /// Applies one random mutation to the file tree rooted at `root_path`
    /// and returns the fs events that mutation should generate. With
    /// probability `insertion_probability` (forced when the tree is nearly
    /// empty) it creates a new file or directory; otherwise it occasionally
    /// writes a random `.gitignore`, or else renames or deletes an existing
    /// entry. Note: rng draws must stay in this exact order — the test
    /// reproduces runs from a seed.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        // Synthesizes an fsevent for `path`, mimicking what the OS watcher
        // would report for the mutation just performed.
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            // Insertion: create a new file or directory under a random
            // existing directory.
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Rarely: write a .gitignore listing a random subset of the
            // chosen directory's descendants.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // Exclusive upper bound: never ignore every subdir (the
                // chosen dir itself is always in `subdirs`).
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Otherwise: rename or delete an existing entry. The root dir
            // (dirs[0]) is excluded from deletion/rename candidates.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Pick a destination that isn't inside the entry being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale instead
                // of moving into it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                // Deleting a directory: record events for every descendant,
                // as the OS watcher would.
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
4190
4191 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4192 let child_entries = std::fs::read_dir(&path).unwrap();
4193 let mut dirs = vec![path];
4194 let mut files = Vec::new();
4195 for child_entry in child_entries {
4196 let child_path = child_entry.unwrap().path();
4197 if child_path.is_dir() {
4198 let (child_dirs, child_files) = read_dir_recursive(child_path);
4199 dirs.extend(child_dirs);
4200 files.extend(child_files);
4201 } else {
4202 files.push(child_path);
4203 }
4204 }
4205 (dirs, files)
4206 }
4207
4208 fn gen_name(rng: &mut impl Rng) -> String {
4209 (0..6)
4210 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4211 .map(char::from)
4212 .collect()
4213 }
4214
    impl Snapshot {
        /// Asserts internal consistency of the snapshot: the file iterators
        /// (with and without ignored entries) must agree with the entries in
        /// `entries_by_path`; a traversal of the tree via `child_entries`
        /// must visit exactly the same paths as `entries_by_path`; and every
        /// directory tracked in `ignores` must still have both its own entry
        /// and a `.gitignore` entry.
        fn check_invariants(&self) {
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            // Both iterators must be exhausted — no files beyond those in
            // entries_by_path.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Rebuild the path list by walking the tree through
            // child_entries, and require it to match the flat entry list.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples,
        /// sorted by path, optionally excluding ignored entries. Used to
        /// compare snapshots for equality in the randomized test.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4266}