1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
7use anyhow::{anyhow, Context, Result};
8use client::{proto, Client, PeerId, TypedEnvelope, User, UserStore};
9use clock::ReplicaId;
10use futures::{Stream, StreamExt};
11use fuzzy::CharBag;
12use gpui::{
13 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
14 Task, UpgradeModelHandle, WeakModelHandle,
15};
16use language::{
17 Buffer, Diagnostic, DiagnosticEntry, DiagnosticSeverity, Language, LanguageRegistry, Operation,
18 PointUtf16, Rope,
19};
20use lazy_static::lazy_static;
21use lsp::LanguageServer;
22use parking_lot::Mutex;
23use postage::{
24 prelude::{Sink as _, Stream as _},
25 watch,
26};
27use serde::Deserialize;
28use smol::channel::{self, Sender};
29use std::{
30 any::Any,
31 cmp::{self, Ordering},
32 collections::HashMap,
33 convert::{TryFrom, TryInto},
34 ffi::{OsStr, OsString},
35 fmt,
36 future::Future,
37 ops::{Deref, Range},
38 path::{Path, PathBuf},
39 sync::{
40 atomic::{AtomicUsize, Ordering::SeqCst},
41 Arc,
42 },
43 time::{Duration, SystemTime},
44};
45use sum_tree::Bias;
46use sum_tree::{Edit, SeekTarget, SumTree};
47use util::{post_inc, ResultExt, TryFutureExt};
48
lazy_static! {
    // Cached `OsStr` for the ".gitignore" file name, compared against
    // directory entries during scanning.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
52
/// Progress of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan in progress; the snapshot is up to date.
    Idle,
    /// A background scan is currently running.
    Scanning,
    /// The last scan failed; `Arc` keeps the error cheaply cloneable.
    Err(Arc<anyhow::Error>),
}
59
/// A directory tree being edited: either rooted on the local filesystem
/// (the host side) or a replica of a collaborator's tree mirrored over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// Events emitted by a [`Worktree`] model.
pub enum Event {
    /// The worktree was closed or unshared; observers should drop it.
    Closed,
}
68
/// A remote participant collaborating on this worktree.
#[derive(Clone, Debug)]
pub struct Collaborator {
    pub user: Arc<User>,
    pub peer_id: PeerId,
    pub replica_id: ReplicaId,
}
75
76impl Collaborator {
77 fn from_proto(
78 message: proto::Collaborator,
79 user_store: &ModelHandle<UserStore>,
80 cx: &mut AsyncAppContext,
81 ) -> impl Future<Output = Result<Self>> {
82 let user = user_store.update(cx, |user_store, cx| {
83 user_store.fetch_user(message.user_id, cx)
84 });
85
86 async move {
87 Ok(Self {
88 peer_id: PeerId(message.peer_id),
89 user: user.await?,
90 replica_id: message.replica_id as ReplicaId,
91 })
92 }
93 }
94}
95
impl Entity for Worktree {
    type Event = Event;

    /// Notifies the server when the worktree model is dropped: the host
    /// closes its worktree, a guest leaves the one it joined. Sends are
    /// fire-and-forget; failures are only logged.
    fn release(&mut self, cx: &mut MutableAppContext) {
        match self {
            Self::Local(tree) => {
                // Only notify the server if the worktree was ever registered
                // (i.e. a remote id has been assigned while connected).
                if let Some(worktree_id) = *tree.remote_id.borrow() {
                    let rpc = tree.client.clone();
                    cx.spawn(|_| async move {
                        if let Err(err) = rpc.send(proto::CloseWorktree { worktree_id }).await {
                            log::error!("error closing worktree: {}", err);
                        }
                    })
                    .detach();
                }
            }
            Self::Remote(tree) => {
                let rpc = tree.client.clone();
                let worktree_id = tree.remote_id;
                cx.spawn(|_| async move {
                    if let Err(err) = rpc.send(proto::LeaveWorktree { worktree_id }).await {
                        log::error!("error closing worktree: {}", err);
                    }
                })
                .detach();
            }
        }
    }

    /// Gives local worktrees a chance to shut their language servers down
    /// gracefully before the application exits.
    fn app_will_quit(
        &mut self,
        _: &mut MutableAppContext,
    ) -> Option<std::pin::Pin<Box<dyn 'static + Future<Output = ()>>>> {
        use futures::FutureExt;

        if let Self::Local(worktree) = self {
            // Drain all servers and collect their shutdown futures first so
            // the borrow of `language_servers` ends before returning.
            let shutdown_futures = worktree
                .language_servers
                .drain()
                .filter_map(|(_, server)| server.shutdown())
                .collect::<Vec<_>>();
            Some(
                async move {
                    futures::future::join_all(shutdown_futures).await;
                }
                .boxed(),
            )
        } else {
            // Remote worktrees run no language servers; nothing to wait for.
            None
        }
    }
}
148
149impl Worktree {
    /// Creates a local worktree rooted at `path` and starts the background
    /// filesystem scanner that keeps its snapshot up to date.
    pub async fn open_local(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::new(client, user_store, path, fs.clone(), languages, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.snapshot.abs_path.clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // The scanner runs on the background executor for the lifetime of
            // the worktree, reporting progress through `scan_states_tx`.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                // Debounce fs events by 100ms so bursts of changes coalesce
                // into a single rescan.
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
174
175 pub async fn open_remote(
176 client: Arc<Client>,
177 id: u64,
178 languages: Arc<LanguageRegistry>,
179 user_store: ModelHandle<UserStore>,
180 cx: &mut AsyncAppContext,
181 ) -> Result<ModelHandle<Self>> {
182 let response = client
183 .request(proto::JoinWorktree { worktree_id: id })
184 .await?;
185 Worktree::remote(response, client, user_store, languages, cx).await
186 }
187
    /// Constructs a guest-side replica of a worktree from the server's join
    /// response: rebuilds the snapshot from the serialized entries, loads the
    /// collaborators' user records, and wires up the update stream plus the
    /// RPC message handlers.
    async fn remote(
        join_response: proto::JoinWorktreeResponse,
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let worktree = join_response
            .worktree
            .ok_or_else(|| anyhow!("empty worktree"))?;

        let remote_id = worktree.id;
        let replica_id = join_response.replica_id as ReplicaId;
        // Lowercased character bag of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        // Deserializing potentially many entries is CPU-bound, so build both
        // entry indices on the background executor.
        let (entries_by_path, entries_by_id) = cx
            .background()
            .spawn(async move {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in worktree.entries {
                    match Entry::try_from((&root_char_bag, entry)) {
                        Ok(entry) => {
                            entries_by_id_edits.push(Edit::Insert(PathEntry {
                                id: entry.id,
                                path: entry.path.clone(),
                                is_ignored: entry.is_ignored,
                                scan_id: 0,
                            }));
                            entries_by_path_edits.push(Edit::Insert(entry));
                        }
                        // Skip malformed entries rather than failing the join.
                        Err(err) => log::warn!("error for remote worktree entry {:?}", err),
                    }
                }

                let mut entries_by_path = SumTree::new();
                let mut entries_by_id = SumTree::new();
                entries_by_path.edit(entries_by_path_edits, &());
                entries_by_id.edit(entries_by_id_edits, &());
                (entries_by_path, entries_by_id)
            })
            .await;

        // Fetch all collaborators' user records up front so that each
        // `Collaborator::from_proto` below resolves from the store.
        let user_ids = join_response
            .collaborators
            .iter()
            .map(|peer| peer.user_id)
            .collect();
        user_store
            .update(cx, |user_store, cx| user_store.load_users(user_ids, cx))
            .await?;
        let mut collaborators = HashMap::with_capacity(join_response.collaborators.len());
        for message in join_response.collaborators {
            let collaborator = Collaborator::from_proto(message, &user_store, cx).await?;
            collaborators.insert(collaborator.peer_id, collaborator);
        }

        let worktree = cx.update(|cx| {
            cx.add_model(|cx: &mut ModelContext<Worktree>| {
                let snapshot = Snapshot {
                    id: cx.model_id(),
                    scan_id: 0,
                    // Guests never touch the real filesystem path.
                    abs_path: Path::new("").into(),
                    root_name,
                    root_char_bag,
                    ignores: Default::default(),
                    entries_by_path,
                    entries_by_id,
                    removed_entry_ids: Default::default(),
                    next_entry_id: Default::default(),
                };

                let (updates_tx, mut updates_rx) = postage::mpsc::channel(64);
                let (mut snapshot_tx, snapshot_rx) = watch::channel_with(snapshot.clone());

                // Apply incoming worktree updates on the background executor,
                // publishing each new snapshot on the watch channel.
                cx.background()
                    .spawn(async move {
                        while let Some(update) = updates_rx.recv().await {
                            let mut snapshot = snapshot_tx.borrow().clone();
                            if let Err(error) = snapshot.apply_update(update) {
                                log::error!("error applying worktree update: {}", error);
                            }
                            *snapshot_tx.borrow_mut() = snapshot;
                        }
                    })
                    .detach();

                // On the foreground, re-poll whenever a new snapshot is
                // published; stop once the model has been dropped.
                {
                    let mut snapshot_rx = snapshot_rx.clone();
                    cx.spawn_weak(|this, mut cx| async move {
                        while let Some(_) = snapshot_rx.recv().await {
                            if let Some(this) = cx.read(|cx| this.upgrade(cx)) {
                                this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                            } else {
                                break;
                            }
                        }
                    })
                    .detach();
                }

                // Keep these subscriptions alive for the worktree's lifetime;
                // dropping them would unregister the message handlers.
                let _subscriptions = vec![
                    client.subscribe_to_entity(remote_id, cx, Self::handle_add_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_remove_collaborator),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_update_buffer),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_buffer_saved),
                    client.subscribe_to_entity(remote_id, cx, Self::handle_unshare),
                ];

                Worktree::Remote(RemoteWorktree {
                    remote_id,
                    replica_id,
                    snapshot,
                    snapshot_rx,
                    updates_tx,
                    client: client.clone(),
                    open_buffers: Default::default(),
                    diagnostics: Vec::new(),
                    collaborators,
                    queued_operations: Default::default(),
                    languages,
                    user_store,
                    _subscriptions,
                })
            })
        });

        Ok(worktree)
    }
322
323 pub fn as_local(&self) -> Option<&LocalWorktree> {
324 if let Worktree::Local(worktree) = self {
325 Some(worktree)
326 } else {
327 None
328 }
329 }
330
331 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
332 if let Worktree::Local(worktree) = self {
333 Some(worktree)
334 } else {
335 None
336 }
337 }
338
339 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
340 if let Worktree::Remote(worktree) = self {
341 Some(worktree)
342 } else {
343 None
344 }
345 }
346
347 pub fn snapshot(&self) -> Snapshot {
348 match self {
349 Worktree::Local(worktree) => worktree.snapshot(),
350 Worktree::Remote(worktree) => worktree.snapshot(),
351 }
352 }
353
354 pub fn replica_id(&self) -> ReplicaId {
355 match self {
356 Worktree::Local(_) => 0,
357 Worktree::Remote(worktree) => worktree.replica_id,
358 }
359 }
360
361 pub fn languages(&self) -> &Arc<LanguageRegistry> {
362 match self {
363 Worktree::Local(worktree) => &worktree.languages,
364 Worktree::Remote(worktree) => &worktree.languages,
365 }
366 }
367
368 pub fn user_store(&self) -> &ModelHandle<UserStore> {
369 match self {
370 Worktree::Local(worktree) => &worktree.user_store,
371 Worktree::Remote(worktree) => &worktree.user_store,
372 }
373 }
374
    /// Handles an `AddCollaborator` message: resolves the collaborator's user
    /// asynchronously, then inserts it into the worktree's collaborator map.
    pub fn handle_add_collaborator(
        &mut self,
        mut envelope: TypedEnvelope<proto::AddCollaborator>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let user_store = self.user_store().clone();
        let collaborator = envelope
            .payload
            .collaborator
            .take()
            .ok_or_else(|| anyhow!("empty collaborator"))?;

        // The user lookup may hit the network, so finish in a spawned task;
        // errors there are logged rather than propagated to the sender.
        cx.spawn(|this, mut cx| {
            async move {
                let collaborator =
                    Collaborator::from_proto(collaborator, &user_store, &mut cx).await?;
                this.update(&mut cx, |this, cx| match this {
                    Worktree::Local(worktree) => worktree.add_collaborator(collaborator, cx),
                    Worktree::Remote(worktree) => worktree.add_collaborator(collaborator, cx),
                });
                Ok(())
            }
            .log_err()
        })
        .detach();

        Ok(())
    }
404
405 pub fn handle_remove_collaborator(
406 &mut self,
407 envelope: TypedEnvelope<proto::RemoveCollaborator>,
408 _: Arc<Client>,
409 cx: &mut ModelContext<Self>,
410 ) -> Result<()> {
411 match self {
412 Worktree::Local(worktree) => worktree.remove_collaborator(envelope, cx),
413 Worktree::Remote(worktree) => worktree.remove_collaborator(envelope, cx),
414 }
415 }
416
417 pub fn handle_update(
418 &mut self,
419 envelope: TypedEnvelope<proto::UpdateWorktree>,
420 _: Arc<Client>,
421 cx: &mut ModelContext<Self>,
422 ) -> anyhow::Result<()> {
423 self.as_remote_mut()
424 .unwrap()
425 .update_from_remote(envelope, cx)
426 }
427
    /// Handles a guest's request to open a buffer: loads it on the host and
    /// responds with the serialized buffer state.
    pub fn handle_open_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> anyhow::Result<()> {
        let receipt = envelope.receipt();

        // Only the host owns the underlying files, so this message is only
        // valid for local worktrees.
        let response = self
            .as_local_mut()
            .unwrap()
            .open_remote_buffer(envelope, cx);

        // Await the load and send the reply off the main thread; failures
        // are logged rather than propagated.
        cx.background()
            .spawn(
                async move {
                    rpc.respond(receipt, response.await?).await?;
                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
453
454 pub fn handle_close_buffer(
455 &mut self,
456 envelope: TypedEnvelope<proto::CloseBuffer>,
457 _: Arc<Client>,
458 cx: &mut ModelContext<Self>,
459 ) -> anyhow::Result<()> {
460 self.as_local_mut()
461 .unwrap()
462 .close_remote_buffer(envelope, cx)
463 }
464
465 pub fn collaborators(&self) -> &HashMap<PeerId, Collaborator> {
466 match self {
467 Worktree::Local(worktree) => &worktree.collaborators,
468 Worktree::Remote(worktree) => &worktree.collaborators,
469 }
470 }
471
472 pub fn diagnostic_summaries<'a>(
473 &'a self,
474 cx: &'a AppContext,
475 ) -> impl Iterator<Item = DiagnosticSummary> {
476 std::iter::empty()
477 }
478
479 pub fn open_buffer(
480 &mut self,
481 path: impl AsRef<Path>,
482 cx: &mut ModelContext<Self>,
483 ) -> Task<Result<ModelHandle<Buffer>>> {
484 match self {
485 Worktree::Local(worktree) => worktree.open_buffer(path.as_ref(), cx),
486 Worktree::Remote(worktree) => worktree.open_buffer(path.as_ref(), cx),
487 }
488 }
489
    /// Test-only helper: reports whether a buffer for `path` is currently
    /// open (i.e. its weak handle still upgrades) in this worktree.
    #[cfg(feature = "test-support")]
    pub fn has_open_buffer(&self, path: impl AsRef<Path>, cx: &AppContext) -> bool {
        // Remote worktrees may hold placeholder entries for buffers that are
        // still loading; only fully loaded buffers count here.
        let mut open_buffers: Box<dyn Iterator<Item = _>> = match self {
            Worktree::Local(worktree) => Box::new(worktree.open_buffers.values()),
            Worktree::Remote(worktree) => {
                Box::new(worktree.open_buffers.values().filter_map(|buf| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some(buf)
                    } else {
                        None
                    }
                }))
            }
        };

        let path = path.as_ref();
        open_buffers
            .find(|buffer| {
                if let Some(file) = buffer.upgrade(cx).and_then(|buffer| buffer.read(cx).file()) {
                    file.path().as_ref() == path
                } else {
                    false
                }
            })
            .is_some()
    }
516
517 pub fn handle_update_buffer(
518 &mut self,
519 envelope: TypedEnvelope<proto::UpdateBuffer>,
520 _: Arc<Client>,
521 cx: &mut ModelContext<Self>,
522 ) -> Result<()> {
523 let payload = envelope.payload.clone();
524 let buffer_id = payload.buffer_id as usize;
525 let ops = payload
526 .operations
527 .into_iter()
528 .map(|op| language::proto::deserialize_operation(op))
529 .collect::<Result<Vec<_>, _>>()?;
530
531 match self {
532 Worktree::Local(worktree) => {
533 let buffer = worktree
534 .open_buffers
535 .get(&buffer_id)
536 .and_then(|buf| buf.upgrade(cx))
537 .ok_or_else(|| {
538 anyhow!("invalid buffer {} in update buffer message", buffer_id)
539 })?;
540 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
541 }
542 Worktree::Remote(worktree) => match worktree.open_buffers.get_mut(&buffer_id) {
543 Some(RemoteBuffer::Operations(pending_ops)) => pending_ops.extend(ops),
544 Some(RemoteBuffer::Loaded(buffer)) => {
545 if let Some(buffer) = buffer.upgrade(cx) {
546 buffer.update(cx, |buffer, cx| buffer.apply_ops(ops, cx))?;
547 } else {
548 worktree
549 .open_buffers
550 .insert(buffer_id, RemoteBuffer::Operations(ops));
551 }
552 }
553 None => {
554 worktree
555 .open_buffers
556 .insert(buffer_id, RemoteBuffer::Operations(ops));
557 }
558 },
559 }
560
561 Ok(())
562 }
563
    /// Handles a guest's request to save a buffer: saves the host-side
    /// buffer, then replies with the resulting version and mtime.
    pub fn handle_save_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::SaveBuffer>,
        rpc: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        let sender_id = envelope.original_sender_id()?;
        // The buffer must previously have been shared with this peer via
        // `open_remote_buffer`; otherwise the request is invalid.
        let buffer = self
            .as_local()
            .unwrap()
            .shared_buffers
            .get(&sender_id)
            .and_then(|shared_buffers| shared_buffers.get(&envelope.payload.buffer_id).cloned())
            .ok_or_else(|| anyhow!("unknown buffer id {}", envelope.payload.buffer_id))?;

        let receipt = envelope.receipt();
        let worktree_id = envelope.payload.worktree_id;
        let buffer_id = envelope.payload.buffer_id;
        // The save itself must run on the foreground (it touches the model);
        // waiting for it and replying happens in the background.
        let save = cx.spawn(|_, mut cx| async move {
            buffer.update(&mut cx, |buffer, cx| buffer.save(cx))?.await
        });

        cx.background()
            .spawn(
                async move {
                    let (version, mtime) = save.await?;

                    rpc.respond(
                        receipt,
                        proto::BufferSaved {
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(mtime.into()),
                        },
                    )
                    .await?;

                    Ok(())
                }
                .log_err(),
            )
            .detach();

        Ok(())
    }
610
611 pub fn handle_buffer_saved(
612 &mut self,
613 envelope: TypedEnvelope<proto::BufferSaved>,
614 _: Arc<Client>,
615 cx: &mut ModelContext<Self>,
616 ) -> Result<()> {
617 let payload = envelope.payload.clone();
618 let worktree = self.as_remote_mut().unwrap();
619 if let Some(buffer) = worktree
620 .open_buffers
621 .get(&(payload.buffer_id as usize))
622 .and_then(|buf| buf.upgrade(cx))
623 {
624 buffer.update(cx, |buffer, cx| {
625 let version = payload.version.try_into()?;
626 let mtime = payload
627 .mtime
628 .ok_or_else(|| anyhow!("missing mtime"))?
629 .into();
630 buffer.did_save(version, mtime, None, cx);
631 Result::<_, anyhow::Error>::Ok(())
632 })?;
633 }
634 Ok(())
635 }
636
    /// Handles the host unsharing the worktree: emits [`Event::Closed`] so
    /// observers (e.g. workspace views) can drop it.
    pub fn handle_unshare(
        &mut self,
        _: TypedEnvelope<proto::UnshareWorktree>,
        _: Arc<Client>,
        cx: &mut ModelContext<Self>,
    ) -> Result<()> {
        cx.emit(Event::Closed);
        Ok(())
    }
646
    /// Refreshes `self.snapshot` from its authoritative source — the
    /// background scanner for local trees, the update stream for remote
    /// ones — and keeps re-polling while a local scan is still in progress.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => {
                let is_fake_fs = worktree.fs.is_fake();
                worktree.snapshot = worktree.background_snapshot.lock().clone();
                if worktree.is_scanning() {
                    if worktree.poll_task.is_none() {
                        // Re-poll on a delay so the snapshot isn't cloned on
                        // every scanner tick; with a fake fs (tests) just
                        // yield so no wall-clock time has to pass.
                        worktree.poll_task = Some(cx.spawn(|this, mut cx| async move {
                            if is_fake_fs {
                                smol::future::yield_now().await;
                            } else {
                                smol::Timer::after(Duration::from_millis(100)).await;
                            }
                            this.update(&mut cx, |this, cx| {
                                this.as_local_mut().unwrap().poll_task = None;
                                this.poll_snapshot(cx);
                            })
                        }));
                    }
                } else {
                    // Scan finished: stop polling and reconcile open buffers
                    // against the final snapshot.
                    worktree.poll_task.take();
                    self.update_open_buffers(cx);
                }
            }
            Self::Remote(worktree) => {
                worktree.snapshot = worktree.snapshot_rx.borrow().clone();
                self.update_open_buffers(cx);
            }
        };

        cx.notify();
    }
679
    /// Reconciles every open buffer's `File` with the current snapshot:
    /// re-resolves each file by entry id, falls back to its path, and marks
    /// it deleted (entry id cleared) when neither resolves. Also prunes
    /// buffers whose weak handles can no longer be upgraded.
    fn update_open_buffers(&mut self, cx: &mut ModelContext<Self>) {
        // On a remote worktree, buffers that are still loading are skipped.
        let open_buffers: Box<dyn Iterator<Item = _>> = match &self {
            Self::Local(worktree) => Box::new(worktree.open_buffers.iter()),
            Self::Remote(worktree) => {
                Box::new(worktree.open_buffers.iter().filter_map(|(id, buf)| {
                    if let RemoteBuffer::Loaded(buf) = buf {
                        Some((id, buf))
                    } else {
                        None
                    }
                }))
            }
        };

        let local = self.as_local().is_some();
        let worktree_path = self.abs_path.clone();
        let worktree_handle = cx.handle();
        let mut buffers_to_delete = Vec::new();
        for (buffer_id, buffer) in open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| {
                    if let Some(old_file) = buffer.file() {
                        // Prefer resolving by entry id (stable across
                        // renames), then by path; otherwise the file no
                        // longer exists in the snapshot.
                        let new_file = if let Some(entry) = old_file
                            .entry_id()
                            .and_then(|entry_id| self.entry_for_id(entry_id))
                        {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else if let Some(entry) = self.entry_for_path(old_file.path().as_ref()) {
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: Some(entry.id),
                                mtime: entry.mtime,
                                path: entry.path.clone(),
                                worktree: worktree_handle.clone(),
                            }
                        } else {
                            // Entry is gone: keep the old path and mtime but
                            // clear the entry id to mark the file deleted.
                            File {
                                is_local: local,
                                worktree_path: worktree_path.clone(),
                                entry_id: None,
                                path: old_file.path().clone(),
                                mtime: old_file.mtime(),
                                worktree: worktree_handle.clone(),
                            }
                        };

                        if let Some(task) = buffer.file_updated(Box::new(new_file), cx) {
                            task.detach();
                        }
                    }
                });
            } else {
                // The buffer was dropped; remember its id for cleanup below.
                buffers_to_delete.push(*buffer_id);
            }
        }

        for buffer_id in buffers_to_delete {
            match self {
                Self::Local(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
                Self::Remote(worktree) => {
                    worktree.open_buffers.remove(&buffer_id);
                }
            }
        }
    }
755
756 fn update_diagnostics(
757 &mut self,
758 params: lsp::PublishDiagnosticsParams,
759 cx: &mut ModelContext<Worktree>,
760 ) -> Result<()> {
761 let this = self.as_local_mut().ok_or_else(|| anyhow!("not local"))?;
762 let abs_path = params
763 .uri
764 .to_file_path()
765 .map_err(|_| anyhow!("URI is not a file"))?;
766 let worktree_path = Arc::from(
767 abs_path
768 .strip_prefix(&this.abs_path)
769 .context("path is not within worktree")?,
770 );
771
772 let mut group_ids_by_diagnostic_range = HashMap::new();
773 let mut diagnostics_by_group_id = HashMap::new();
774 let mut next_group_id = 0;
775 for diagnostic in ¶ms.diagnostics {
776 let source = diagnostic.source.as_ref();
777 let code = diagnostic.code.as_ref();
778 let group_id = diagnostic_ranges(&diagnostic, &abs_path)
779 .find_map(|range| group_ids_by_diagnostic_range.get(&(source, code, range)))
780 .copied()
781 .unwrap_or_else(|| {
782 let group_id = post_inc(&mut next_group_id);
783 for range in diagnostic_ranges(&diagnostic, &abs_path) {
784 group_ids_by_diagnostic_range.insert((source, code, range), group_id);
785 }
786 group_id
787 });
788
789 diagnostics_by_group_id
790 .entry(group_id)
791 .or_insert(Vec::new())
792 .push(DiagnosticEntry {
793 range: diagnostic.range.start.to_point_utf16()
794 ..diagnostic.range.end.to_point_utf16(),
795 diagnostic: Diagnostic {
796 source: diagnostic.source.clone(),
797 code: diagnostic.code.clone().map(|code| match code {
798 lsp::NumberOrString::Number(code) => code.to_string(),
799 lsp::NumberOrString::String(code) => code,
800 }),
801 severity: diagnostic.severity.unwrap_or(DiagnosticSeverity::ERROR),
802 message: diagnostic.message.clone(),
803 group_id,
804 is_primary: false,
805 },
806 });
807 }
808
809 let diagnostics = diagnostics_by_group_id
810 .into_values()
811 .flat_map(|mut diagnostics| {
812 let primary = diagnostics
813 .iter_mut()
814 .min_by_key(|entry| entry.diagnostic.severity)
815 .unwrap();
816 primary.diagnostic.is_primary = true;
817 diagnostics
818 })
819 .collect::<Vec<_>>();
820
821 for buffer in this.open_buffers.values() {
822 if let Some(buffer) = buffer.upgrade(cx) {
823 if buffer
824 .read(cx)
825 .file()
826 .map_or(false, |file| *file.path() == worktree_path)
827 {
828 let (remote_id, operation) = buffer.update(cx, |buffer, cx| {
829 (
830 buffer.remote_id(),
831 buffer.update_diagnostics(params.version, diagnostics, cx),
832 )
833 });
834 self.send_buffer_update(remote_id, operation?, cx);
835 return Ok(());
836 }
837 }
838 }
839
840 this.diagnostics.insert(worktree_path, diagnostics);
841 Ok(())
842 }
843
    /// Sends a buffer operation to the server if this worktree is currently
    /// connected (a registered local tree or any remote tree). On failure
    /// the operation is pushed onto `queued_operations` for a later retry.
    fn send_buffer_update(
        &mut self,
        buffer_id: u64,
        operation: Operation,
        cx: &mut ModelContext<Self>,
    ) {
        if let Some((rpc, remote_id)) = match self {
            // A local worktree only has an id while registered with the
            // server; otherwise there is nowhere to send the operation.
            Worktree::Local(worktree) => worktree
                .remote_id
                .borrow()
                .map(|id| (worktree.client.clone(), id)),
            Worktree::Remote(worktree) => Some((worktree.client.clone(), worktree.remote_id)),
        } {
            cx.spawn(|worktree, mut cx| async move {
                if let Err(error) = rpc
                    .request(proto::UpdateBuffer {
                        worktree_id: remote_id,
                        buffer_id,
                        operations: vec![language::proto::serialize_operation(&operation)],
                    })
                    .await
                {
                    // Queue the failed operation so it can be retried later.
                    worktree.update(&mut cx, |worktree, _| {
                        log::error!("error sending buffer operation: {}", error);
                        match worktree {
                            Worktree::Local(t) => &mut t.queued_operations,
                            Worktree::Remote(t) => &mut t.queued_operations,
                        }
                        .push((buffer_id, operation));
                    });
                }
            })
            .detach();
        }
    }
879}
880
impl Deref for Worktree {
    type Target = Snapshot;

    /// Both worktree flavors hold their latest `Snapshot`, so snapshot
    /// accessors can be called directly on the worktree itself.
    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
891
/// A worktree backed by the local filesystem — the host side of
/// collaboration: it owns the files, runs language servers, and serves
/// buffers to guests.
pub struct LocalWorktree {
    // Foreground copy of the scanner's snapshot, refreshed by `poll_snapshot`.
    snapshot: Snapshot,
    // Parsed `.zed.toml`, or the default if absent/malformed.
    config: WorktreeConfig,
    // Snapshot mutated directly by the background scanner.
    background_snapshot: Arc<Mutex<Snapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    _maintain_remote_id_task: Task<Option<()>>,
    // Pending foreground task that re-polls the snapshot while scanning.
    poll_task: Option<Task<()>>,
    // Server-assigned id; `None` while disconnected.
    remote_id: watch::Receiver<Option<u64>>,
    share: Option<ShareState>,
    // Weak handles to buffers opened from this tree, keyed by buffer id.
    open_buffers: HashMap<usize, WeakModelHandle<Buffer>>,
    // Strong handles retained on behalf of each guest peer.
    shared_buffers: HashMap<PeerId, HashMap<u64, ModelHandle<Buffer>>>,
    // Diagnostics for files that aren't currently open, keyed by
    // worktree-relative path.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    collaborators: HashMap<PeerId, Collaborator>,
    // Buffer operations that failed to send and await retry.
    queued_operations: Vec<(u64, Operation)>,
    languages: Arc<LanguageRegistry>,
    client: Arc<Client>,
    user_store: ModelHandle<UserStore>,
    fs: Arc<dyn Fs>,
    // Running language servers, keyed by language name.
    language_servers: HashMap<String, Arc<LanguageServer>>,
}
913
/// Contents of an optional `.zed.toml` at the worktree root.
#[derive(Default, Deserialize)]
struct WorktreeConfig {
    // Logins of users allowed to join this worktree when it is shared.
    collaborators: Vec<String>,
}
918
919impl LocalWorktree {
    /// Creates the `LocalWorktree` model for `path`.
    ///
    /// Reads the root metadata and optional `.zed.toml` config, seeds the
    /// snapshot with the root entry, and spawns the tasks that (a) keep the
    /// server-assigned remote id current across reconnects and (b) forward
    /// scan-state changes into snapshot polls. Returns the model along with
    /// the sender the background scanner should report scan states on.
    async fn new(
        client: Arc<Client>,
        user_store: ModelHandle<UserStore>,
        path: impl Into<Arc<Path>>,
        fs: Arc<dyn Fs>,
        languages: Arc<LanguageRegistry>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, Sender<ScanState>)> {
        let abs_path = path.into();
        // Entries are keyed relative to the root, which is the empty path.
        let path: Arc<Path> = Arc::from(Path::new(""));
        let next_entry_id = AtomicUsize::new(0);

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs.metadata(&abs_path).await?;

        // A missing or malformed `.zed.toml` silently falls back to the
        // default (empty) config.
        let mut config = WorktreeConfig::default();
        if let Ok(zed_toml) = fs.load(&abs_path.join(".zed.toml")).await {
            if let Ok(parsed) = toml::from_str(&zed_toml) {
                config = parsed;
            }
        }

        let (scan_states_tx, scan_states_rx) = smol::channel::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = Snapshot {
                id: cx.model_id(),
                scan_id: 0,
                abs_path,
                root_name: root_name.clone(),
                root_char_bag,
                ignores: Default::default(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id: Arc::new(next_entry_id),
            };
            if let Some(metadata) = metadata {
                // Seed the snapshot with the root entry so the tree isn't
                // empty before the first scan completes.
                snapshot.insert_entry(
                    Entry::new(
                        path.into(),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            // Register the worktree with the server whenever the client is
            // connected, clearing the remote id while disconnected.
            let (mut remote_id_tx, remote_id_rx) = watch::channel();
            let _maintain_remote_id_task = cx.spawn_weak({
                let rpc = client.clone();
                move |this, cx| {
                    async move {
                        let mut status = rpc.status();
                        while let Some(status) = status.recv().await {
                            if let Some(this) = this.upgrade(&cx) {
                                let remote_id = if let client::Status::Connected { .. } = status {
                                    let authorized_logins = this.read_with(&cx, |this, _| {
                                        this.as_local().unwrap().config.collaborators.clone()
                                    });
                                    let response = rpc
                                        .request(proto::OpenWorktree {
                                            root_name: root_name.clone(),
                                            authorized_logins,
                                        })
                                        .await?;

                                    Some(response.worktree_id)
                                } else {
                                    None
                                };
                                if remote_id_tx.send(remote_id).await.is_err() {
                                    break;
                                }
                            }
                        }
                        Ok(())
                    }
                    .log_err()
                }
            });

            let tree = Self {
                snapshot: snapshot.clone(),
                config,
                remote_id: remote_id_rx,
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                _maintain_remote_id_task,
                share: None,
                poll_task: None,
                open_buffers: Default::default(),
                shared_buffers: Default::default(),
                diagnostics: Default::default(),
                queued_operations: Default::default(),
                collaborators: Default::default(),
                languages,
                client,
                user_store,
                fs,
                language_servers: Default::default(),
            };

            // Forward scan-state changes: refresh the snapshot on each one
            // and, once scanning settles while sharing, push the finished
            // snapshot to the share channel.
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(scan_state) = scan_states_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        let to_send = handle.update(&mut cx, |this, cx| {
                            last_scan_state_tx.blocking_send(scan_state).ok();
                            this.poll_snapshot(cx);
                            let tree = this.as_local_mut().unwrap();
                            if !tree.is_scanning() {
                                if let Some(share) = tree.share.as_ref() {
                                    return Some((tree.snapshot(), share.snapshots_tx.clone()));
                                }
                            }
                            None
                        });

                        if let Some((snapshot, snapshots_to_send_tx)) = to_send {
                            if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
                                log::error!("error submitting snapshot to send {}", err);
                            }
                        }
                    } else {
                        // The worktree was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
1062
    /// The registry used to detect languages and start language servers.
    pub fn languages(&self) -> &LanguageRegistry {
        &self.languages
    }
1066
    /// Returns the running language server for `language`, starting one (and
    /// wiring its diagnostics into this worktree) if needed. Returns `None`
    /// when the language has no server or it failed to start.
    pub fn ensure_language_server(
        &mut self,
        language: &Language,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Arc<LanguageServer>> {
        if let Some(server) = self.language_servers.get(language.name()) {
            return Some(server.clone());
        }

        if let Some(language_server) = language
            .start_server(self.abs_path(), cx)
            .log_err()
            .flatten()
        {
            // Bridge the server's diagnostics notifications onto a channel,
            // then drain that channel on the foreground so the worktree
            // model can be updated.
            let (diagnostics_tx, diagnostics_rx) = smol::channel::unbounded();
            language_server
                .on_notification::<lsp::notification::PublishDiagnostics, _>(move |params| {
                    smol::block_on(diagnostics_tx.send(params)).ok();
                })
                .detach();
            cx.spawn_weak(|this, mut cx| async move {
                while let Ok(diagnostics) = diagnostics_rx.recv().await {
                    if let Some(handle) = cx.read(|cx| this.upgrade(cx)) {
                        handle.update(&mut cx, |this, cx| {
                            this.update_diagnostics(diagnostics, cx).log_err();
                        });
                    } else {
                        // The worktree was dropped; stop listening.
                        break;
                    }
                }
            })
            .detach();

            self.language_servers
                .insert(language.name().to_string(), language_server.clone());
            Some(language_server.clone())
        } else {
            None
        }
    }
1107
    /// Opens the buffer for `path`, reusing an already-open buffer when one
    /// exists. Loading, language detection, language-server startup, and any
    /// stashed diagnostics are all applied before the buffer is returned.
    pub fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let handle = cx.handle();

        // If there is already a buffer for the given path, then return it.
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == handle.id() && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                // Drop entries whose buffers have been released.
                false
            }
        });

        let path = Arc::from(path);
        cx.spawn(|this, mut cx| async move {
            if let Some(existing_buffer) = existing_buffer {
                Ok(existing_buffer)
            } else {
                let (file, contents) = this
                    .update(&mut cx, |this, cx| this.as_local().unwrap().load(&path, cx))
                    .await?;
                let language = this.read_with(&cx, |this, _| {
                    use language::File;
                    this.languages().select_language(file.full_path()).cloned()
                });
                // Pull any diagnostics published before the file was opened
                // and make sure a language server is running for it.
                let (diagnostics, language_server) = this.update(&mut cx, |this, cx| {
                    let this = this.as_local_mut().unwrap();
                    (
                        this.diagnostics.remove(path.as_ref()),
                        language
                            .as_ref()
                            .and_then(|language| this.ensure_language_server(language, cx)),
                    )
                });
                let buffer = cx.add_model(|cx| {
                    let mut buffer = Buffer::from_file(0, contents, Box::new(file), cx);
                    buffer.set_language(language, language_server, cx);
                    if let Some(diagnostics) = diagnostics {
                        buffer.update_diagnostics(None, diagnostics, cx).unwrap();
                    }
                    buffer
                });
                this.update(&mut cx, |this, _| {
                    let this = this
                        .as_local_mut()
                        .ok_or_else(|| anyhow!("must be a local worktree"))?;

                    // Track the new buffer weakly so it is released once the
                    // last strong handle drops.
                    this.open_buffers.insert(buffer.id(), buffer.downgrade());
                    Ok(buffer)
                })
            }
        })
    }
1170
    /// Handles a guest's request to open a buffer: opens it locally, records
    /// the guest's interest in `shared_buffers`, and replies with the
    /// buffer's serialized state.
    pub fn open_remote_buffer(
        &mut self,
        envelope: TypedEnvelope<proto::OpenBuffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<proto::OpenBufferResponse>> {
        let peer_id = envelope.original_sender_id();
        let path = Path::new(&envelope.payload.path);

        let buffer = self.open_buffer(path, cx);

        cx.spawn(|this, mut cx| async move {
            let buffer = buffer.await?;
            this.update(&mut cx, |this, cx| {
                // Hold a strong handle on behalf of the guest so the buffer
                // stays alive until they close it.
                this.as_local_mut()
                    .unwrap()
                    .shared_buffers
                    .entry(peer_id?)
                    .or_default()
                    .insert(buffer.id() as u64, buffer.clone());

                Ok(proto::OpenBufferResponse {
                    buffer: Some(buffer.update(cx.as_mut(), |buffer, _| buffer.to_proto())),
                })
            })
        })
    }
1197
1198 pub fn close_remote_buffer(
1199 &mut self,
1200 envelope: TypedEnvelope<proto::CloseBuffer>,
1201 cx: &mut ModelContext<Worktree>,
1202 ) -> Result<()> {
1203 if let Some(shared_buffers) = self.shared_buffers.get_mut(&envelope.original_sender_id()?) {
1204 shared_buffers.remove(&envelope.payload.buffer_id);
1205 cx.notify();
1206 }
1207
1208 Ok(())
1209 }
1210
1211 pub fn add_collaborator(
1212 &mut self,
1213 collaborator: Collaborator,
1214 cx: &mut ModelContext<Worktree>,
1215 ) {
1216 self.collaborators
1217 .insert(collaborator.peer_id, collaborator);
1218 cx.notify();
1219 }
1220
1221 pub fn remove_collaborator(
1222 &mut self,
1223 envelope: TypedEnvelope<proto::RemoveCollaborator>,
1224 cx: &mut ModelContext<Worktree>,
1225 ) -> Result<()> {
1226 let peer_id = PeerId(envelope.payload.peer_id);
1227 let replica_id = self
1228 .collaborators
1229 .remove(&peer_id)
1230 .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
1231 .replica_id;
1232 self.shared_buffers.remove(&peer_id);
1233 for (_, buffer) in &self.open_buffers {
1234 if let Some(buffer) = buffer.upgrade(cx) {
1235 buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
1236 }
1237 }
1238 cx.notify();
1239
1240 Ok(())
1241 }
1242
1243 pub fn scan_complete(&self) -> impl Future<Output = ()> {
1244 let mut scan_state_rx = self.last_scan_state_rx.clone();
1245 async move {
1246 let mut scan_state = Some(scan_state_rx.borrow().clone());
1247 while let Some(ScanState::Scanning) = scan_state {
1248 scan_state = scan_state_rx.recv().await;
1249 }
1250 }
1251 }
1252
    /// The server-assigned id of this worktree, if it has been registered
    /// with the server yet.
    pub fn remote_id(&self) -> Option<u64> {
        *self.remote_id.borrow()
    }
1256
1257 pub fn next_remote_id(&self) -> impl Future<Output = Option<u64>> {
1258 let mut remote_id = self.remote_id.clone();
1259 async move {
1260 while let Some(remote_id) = remote_id.recv().await {
1261 if remote_id.is_some() {
1262 return remote_id;
1263 }
1264 }
1265 None
1266 }
1267 }
1268
1269 fn is_scanning(&self) -> bool {
1270 if let ScanState::Scanning = *self.last_scan_state_rx.borrow() {
1271 true
1272 } else {
1273 false
1274 }
1275 }
1276
    /// A clone of the worktree's current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1280
    /// The absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Path {
        self.snapshot.abs_path.as_ref()
    }
1284
    /// Whether the given absolute path lies inside this worktree. This is a
    /// pure path-prefix check; it does not consult the filesystem.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.snapshot.abs_path)
    }
1288
1289 fn absolutize(&self, path: &Path) -> PathBuf {
1290 if path.file_name().is_some() {
1291 self.snapshot.abs_path.join(path)
1292 } else {
1293 self.snapshot.abs_path.to_path_buf()
1294 }
1295 }
1296
    /// Reads the file at the given worktree-relative path from disk and
    /// returns a `File` handle together with the file's text.
    fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let worktree_path = self.abs_path.clone();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;
            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = refresh_entry(fs.as_ref(), &background_snapshot, path, &abs_path).await?;
            // Let the foreground snapshot pick up the refreshed entry.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok((
                File {
                    entry_id: Some(entry.id),
                    worktree: handle,
                    worktree_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                },
                text,
            ))
        })
    }
1322
    /// Saves a buffer's contents to a (possibly new) path within this
    /// worktree, registers the buffer under its id, and returns the `File`
    /// handle describing the saved path.
    pub fn save_buffer_as(
        &self,
        buffer: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<File>> {
        let save = self.save(path, text, cx);
        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Track the buffer weakly so future opens of this path reuse it.
                this.open_buffers.insert(buffer.id(), buffer.downgrade());
                Ok(File {
                    entry_id: Some(entry.id),
                    worktree: cx.handle(),
                    worktree_path: this.abs_path.clone(),
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                })
            })
        })
    }
1347
    /// Writes `text` to the given worktree-relative path on disk, refreshes
    /// the corresponding snapshot entry, and returns it.
    fn save(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let background_snapshot = self.background_snapshot.clone();
        let fs = self.fs.clone();
        // Perform the disk write and entry refresh off the main thread.
        let save = cx.background().spawn(async move {
            fs.save(&abs_path, &text).await?;
            refresh_entry(fs.as_ref(), &background_snapshot, path.clone(), &abs_path).await
        });

        cx.spawn(|this, mut cx| async move {
            let entry = save.await?;
            // Let the foreground snapshot pick up the refreshed entry.
            this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
            Ok(entry)
        })
    }
1369
    /// Shares this worktree with collaborators: registers it with the
    /// server, spawns a background task that streams snapshot diffs, and
    /// subscribes the RPC handlers used during collaboration. Resolves to
    /// the worktree's remote id.
    pub fn share(&mut self, cx: &mut ModelContext<Worktree>) -> Task<anyhow::Result<u64>> {
        let snapshot = self.snapshot();
        let share_request = self.share_request(cx);
        let rpc = self.client.clone();
        cx.spawn(|this, mut cx| async move {
            let share_request = if let Some(request) = share_request.await {
                request
            } else {
                return Err(anyhow!("failed to open worktree on the server"));
            };

            let remote_id = share_request.worktree.as_ref().unwrap().id;
            let share_response = rpc.request(share_request).await?;

            log::info!("sharing worktree {:?}", share_response);
            let (snapshots_to_send_tx, snapshots_to_send_rx) =
                smol::channel::unbounded::<Snapshot>();

            // Background task: diff each queued snapshot against the last
            // one successfully sent, and push the delta to the server.
            cx.background()
                .spawn({
                    let rpc = rpc.clone();
                    async move {
                        let mut prev_snapshot = snapshot;
                        while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
                            let message = snapshot.build_update(&prev_snapshot, remote_id, false);
                            match rpc.send(message).await {
                                Ok(()) => prev_snapshot = snapshot,
                                Err(err) => log::error!("error sending snapshot diff {}", err),
                            }
                        }
                    }
                })
                .detach();

            this.update(&mut cx, |worktree, cx| {
                // Keep these subscriptions alive for as long as the worktree
                // is shared; dropping them unregisters the handlers.
                let _subscriptions = vec![
                    rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_add_collaborator),
                    rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_remove_collaborator),
                    rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_open_buffer),
                    rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_close_buffer),
                    rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_update_buffer),
                    rpc.subscribe_to_entity(remote_id, cx, Worktree::handle_save_buffer),
                ];

                let worktree = worktree.as_local_mut().unwrap();
                worktree.share = Some(ShareState {
                    snapshots_tx: snapshots_to_send_tx,
                    _subscriptions,
                });
            });

            Ok(remote_id)
        })
    }
1424
    /// Stops sharing this worktree: drops the share state (ending the
    /// snapshot-diff task and RPC subscriptions) and notifies the server.
    pub fn unshare(&mut self, cx: &mut ModelContext<Worktree>) {
        self.share.take();
        let rpc = self.client.clone();
        let remote_id = self.remote_id();
        cx.foreground()
            .spawn(
                async move {
                    if let Some(worktree_id) = remote_id {
                        rpc.send(proto::UnshareWorktree { worktree_id }).await?;
                    }
                    Ok(())
                }
                // Best effort: a failed notification is only logged.
                .log_err(),
            )
            .detach()
    }
1441
    /// Builds the `ShareWorktree` request describing this worktree's current
    /// non-ignored entries. Resolves to `None` if the worktree never
    /// receives a remote id.
    fn share_request(&self, cx: &mut ModelContext<Worktree>) -> Task<Option<proto::ShareWorktree>> {
        let remote_id = self.next_remote_id();
        let snapshot = self.snapshot();
        let root_name = self.root_name.clone();
        cx.background().spawn(async move {
            remote_id.await.map(|id| {
                // Ignored entries are never sent in the initial share.
                let entries = snapshot
                    .entries_by_path
                    .cursor::<()>()
                    .filter(|e| !e.is_ignored)
                    .map(Into::into)
                    .collect();
                proto::ShareWorktree {
                    worktree: Some(proto::Worktree {
                        id,
                        root_name,
                        entries,
                    }),
                }
            })
        })
    }
1464}
1465
1466fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1467 let contents = smol::block_on(fs.load(&abs_path))?;
1468 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1469 let mut builder = GitignoreBuilder::new(parent);
1470 for line in contents.lines() {
1471 builder.add_line(Some(abs_path.into()), line)?;
1472 }
1473 Ok(builder.build()?)
1474}
1475
impl Deref for LocalWorktree {
    type Target = Snapshot;

    // Convenience: expose all `Snapshot` accessors directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1483
impl fmt::Debug for LocalWorktree {
    // Delegates to the snapshot's tree-shaped debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1489
/// State held while a local worktree is being shared with collaborators.
struct ShareState {
    /// Channel used to queue snapshots whose diffs should be streamed to the server.
    snapshots_tx: Sender<Snapshot>,
    /// RPC handler subscriptions that stay registered while sharing; dropped on unshare.
    _subscriptions: Vec<client::Subscription>,
}
1494
/// A guest's view of a worktree hosted on another machine, kept in sync via
/// `UpdateWorktree` messages.
pub struct RemoteWorktree {
    /// Server-assigned id of the shared worktree.
    remote_id: u64,
    snapshot: Snapshot,
    /// Receives snapshots updated by the task applying remote updates.
    snapshot_rx: watch::Receiver<Snapshot>,
    client: Arc<Client>,
    /// Queue of worktree updates to be applied in order.
    updates_tx: postage::mpsc::Sender<proto::UpdateWorktree>,
    /// This guest's replica id for collaborative buffer editing.
    replica_id: ReplicaId,
    /// Remote buffers keyed by buffer id; see `RemoteBuffer`.
    open_buffers: HashMap<usize, RemoteBuffer>,
    collaborators: HashMap<PeerId, Collaborator>,
    diagnostics: Vec<DiagnosticSummary>,
    languages: Arc<LanguageRegistry>,
    user_store: ModelHandle<UserStore>,
    /// Operations buffered until they can be sent to the host.
    queued_operations: Vec<(u64, Operation)>,
    _subscriptions: Vec<client::Subscription>,
}
1510
impl RemoteWorktree {
    /// Opens the buffer at `path`, reusing an already-open buffer for the
    /// same worktree/path when possible, otherwise requesting the buffer's
    /// contents from the host over RPC.
    pub fn open_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        // Look for an existing buffer for this path, dropping map entries
        // whose buffers have been released.
        let mut existing_buffer = None;
        self.open_buffers.retain(|_buffer_id, buffer| {
            if let Some(buffer) = buffer.upgrade(cx.as_ref()) {
                if let Some(file) = buffer.read(cx.as_ref()).file() {
                    if file.worktree_id() == cx.model_id() && file.path().as_ref() == path {
                        existing_buffer = Some(buffer);
                    }
                }
                true
            } else {
                false
            }
        });

        let rpc = self.client.clone();
        let replica_id = self.replica_id;
        let remote_worktree_id = self.remote_id;
        let root_path = self.snapshot.abs_path.clone();
        let path = path.to_string_lossy().to_string();
        // Spawned weakly: the worktree may be dropped while awaiting the host.
        cx.spawn_weak(|this, mut cx| async move {
            if let Some(existing_buffer) = existing_buffer {
                Ok(existing_buffer)
            } else {
                let entry = this
                    .upgrade(&cx)
                    .ok_or_else(|| anyhow!("worktree was closed"))?
                    .read_with(&cx, |tree, _| tree.entry_for_path(&path).cloned())
                    .ok_or_else(|| anyhow!("file does not exist"))?;
                let response = rpc
                    .request(proto::OpenBuffer {
                        worktree_id: remote_worktree_id as u64,
                        path,
                    })
                    .await?;

                let this = this
                    .upgrade(&cx)
                    .ok_or_else(|| anyhow!("worktree was closed"))?;
                let file = File {
                    entry_id: Some(entry.id),
                    worktree: this.clone(),
                    worktree_path: root_path,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: false,
                };
                let language = this.read_with(&cx, |this, _| {
                    use language::File;
                    this.languages().select_language(file.full_path()).cloned()
                });
                let remote_buffer = response.buffer.ok_or_else(|| anyhow!("empty buffer"))?;
                let buffer_id = remote_buffer.id as usize;
                let buffer = cx.add_model(|cx| {
                    Buffer::from_proto(replica_id, remote_buffer, Some(Box::new(file)), cx)
                        .unwrap()
                        .with_language(language, None, cx)
                });
                this.update(&mut cx, |this, cx| {
                    let this = this.as_remote_mut().unwrap();
                    // Apply any operations that arrived while the buffer was
                    // still being opened.
                    if let Some(RemoteBuffer::Operations(pending_ops)) = this
                        .open_buffers
                        .insert(buffer_id, RemoteBuffer::Loaded(buffer.downgrade()))
                    {
                        buffer.update(cx, |buf, cx| buf.apply_ops(pending_ops, cx))?;
                    }
                    Result::<_, anyhow::Error>::Ok(())
                })?;
                Ok(buffer)
            }
        })
    }

    /// The server-assigned id of this worktree.
    pub fn remote_id(&self) -> u64 {
        self.remote_id
    }

    /// Closes every still-alive open buffer, e.g. when leaving the worktree.
    pub fn close_all_buffers(&mut self, cx: &mut MutableAppContext) {
        for (_, buffer) in self.open_buffers.drain() {
            if let RemoteBuffer::Loaded(buffer) = buffer {
                if let Some(buffer) = buffer.upgrade(cx) {
                    buffer.update(cx, |buffer, cx| buffer.close(cx))
                }
            }
        }
    }

    /// A clone of the current snapshot of the remote worktree.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Queues an incoming worktree update; updates are applied in order by
    /// the receiving end of `updates_tx`.
    fn update_from_remote(
        &mut self,
        envelope: TypedEnvelope<proto::UpdateWorktree>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let mut tx = self.updates_tx.clone();
        let payload = envelope.payload.clone();
        cx.background()
            .spawn(async move {
                tx.send(payload).await.expect("receiver runs to completion");
            })
            .detach();

        Ok(())
    }

    /// Registers a new collaborator on this worktree, keyed by peer id.
    pub fn add_collaborator(
        &mut self,
        collaborator: Collaborator,
        cx: &mut ModelContext<Worktree>,
    ) {
        self.collaborators
            .insert(collaborator.peer_id, collaborator);
        cx.notify();
    }

    /// Removes a departing collaborator and scrubs their replica from every
    /// still-alive open buffer.
    pub fn remove_collaborator(
        &mut self,
        envelope: TypedEnvelope<proto::RemoveCollaborator>,
        cx: &mut ModelContext<Worktree>,
    ) -> Result<()> {
        let peer_id = PeerId(envelope.payload.peer_id);
        let replica_id = self
            .collaborators
            .remove(&peer_id)
            .ok_or_else(|| anyhow!("unknown peer {:?}", peer_id))?
            .replica_id;
        for (_, buffer) in &self.open_buffers {
            if let Some(buffer) = buffer.upgrade(cx) {
                buffer.update(cx, |buffer, cx| buffer.remove_peer(replica_id, cx));
            }
        }
        cx.notify();
        Ok(())
    }
}
1653
/// A guest's record of a buffer in a remote worktree: either operations
/// received before the buffer finished opening, or a weak handle to the
/// opened buffer.
enum RemoteBuffer {
    Operations(Vec<Operation>),
    Loaded(WeakModelHandle<Buffer>),
}
1658
1659impl RemoteBuffer {
1660 fn upgrade(&self, cx: &impl UpgradeModelHandle) -> Option<ModelHandle<Buffer>> {
1661 match self {
1662 Self::Operations(_) => None,
1663 Self::Loaded(buffer) => buffer.upgrade(cx),
1664 }
1665 }
1666}
1667
/// An immutable-ish view of a worktree's entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    /// Worktree id this snapshot belongs to.
    id: usize,
    /// Incremented on every batch of changes; stamped onto touched entries.
    scan_id: usize,
    /// Absolute path of the worktree root.
    abs_path: Arc<Path>,
    /// File name of the worktree root directory.
    root_name: String,
    root_char_bag: CharBag,
    /// Compiled gitignore per directory, with the scan id it was built at.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    /// Entries ordered by path.
    entries_by_path: SumTree<Entry>,
    /// Entries ordered by id; used for diffing snapshots.
    entries_by_id: SumTree<PathEntry>,
    /// Ids of removed entries keyed by inode, so recreated files keep ids.
    removed_entry_ids: HashMap<u64, usize>,
    next_entry_id: Arc<AtomicUsize>,
}
1681
impl Snapshot {
    /// The worktree id this snapshot belongs to.
    pub fn id(&self) -> usize {
        self.id
    }

    /// Computes the diff between `self` and an older snapshot `other` as a
    /// protobuf message: entries new or rescanned in `self` become
    /// `updated_entries`; entries only in `other` become `removed_entries`.
    /// Ignored entries are skipped unless `include_ignored` is set.
    pub fn build_update(
        &self,
        other: &Self,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        // Merge-join the two id-ordered entry streams.
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both; only send if rescanned since
                            // the old snapshot.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id as u64);
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id as u64);
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            updated_entries,
            removed_entries,
            worktree_id,
        }
    }

    /// Applies a remote `UpdateWorktree` diff to this snapshot, editing both
    /// the by-path and by-id trees.
    fn apply_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        self.scan_id += 1;
        let scan_id = self.scan_id;

        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry_id = entry_id as usize;
            let entry = self
                .entry_for_id(entry_id)
                .ok_or_else(|| anyhow!("unknown entry"))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If this id already exists at another path, remove the stale
            // by-path record before inserting the new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not gitignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    // Starts a traversal at the given entry offset, counting entries
    // according to `include_dirs` / `include_ignored`.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    // Starts a traversal positioned at the entry with the given path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the given file offset.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, skipping the root entry
    /// (whose relative path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    // Iterates over the direct children of the directory at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if present in the snapshot.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Looks up the entry at exactly the given worktree-relative path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on a later entry if `path` is absent.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id via the by-id tree, then resolves its path.
    pub fn entry_for_id(&self, id: usize) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at the given path, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }

    /// Inserts or replaces a single entry. If the entry is a `.gitignore`
    /// file, its parent directory's ignore rules are rebuilt as well.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match build_gitignore(&abs_path, fs) {
                Ok(ignore) => {
                    let ignore_dir_path = entry.path.parent().unwrap();
                    self.ignores
                        .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
                }
                Err(error) => {
                    // A broken .gitignore shouldn't abort the scan; log it.
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            },
            &(),
        );
        entry
    }

    /// Records the children of a just-scanned directory, flipping the parent
    /// from `PendingDir` to `Dir` and registering its `.gitignore` (if any).
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut parent_entry = self
            .entries_by_path
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Only a pending directory may be populated.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    // Keeps entry ids stable across rescans: reuse the id of a recently
    // removed entry with the same inode, or of an existing entry at the
    // same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` together with everything beneath it.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the by-path tree into [before path] + [path subtree] +
            // [after path]; the middle slice is the removed set.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember removed ids by inode so a re-created file can get its
            // old id back (see `reuse_entry_id`).
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Stamp the parent's ignore record with the current scan id so
            // the stale rules can be detected and refreshed.
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of `.gitignore` rules applying to `path` by walking
    /// its ancestor directories from the root downward.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            // Once an ancestor directory is ignored, everything below it is.
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
2025
2026impl fmt::Debug for Snapshot {
2027 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2028 for entry in self.entries_by_path.cursor::<()>() {
2029 for _ in entry.path.ancestors().skip(1) {
2030 write!(f, " ")?;
2031 }
2032 writeln!(f, "{:?} (inode: {})", entry.path, entry.inode)?;
2033 }
2034 Ok(())
2035 }
2036}
2037
/// A handle to a file within a (local or remote) worktree.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The snapshot entry id; `None` once the file is deleted from disk.
    entry_id: Option<usize>,
    worktree: ModelHandle<Worktree>,
    /// Absolute path of the worktree root containing this file.
    worktree_path: Arc<Path>,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    /// Whether the file lives on this machine (vs. a remote host).
    is_local: bool,
}
2047
impl language::File for File {
    fn worktree_id(&self) -> usize {
        self.worktree.id()
    }

    fn entry_id(&self) -> Option<usize> {
        self.entry_id
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The path of this file relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The absolute on-disk path; `None` for files in remote worktrees.
    fn abs_path(&self) -> Option<PathBuf> {
        if self.is_local {
            Some(self.worktree_path.join(&self.path))
        } else {
            None
        }
    }

    /// The path prefixed with the worktree root's directory name.
    fn full_path(&self) -> PathBuf {
        let mut full_path = PathBuf::new();
        if let Some(worktree_name) = self.worktree_path.file_name() {
            full_path.push(worktree_name);
        }
        full_path.push(&self.path);
        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self) -> Option<OsString> {
        self.path
            .file_name()
            .or_else(|| self.worktree_path.file_name())
            .map(Into::into)
    }

    /// A file without an entry id no longer exists in the worktree.
    fn is_deleted(&self) -> bool {
        self.entry_id.is_none()
    }

    /// Saves `text` for the buffer backing this file. A local worktree
    /// writes to disk and then notifies the server (when shared); a remote
    /// worktree asks the host to perform the save over RPC.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, SystemTime)>> {
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let worktree_id = *worktree.remote_id.borrow();
                let save = worktree.save(self.path.clone(), text, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Only notify the server if the worktree is shared.
                    if let Some(worktree_id) = worktree_id {
                        rpc.send(proto::BufferSaved {
                            worktree_id,
                            buffer_id,
                            version: (&version).into(),
                            mtime: Some(entry.mtime.into()),
                        })
                        .await?;
                    }
                    Ok((version, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let worktree_id = worktree.remote_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            worktree_id,
                            buffer_id,
                        })
                        .await?;
                    let version = response.version.try_into()?;
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, mtime))
                })
            }
        })
    }

    /// Loads this file's current contents from disk; `None` if the worktree
    /// isn't local.
    fn load_local(&self, cx: &AppContext) -> Option<Task<Result<String>>> {
        let worktree = self.worktree.read(cx).as_local()?;
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        Some(
            cx.background()
                .spawn(async move { fs.load(&abs_path).await }),
        )
    }

    /// Forwards a buffer operation to the worktree for distribution.
    fn buffer_updated(&self, buffer_id: u64, operation: Operation, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            worktree.send_buffer_update(buffer_id, operation, cx);
        });
    }

    /// Tells the host that this buffer was closed; a no-op for local
    /// worktrees.
    fn buffer_removed(&self, buffer_id: u64, cx: &mut MutableAppContext) {
        self.worktree.update(cx, |worktree, cx| {
            if let Worktree::Remote(worktree) = worktree {
                let worktree_id = worktree.remote_id;
                let rpc = worktree.client.clone();
                cx.background()
                    .spawn(async move {
                        // Best effort: a failed close is only logged.
                        if let Err(error) = rpc
                            .send(proto::CloseBuffer {
                                worktree_id,
                                buffer_id,
                            })
                            .await
                        {
                            log::error!("error closing remote buffer: {}", error);
                        }
                    })
                    .detach();
            }
        });
    }

    fn boxed_clone(&self) -> Box<dyn language::File> {
        Box::new(self.clone())
    }

    fn as_any(&self) -> &dyn Any {
        self
    }
}
2188
/// A single file or directory in a worktree snapshot.
#[derive(Clone, Debug)]
pub struct Entry {
    /// Stable id, preserved across rescans where possible.
    pub id: usize,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether a `.gitignore` rule excludes this entry.
    pub is_ignored: bool,
}
2199
/// What kind of filesystem object an `Entry` represents.
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy matching.
    File(CharBag),
}
2206
2207impl Entry {
2208 fn new(
2209 path: Arc<Path>,
2210 metadata: &fs::Metadata,
2211 next_entry_id: &AtomicUsize,
2212 root_char_bag: CharBag,
2213 ) -> Self {
2214 Self {
2215 id: next_entry_id.fetch_add(1, SeqCst),
2216 kind: if metadata.is_dir {
2217 EntryKind::PendingDir
2218 } else {
2219 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2220 },
2221 path,
2222 inode: metadata.inode,
2223 mtime: metadata.mtime,
2224 is_symlink: metadata.is_symlink,
2225 is_ignored: false,
2226 }
2227 }
2228
2229 pub fn is_dir(&self) -> bool {
2230 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2231 }
2232
2233 pub fn is_file(&self) -> bool {
2234 matches!(self.kind, EntryKind::File(_))
2235 }
2236}
2237
2238impl sum_tree::Item for Entry {
2239 type Summary = EntrySummary;
2240
2241 fn summary(&self) -> Self::Summary {
2242 let visible_count = if self.is_ignored { 0 } else { 1 };
2243 let file_count;
2244 let visible_file_count;
2245 if self.is_file() {
2246 file_count = 1;
2247 visible_file_count = visible_count;
2248 } else {
2249 file_count = 0;
2250 visible_file_count = 0;
2251 }
2252
2253 EntrySummary {
2254 max_path: self.path.clone(),
2255 count: 1,
2256 visible_count,
2257 file_count,
2258 visible_file_count,
2259 }
2260 }
2261}
2262
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries in `entries_by_path` are keyed and ordered by their path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2270
/// Aggregated statistics over a subtree of entries in `entries_by_path`.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (last) path in the summarized range.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are not gitignored.
    visible_count: usize,
    /// Entries that are files.
    file_count: usize,
    /// Entries that are files and not gitignored.
    visible_file_count: usize,
}
2279
impl Default for EntrySummary {
    // The identity summary: empty path, all counters zero.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2291
2292impl sum_tree::Summary for EntrySummary {
2293 type Context = ();
2294
2295 fn add_summary(&mut self, rhs: &Self, _: &()) {
2296 self.max_path = rhs.max_path.clone();
2297 self.visible_count += rhs.visible_count;
2298 self.file_count += rhs.file_count;
2299 self.visible_file_count += rhs.visible_file_count;
2300 }
2301}
2302
/// A lightweight entry record stored in `entries_by_id`, ordered by id and
/// used for diffing snapshots.
#[derive(Clone, Debug)]
struct PathEntry {
    id: usize,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last touched.
    scan_id: usize,
}
2310
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // Summarize by id so the by-id tree can be sought by entry id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2318
impl sum_tree::KeyedItem for PathEntry {
    type Key = usize;

    // Entries in `entries_by_id` are keyed and ordered by their id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2326
/// Summary for the id-keyed entry tree: tracks the largest id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: usize,
}
2331
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// Items are ordered by id, so the right-hand summary always carries the
    /// maximum id seen so far; no accumulation is needed.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2339
// Allows a cursor over the id-keyed tree to seek by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for usize {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2345
/// A tree key that orders worktree entries by their path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2348
2349impl Default for PathKey {
2350 fn default() -> Self {
2351 Self(Path::new("").into())
2352 }
2353}
2354
// Allows a cursor over the entry tree to seek by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2360
/// Scans the worktree on a background executor, keeping the shared snapshot
/// in sync with the file system and reporting progress over `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Shared with the foreground worktree; updated in place as scanning progresses.
    snapshot: Arc<Mutex<Snapshot>>,
    // Broadcasts Scanning/Idle/Err state transitions to observers.
    notify: Sender<ScanState>,
    executor: Arc<executor::Background>,
}
2367
impl BackgroundScanner {
    /// Creates a scanner that keeps `snapshot` up to date with the file
    /// system, reporting its progress through the `notify` channel.
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        notify: Sender<ScanState>,
        fs: Arc<dyn Fs>,
        executor: Arc<executor::Background>,
    ) -> Self {
        Self {
            fs,
            snapshot,
            notify,
            executor,
        }
    }

    /// Returns the absolute path of the worktree root.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// Returns a clone of the current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }

    /// Main loop: performs the initial recursive scan, then processes each
    /// batch of file-system events as it arrives, bracketing every unit of
    /// work with `Scanning`/`Idle` notifications. Returns when the receiving
    /// side of `notify` is dropped or the event stream ends.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.send(ScanState::Scanning).await.is_err() {
            return;
        }

        // Report scan errors but keep running; an `Idle` notification still
        // follows the `Err` below.
        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .send(ScanState::Err(Arc::new(err)))
                .await
                .is_err()
            {
                return;
            }
        }

        if self.notify.send(ScanState::Idle).await.is_err() {
            return;
        }

        futures::pin_mut!(events_rx);
        while let Some(events) = events_rx.next().await {
            if self.notify.send(ScanState::Scanning).await.is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.send(ScanState::Idle).await.is_err() {
                break;
            }
        }
    }

    /// Performs the initial scan: seeds a work queue with the root directory
    /// and fans the recursive traversal out across one task per CPU.
    async fn scan_dirs(&mut self) -> Result<()> {
        // Copy what we need out of the snapshot while holding the lock briefly.
        let root_char_bag;
        let next_entry_id;
        let is_dir;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
        };

        // Single-file worktrees have nothing to recurse into.
        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let abs_path = self.abs_path();
            let (tx, rx) = channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .await
            .unwrap();
            // Each job carries its own clone of the sender; dropping ours lets
            // the channel close once all outstanding jobs are finished.
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }

    /// Scans a single directory: reads its children, computes their ignore
    /// status, records them in the snapshot, and enqueues one `ScanJob` per
    /// subdirectory.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            // Entries that vanish between read_dir and stat are skipped.
            let child_metadata = match self.fs.metadata(&child_abs_path).await? {
                Some(metadata) => metadata,
                None => continue,
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()) {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed so they reflect the ignore file in this directory.
                // Because `.gitignore` starts with a `.`, it tends to appear
                // early in the listing, so there should rarely be many of
                // them. Also update the ignore stack of any scan jobs we've
                // already queued for child directories.
                //
                // `new_jobs` is iterated in lockstep with the directory
                // entries in `new_entries`: a job was pushed for exactly each
                // entry for which `is_dir()` holds, in the same order.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_metadata.is_dir {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                child_entry.is_ignored = is_ignored;
                new_entries.push(child_entry);
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Once a directory is ignored, everything below it is too;
                    // short-circuit with the `all` stack.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(child_entry);
            };
        }

        // Commit this directory's children to the snapshot, then enqueue the
        // subdirectory jobs for other workers to pick up.
        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }

    /// Applies a batch of file-system events to the snapshot: removes the
    /// affected paths, re-stats and re-inserts those that still exist,
    /// rescans any directories among them, and refreshes ignore statuses.
    /// Returns `false` if the worktree root itself can no longer be resolved.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Work on a private copy of the snapshot; it is published at the end.
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&snapshot.abs_path).await {
            abs_path
        } else {
            return false;
        };
        let root_char_bag = snapshot.root_char_bag;
        let next_entry_id = snapshot.next_entry_id.clone();

        // Sorting groups descendants behind their ancestors, so the dedup
        // collapses events within a directory into that directory's event.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        // First pass: remove every affected path; surviving paths are
        // re-inserted in the second pass below.
        for event in &events {
            match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => snapshot.remove_path(&path),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            }
        }

        // Second pass: re-stat each path and re-insert it if it still exists,
        // queueing directories for a full rescan.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        for event in events {
            let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        event.path,
                        root_abs_path
                    );
                    continue;
                }
            };

            match self.fs.metadata(&event.path).await {
                Ok(Some(metadata)) => {
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());
                    if metadata.is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path: event.path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .await
                            .unwrap();
                    }
                }
                // The path was deleted; the removal above already handled it.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Publish the updated snapshot before the parallel rescan below,
        // which mutates the shared snapshot directly.
        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        true
    }

    /// Recomputes the ignore status of entries affected by `.gitignore` files
    /// that changed in the current scan, and drops cached ignores whose
    /// `.gitignore` file no longer exists.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // Only ignores touched during this scan pass need reprocessing.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Skip paths nested under `parent_path`: the recursive jobs below
            // will reach them anyway.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes the ignore status of the direct children of `job.path`,
    /// recursing into subdirectories via the job queue, and writes any changed
    /// entries back to the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path.clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose status actually changed; keep the
            // path index and the id index consistent with each other.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
}
2771
2772async fn refresh_entry(
2773 fs: &dyn Fs,
2774 snapshot: &Mutex<Snapshot>,
2775 path: Arc<Path>,
2776 abs_path: &Path,
2777) -> Result<Entry> {
2778 let root_char_bag;
2779 let next_entry_id;
2780 {
2781 let snapshot = snapshot.lock();
2782 root_char_bag = snapshot.root_char_bag;
2783 next_entry_id = snapshot.next_entry_id.clone();
2784 }
2785 let entry = Entry::new(
2786 path,
2787 &fs.metadata(abs_path)
2788 .await?
2789 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
2790 &next_entry_id,
2791 root_char_bag,
2792 );
2793 Ok(snapshot.lock().insert_entry(entry, fs))
2794}
2795
2796fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2797 let mut result = root_char_bag;
2798 result.extend(
2799 path.to_string_lossy()
2800 .chars()
2801 .map(|c| c.to_ascii_lowercase()),
2802 );
2803 result
2804}
2805
/// A unit of directory-scanning work: one directory to read, the ignore rules
/// in effect there, and the queue used to schedule its subdirectories.
struct ScanJob {
    abs_path: PathBuf,
    // Path of the directory relative to the worktree root.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Sender for enqueueing jobs for this directory's subdirectories.
    scan_queue: Sender<ScanJob>,
}
2812
/// A unit of ignore-status recomputation: one directory whose children should
/// be re-evaluated against `ignore_stack`, recursing via `ignore_queue`.
struct UpdateIgnoreStatusJob {
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    // Sender for enqueueing jobs for this directory's subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2818
/// Test-only extension methods on worktree handles.
pub trait WorktreeHandle {
    // Waits until all pending file-system events for the worktree have been
    // processed; see the implementation for details.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2826
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create, then delete, a sentinel file; once both changes are
            // observed, any earlier events must have been processed too.
            std::fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            std::fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2859
/// Cursor dimension accumulating how many entries (under various filters)
/// precede the cursor's position, plus the maximum path seen so far.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2868
2869impl<'a> TraversalProgress<'a> {
2870 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2871 match (include_ignored, include_dirs) {
2872 (true, true) => self.count,
2873 (true, false) => self.file_count,
2874 (false, true) => self.visible_count,
2875 (false, false) => self.visible_file_count,
2876 }
2877 }
2878}
2879
// Accumulates entry-tree summaries as a cursor advances: counts are additive
// and `max_path` tracks the rightmost path passed so far.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2889
2890impl<'a> Default for TraversalProgress<'a> {
2891 fn default() -> Self {
2892 Self {
2893 max_path: Path::new(""),
2894 count: 0,
2895 visible_count: 0,
2896 file_count: 0,
2897 visible_file_count: 0,
2898 }
2899 }
2900}
2901
/// A cursor over worktree entries in path order, optionally filtering out
/// directories and/or gitignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
2907
impl<'a> Traversal<'a> {
    /// Advances past the current entry to the next one matching the
    /// traversal's filters. Returns whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` entries (under the current filters) lie
    /// behind the cursor. Returns whether the cursor moved.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the rest of the current entry's subtree, stopping at the next
    /// entry that is outside it and matches the filters. Returns whether such
    /// an entry was found.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek just past every descendant of the current entry's path.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filtered entries preceding the cursor's current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2953
2954impl<'a> Iterator for Traversal<'a> {
2955 type Item = &'a Entry;
2956
2957 fn next(&mut self) -> Option<Self::Item> {
2958 if let Some(item) = self.entry() {
2959 self.advance();
2960 Some(item)
2961 } else {
2962 None
2963 }
2964 }
2965}
2966
/// Seek targets for positioning a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the entry with this exact path.
    Path(&'a Path),
    // Seek just past all entries within the subtree rooted at this path.
    PathSuccessor(&'a Path),
    // Seek to the n-th entry under the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2977
2978impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2979 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2980 match self {
2981 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2982 TraversalTarget::PathSuccessor(path) => {
2983 if !cursor_location.max_path.starts_with(path) {
2984 Ordering::Equal
2985 } else {
2986 Ordering::Greater
2987 }
2988 }
2989 TraversalTarget::Count {
2990 count,
2991 include_dirs,
2992 include_ignored,
2993 } => Ord::cmp(
2994 count,
2995 &cursor_location.count(*include_dirs, *include_ignored),
2996 ),
2997 }
2998 }
2999}
3000
/// Iterator over the direct children of a directory entry, skipping each
/// child's own descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3005
3006impl<'a> Iterator for ChildEntriesIter<'a> {
3007 type Item = &'a Entry;
3008
3009 fn next(&mut self) -> Option<Self::Item> {
3010 if let Some(item) = self.traversal.entry() {
3011 if item.path.starts_with(&self.parent_path) {
3012 self.traversal.advance_to_sibling();
3013 return Some(item);
3014 }
3015 }
3016 None
3017 }
3018}
3019
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local entry into its protobuf representation. The
    /// fuzzy-matching char bag is not transmitted; the receiving side rebuilds
    /// it (see the `TryFrom` impl below).
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id as u64,
            is_dir: entry.is_dir(),
            // Paths are sent as strings; non-UTF-8 components are lossily converted.
            path: entry.path.to_string_lossy().to_string(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3033
3034impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3035 type Error = anyhow::Error;
3036
3037 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3038 if let Some(mtime) = entry.mtime {
3039 let kind = if entry.is_dir {
3040 EntryKind::Dir
3041 } else {
3042 let mut char_bag = root_char_bag.clone();
3043 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3044 EntryKind::File(char_bag)
3045 };
3046 let path: Arc<Path> = Arc::from(Path::new(&entry.path));
3047 Ok(Entry {
3048 id: entry.id as usize,
3049 kind,
3050 path: path.clone(),
3051 inode: entry.inode,
3052 mtime: mtime.into(),
3053 is_symlink: entry.is_symlink,
3054 is_ignored: entry.is_ignored,
3055 })
3056 } else {
3057 Err(anyhow!(
3058 "missing mtime in remote worktree entry {:?}",
3059 entry.path
3060 ))
3061 }
3062 }
3063}
3064
/// Conversion from an LSP position to the editor's UTF-16 point type.
trait ToPointUtf16 {
    fn to_point_utf16(self) -> PointUtf16;
}
3068
impl ToPointUtf16 for lsp::Position {
    /// Maps the position's line/character fields directly onto the point's
    /// row/column — both sides use zero-based coordinates.
    fn to_point_utf16(self) -> PointUtf16 {
        PointUtf16::new(self.line, self.character)
    }
}
3074
3075fn diagnostic_ranges<'a>(
3076 diagnostic: &'a lsp::Diagnostic,
3077 abs_path: &'a Path,
3078) -> impl 'a + Iterator<Item = Range<PointUtf16>> {
3079 diagnostic
3080 .related_information
3081 .iter()
3082 .flatten()
3083 .filter_map(move |info| {
3084 if info.location.uri.to_file_path().ok()? == abs_path {
3085 let info_start = PointUtf16::new(
3086 info.location.range.start.line,
3087 info.location.range.start.character,
3088 );
3089 let info_end = PointUtf16::new(
3090 info.location.range.end.line,
3091 info.location.range.end.character,
3092 );
3093 Some(info_start..info_end)
3094 } else {
3095 None
3096 }
3097 })
3098 .chain(Some(
3099 diagnostic.range.start.to_point_utf16()..diagnostic.range.end.to_point_utf16(),
3100 ))
3101}
3102
3103#[cfg(test)]
3104mod tests {
3105 use super::*;
3106 use crate::fs::FakeFs;
3107 use anyhow::Result;
3108 use client::test::{FakeHttpClient, FakeServer};
3109 use fs::RealFs;
3110 use language::{tree_sitter_rust, DiagnosticEntry, LanguageServerConfig};
3111 use language::{Diagnostic, LanguageConfig};
3112 use lsp::Url;
3113 use rand::prelude::*;
3114 use serde_json::json;
3115 use std::{cell::RefCell, rc::Rc};
3116 use std::{
3117 env,
3118 fmt::Write,
3119 time::{SystemTime, UNIX_EPOCH},
3120 };
3121 use text::Point;
3122 use unindent::Unindent as _;
3123 use util::test::temp_tree;
3124
    /// Verifies that traversing a scanned worktree yields entries in path
    /// order and skips gitignored entries when ignored entries are excluded.
    #[gpui::test]
    async fn test_traversal(mut cx: gpui::TestAppContext) {
        let fs = FakeFs::new();
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            Arc::from(Path::new("/root")),
            Arc::new(fs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to finish before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(&cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                // "a/b" is absent because the .gitignore matches it.
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3171
    /// Verifies that saving an edited buffer writes its contents to disk.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Insert a large amount of text so the save is a non-trivial write.
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        // The file on disk should now match the buffer's contents.
        let new_text = std::fs::read_to_string(dir.path().join("file1")).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3205
    /// Verifies that saving works when the worktree root is a single file
    /// rather than a directory (the buffer is opened with an empty path).
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));
        let file_path = dir.path().join("file1");

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            file_path.clone(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        // A single-file worktree contains exactly one file.
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        // The root file itself is addressed by the empty relative path.
        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("", cx))
            .await
            .unwrap();
        let save = buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "a line of text.\n".repeat(10 * 1024), cx);
            buffer.save(cx).unwrap()
        });
        save.await.unwrap();

        let new_text = std::fs::read_to_string(file_path).unwrap();
        assert_eq!(new_text, buffer.read_with(&cx, |buffer, _| buffer.text()));
    }
3244
    /// Verifies that renames and deletions on disk are picked up by the
    /// scanner (preserving stable entry ids and open-buffer paths), and that a
    /// remote replica converges after applying the resulting update message.
    #[gpui::test]
    async fn test_rescan_and_remote_updates(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let user_id = 5;
        let mut client = Client::new();
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;
        let tree = Worktree::open_local(
            client,
            user_store.clone(),
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Helpers: open a buffer for a path, and look up an entry's stable id.
        let buffer_for_path = |path: &'static str, cx: &mut gpui::TestAppContext| {
            let buffer = tree.update(cx, |tree, cx| tree.open_buffer(path, cx));
            async move { buffer.await.unwrap() }
        };
        let id_for_path = |path: &'static str, cx: &gpui::TestAppContext| {
            tree.read_with(cx, |tree, _| {
                tree.entry_for_path(path)
                    .expect(&format!("no entry for path {}", path))
                    .id
            })
        };

        let buffer2 = buffer_for_path("a/file2", &mut cx).await;
        let buffer3 = buffer_for_path("a/file3", &mut cx).await;
        let buffer4 = buffer_for_path("b/c/file4", &mut cx).await;
        let buffer5 = buffer_for_path("b/c/file5", &mut cx).await;

        let file2_id = id_for_path("a/file2", &cx);
        let file3_id = id_for_path("a/file3", &cx);
        let file4_id = id_for_path("b/c/file4", &cx);

        // Wait for the initial scan.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Create a remote copy of this worktree.
        let initial_snapshot = tree.read_with(&cx, |tree, _| tree.snapshot());
        let worktree_id = 1;
        let share_request = tree.update(&mut cx, |tree, cx| {
            tree.as_local().unwrap().share_request(cx)
        });
        let open_worktree = server.receive::<proto::OpenWorktree>().await.unwrap();
        server
            .respond(
                open_worktree.receipt(),
                proto::OpenWorktreeResponse { worktree_id: 1 },
            )
            .await;

        let remote = Worktree::remote(
            proto::JoinWorktreeResponse {
                worktree: share_request.await.unwrap().worktree,
                replica_id: 1,
                collaborators: Vec::new(),
            },
            Client::new(),
            user_store,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| {
            assert!(!buffer2.read(cx).is_dirty());
            assert!(!buffer3.read(cx).is_dirty());
            assert!(!buffer4.read(cx).is_dirty());
            assert!(!buffer5.read(cx).is_dirty());
        });

        // Rename and delete files and directories.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        let expected_paths = vec![
            "a",
            "a/file1",
            "a/file2.new",
            "b",
            "d",
            "d/file3",
            "d/file4",
        ];

        cx.read(|app| {
            assert_eq!(
                tree.read(app)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );

            // Renamed entries keep their stable ids.
            assert_eq!(id_for_path("a/file2.new", &cx), file2_id);
            assert_eq!(id_for_path("d/file3", &cx), file3_id);
            assert_eq!(id_for_path("d/file4", &cx), file4_id);

            // Open buffers track their files across renames...
            assert_eq!(
                buffer2.read(app).file().unwrap().path().as_ref(),
                Path::new("a/file2.new")
            );
            assert_eq!(
                buffer3.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file3")
            );
            assert_eq!(
                buffer4.read(app).file().unwrap().path().as_ref(),
                Path::new("d/file4")
            );
            assert_eq!(
                buffer5.read(app).file().unwrap().path().as_ref(),
                Path::new("b/c/file5")
            );

            // ...and only the deleted file's buffer is marked deleted.
            assert!(!buffer2.read(app).file().unwrap().is_deleted());
            assert!(!buffer3.read(app).file().unwrap().is_deleted());
            assert!(!buffer4.read(app).file().unwrap().is_deleted());
            assert!(buffer5.read(app).file().unwrap().is_deleted());
        });

        // Update the remote worktree. Check that it becomes consistent with the
        // local worktree.
        remote.update(&mut cx, |remote, cx| {
            let update_message =
                tree.read(cx)
                    .snapshot()
                    .build_update(&initial_snapshot, worktree_id, true);
            remote
                .as_remote_mut()
                .unwrap()
                .snapshot
                .apply_update(update_message)
                .unwrap();

            assert_eq!(
                remote
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                expected_paths
            );
        });
    }
3413
    /// Verifies that gitignore rules are applied both during the initial scan
    /// and to files created afterwards, and that `.git` is always ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        // Initial scan: statuses follow the .gitignore.
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
        });

        // Files created after the scan inherit their directory's ignore status.
        std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored, false);
            assert_eq!(ignored.is_ignored, true);
            assert_eq!(dot_git.is_ignored, true);
        });
    }
3465
    /// Verifies the open/share handshake with the server: the worktree sends
    /// an OpenWorktree request with its root name and authorized logins,
    /// receives a remote id, and sends CloseWorktree when dropped.
    #[gpui::test]
    async fn test_open_and_share_worktree(mut cx: gpui::TestAppContext) {
        let user_id = 100;
        let mut client = Client::new();
        let server = FakeServer::for_client(user_id, &mut client, &cx).await;
        let user_store = server.build_user_store(client.clone(), &mut cx).await;

        let fs = Arc::new(FakeFs::new());
        fs.insert_tree(
            "/path",
            json!({
                "to": {
                    "the-dir": {
                        // The .zed.toml's collaborators become the authorized logins.
                        ".zed.toml": r#"collaborators = ["friend-1", "friend-2"]"#,
                        "a.txt": "a-contents",
                    },
                },
            }),
        )
        .await;

        let worktree = Worktree::open_local(
            client.clone(),
            user_store,
            "/path/to/the-dir".as_ref(),
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let open_worktree = server.receive::<proto::OpenWorktree>().await.unwrap();
        assert_eq!(
            open_worktree.payload,
            proto::OpenWorktree {
                root_name: "the-dir".to_string(),
                authorized_logins: vec!["friend-1".to_string(), "friend-2".to_string()],
            }
        );

        server
            .respond(
                open_worktree.receipt(),
                proto::OpenWorktreeResponse { worktree_id: 5 },
            )
            .await;
        let remote_id = worktree
            .update(&mut cx, |tree, _| tree.as_local().unwrap().next_remote_id())
            .await;
        assert_eq!(remote_id, Some(5));

        // Dropping the worktree should notify the server.
        cx.update(move |_| drop(worktree));
        server.receive::<proto::CloseWorktree>().await.unwrap();
    }
3521
    #[gpui::test]
    // Exercises the dirty/saved state machine of a buffer: editing dirties it
    // and emits Dirtied, saving clears it and emits Saved, and deleting the
    // backing file dirties a clean buffer (but not an already-dirty one).
    async fn test_buffer_is_dirty(mut cx: gpui::TestAppContext) {
        use std::fs;

        let dir = temp_tree(json!({
            "file1": "abc",
            "file2": "def",
            "file3": "ghi",
        }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let buffer1 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file1", cx))
            .await
            .unwrap();
        // Collect every event the buffer emits so the sequences can be asserted.
        let events = Rc::new(RefCell::new(Vec::new()));

        // initially, the buffer isn't dirty.
        buffer1.update(&mut cx, |buffer, cx| {
            cx.subscribe(&buffer1, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();

            assert!(!buffer.is_dirty());
            assert!(events.borrow().is_empty());

            buffer.edit(vec![1..2], "", cx);
        });

        // after the first edit, the buffer is dirty, and emits a dirtied event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
            assert_eq!(
                *events.borrow(),
                &[language::Event::Edited, language::Event::Dirtied]
            );
            events.borrow_mut().clear();
            // Simulate a save by acknowledging the current version and mtime.
            buffer.did_save(buffer.version(), buffer.file().unwrap().mtime(), None, cx);
        });

        // after saving, the buffer is not dirty, and emits a saved event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(!buffer.is_dirty());
            assert_eq!(*events.borrow(), &[language::Event::Saved]);
            events.borrow_mut().clear();

            buffer.edit(vec![1..1], "B", cx);
            buffer.edit(vec![2..2], "D", cx);
        });

        // after editing again, the buffer is dirty, and emits another dirty event.
        buffer1.update(&mut cx, |buffer, cx| {
            assert!(buffer.text() == "aBDc");
            assert!(buffer.is_dirty());
            // Two edits in one transaction produce Edited, Dirtied, Edited —
            // Dirtied is only emitted on the clean-to-dirty transition.
            assert_eq!(
                *events.borrow(),
                &[
                    language::Event::Edited,
                    language::Event::Dirtied,
                    language::Event::Edited,
                ],
            );
            events.borrow_mut().clear();

            // TODO - currently, after restoring the buffer to its
            // previously-saved state, the buffer is still considered dirty.
            buffer.edit([1..3], "", cx);
            assert!(buffer.text() == "ac");
            assert!(buffer.is_dirty());
        });

        assert_eq!(*events.borrow(), &[language::Event::Edited]);

        // When a file is deleted, the buffer is considered dirty.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer2 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file2", cx))
            .await
            .unwrap();
        buffer2.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer2, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        fs::remove_file(dir.path().join("file2")).unwrap();
        buffer2.condition(&cx, |b, _| b.is_dirty()).await;
        assert_eq!(
            *events.borrow(),
            &[language::Event::Dirtied, language::Event::FileHandleChanged]
        );

        // When a file is already dirty when deleted, we don't emit a Dirtied event.
        let events = Rc::new(RefCell::new(Vec::new()));
        let buffer3 = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("file3", cx))
            .await
            .unwrap();
        buffer3.update(&mut cx, |_, cx| {
            cx.subscribe(&buffer3, {
                let events = events.clone();
                move |_, _, event, _| events.borrow_mut().push(event.clone())
            })
            .detach();
        });

        tree.flush_fs_events(&cx).await;
        // Dirty the buffer first, then delete its file: only FileHandleChanged
        // should be emitted, and the buffer stays dirty.
        buffer3.update(&mut cx, |buffer, cx| {
            buffer.edit(Some(0..0), "x", cx);
        });
        events.borrow_mut().clear();
        fs::remove_file(dir.path().join("file3")).unwrap();
        buffer3
            .condition(&cx, |_, _| !events.borrow().is_empty())
            .await;
        assert_eq!(*events.borrow(), &[language::Event::FileHandleChanged]);
        cx.read(|cx| assert!(buffer3.read(cx).is_dirty()));
    }
3661
    #[gpui::test]
    // Verifies how a buffer reacts to its file changing on disk: a clean buffer
    // silently reloads the new contents, while a dirty buffer keeps its edits
    // and is flagged as having a conflict.
    async fn test_buffer_file_changes_on_disk(mut cx: gpui::TestAppContext) {
        use std::fs;

        let initial_contents = "aaa\nbbbbb\nc\n";
        let dir = temp_tree(json!({ "the-file": initial_contents }));
        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        let abs_path = dir.path().join("the-file");
        let buffer = tree
            .update(&mut cx, |tree, cx| {
                tree.open_buffer(Path::new("the-file"), cx)
            })
            .await
            .unwrap();

        // TODO
        // Add a cursor on each row.
        // let selection_set_id = buffer.update(&mut cx, |buffer, cx| {
        //     assert!(!buffer.is_dirty());
        //     buffer.add_selection_set(
        //         &(0..3)
        //             .map(|row| Selection {
        //                 id: row as usize,
        //                 start: Point::new(row, 1),
        //                 end: Point::new(row, 1),
        //                 reversed: false,
        //                 goal: SelectionGoal::None,
        //             })
        //             .collect::<Vec<_>>(),
        //         cx,
        //     )
        // });

        // Change the file on disk, adding two new lines of text, and removing
        // one line.
        buffer.read_with(&cx, |buffer, _| {
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });
        let new_contents = "AAAA\naaa\nBB\nbbbbb\n";
        fs::write(&abs_path, new_contents).unwrap();

        // Because the buffer was not modified, it is reloaded from disk. Its
        // contents are edited according to the diff between the old and new
        // file contents.
        buffer
            .condition(&cx, |buffer, _| buffer.text() == new_contents)
            .await;

        buffer.update(&mut cx, |buffer, _| {
            assert_eq!(buffer.text(), new_contents);
            assert!(!buffer.is_dirty());
            assert!(!buffer.has_conflict());

            // TODO
            // let cursor_positions = buffer
            //     .selection_set(selection_set_id)
            //     .unwrap()
            //     .selections::<Point>(&*buffer)
            //     .map(|selection| {
            //         assert_eq!(selection.start, selection.end);
            //         selection.start
            //     })
            //     .collect::<Vec<_>>();
            // assert_eq!(
            //     cursor_positions,
            //     [Point::new(1, 1), Point::new(3, 1), Point::new(4, 0)]
            // );
        });

        // Modify the buffer
        buffer.update(&mut cx, |buffer, cx| {
            buffer.edit(vec![0..0], " ", cx);
            assert!(buffer.is_dirty());
            assert!(!buffer.has_conflict());
        });

        // Change the file on disk again, adding blank lines to the beginning.
        fs::write(&abs_path, "\n\n\nAAAA\naaa\nBB\nbbbbb\n").unwrap();

        // Because the buffer is modified, it doesn't reload from disk, but is
        // marked as having a conflict.
        buffer
            .condition(&cx, |buffer, _| buffer.has_conflict())
            .await;
    }
3764
    #[gpui::test]
    // Verifies that diagnostics published by a (fake) language server for a
    // file that is not yet open are retained and applied when the buffer for
    // that file is opened later.
    async fn test_language_server_diagnostics(mut cx: gpui::TestAppContext) {
        let (language_server_config, mut fake_server) =
            LanguageServerConfig::fake(cx.background()).await;
        // Register a Rust language that launches the fake server for *.rs files.
        let mut languages = LanguageRegistry::new();
        languages.add(Arc::new(Language::new(
            LanguageConfig {
                name: "Rust".to_string(),
                path_suffixes: vec!["rs".to_string()],
                language_server: Some(language_server_config),
                ..Default::default()
            },
            Some(tree_sitter_rust::language()),
        )));

        let dir = temp_tree(json!({
            "a.rs": "fn a() { A }",
            "b.rs": "const y: i32 = 1",
        }));

        let client = Client::new();
        let http_client = FakeHttpClient::with_404_response();
        let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));

        let tree = Worktree::open_local(
            client,
            user_store,
            dir.path(),
            Arc::new(RealFs),
            Arc::new(languages),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // Cause worktree to start the fake language server
        let _buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("b.rs", cx))
            .await
            .unwrap();

        // Publish a diagnostic for a.rs *before* a buffer for it exists.
        fake_server
            .notify::<lsp::notification::PublishDiagnostics>(lsp::PublishDiagnosticsParams {
                uri: Url::from_file_path(dir.path().join("a.rs")).unwrap(),
                version: None,
                diagnostics: vec![lsp::Diagnostic {
                    range: lsp::Range::new(lsp::Position::new(0, 9), lsp::Position::new(0, 10)),
                    severity: Some(lsp::DiagnosticSeverity::ERROR),
                    message: "undefined variable 'A'".to_string(),
                    ..Default::default()
                }],
            })
            .await;

        let buffer = tree
            .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
            .await
            .unwrap();

        // The newly opened buffer carries the previously published diagnostic.
        buffer.read_with(&cx, |buffer, _| {
            let diagnostics = buffer
                .snapshot()
                .diagnostics_in_range::<_, Point>(0..buffer.len())
                .collect::<Vec<_>>();
            assert_eq!(
                diagnostics,
                &[DiagnosticEntry {
                    range: Point::new(0, 9)..Point::new(0, 10),
                    diagnostic: Diagnostic {
                        severity: lsp::DiagnosticSeverity::ERROR,
                        message: "undefined variable 'A'".to_string(),
                        group_id: 0,
                        is_primary: true,
                        ..Default::default()
                    }
                }]
            )
        });
    }
3846
3847 #[gpui::test]
3848 async fn test_grouped_diagnostics(mut cx: gpui::TestAppContext) {
3849 let fs = Arc::new(FakeFs::new());
3850 let client = Client::new();
3851 let http_client = FakeHttpClient::with_404_response();
3852 let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx));
3853
3854 fs.insert_tree(
3855 "/the-dir",
3856 json!({
3857 "a.rs": "
3858 fn foo(mut v: Vec<usize>) {
3859 for x in &v {
3860 v.push(1);
3861 }
3862 }
3863 "
3864 .unindent(),
3865 }),
3866 )
3867 .await;
3868
3869 let worktree = Worktree::open_local(
3870 client.clone(),
3871 user_store,
3872 "/the-dir".as_ref(),
3873 fs,
3874 Default::default(),
3875 &mut cx.to_async(),
3876 )
3877 .await
3878 .unwrap();
3879
3880 let buffer = worktree
3881 .update(&mut cx, |tree, cx| tree.open_buffer("a.rs", cx))
3882 .await
3883 .unwrap();
3884
3885 let buffer_uri = Url::from_file_path("/the-dir/a.rs").unwrap();
3886 let message = lsp::PublishDiagnosticsParams {
3887 uri: buffer_uri.clone(),
3888 diagnostics: vec![
3889 lsp::Diagnostic {
3890 range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3891 severity: Some(DiagnosticSeverity::WARNING),
3892 message: "error 1".to_string(),
3893 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3894 location: lsp::Location {
3895 uri: buffer_uri.clone(),
3896 range: lsp::Range::new(
3897 lsp::Position::new(1, 8),
3898 lsp::Position::new(1, 9),
3899 ),
3900 },
3901 message: "error 1 hint 1".to_string(),
3902 }]),
3903 ..Default::default()
3904 },
3905 lsp::Diagnostic {
3906 range: lsp::Range::new(lsp::Position::new(1, 8), lsp::Position::new(1, 9)),
3907 severity: Some(DiagnosticSeverity::HINT),
3908 message: "error 1 hint 1".to_string(),
3909 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3910 location: lsp::Location {
3911 uri: buffer_uri.clone(),
3912 range: lsp::Range::new(
3913 lsp::Position::new(1, 8),
3914 lsp::Position::new(1, 9),
3915 ),
3916 },
3917 message: "original diagnostic".to_string(),
3918 }]),
3919 ..Default::default()
3920 },
3921 lsp::Diagnostic {
3922 range: lsp::Range::new(lsp::Position::new(2, 8), lsp::Position::new(2, 17)),
3923 severity: Some(DiagnosticSeverity::ERROR),
3924 message: "error 2".to_string(),
3925 related_information: Some(vec![
3926 lsp::DiagnosticRelatedInformation {
3927 location: lsp::Location {
3928 uri: buffer_uri.clone(),
3929 range: lsp::Range::new(
3930 lsp::Position::new(1, 13),
3931 lsp::Position::new(1, 15),
3932 ),
3933 },
3934 message: "error 2 hint 1".to_string(),
3935 },
3936 lsp::DiagnosticRelatedInformation {
3937 location: lsp::Location {
3938 uri: buffer_uri.clone(),
3939 range: lsp::Range::new(
3940 lsp::Position::new(1, 13),
3941 lsp::Position::new(1, 15),
3942 ),
3943 },
3944 message: "error 2 hint 2".to_string(),
3945 },
3946 ]),
3947 ..Default::default()
3948 },
3949 lsp::Diagnostic {
3950 range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
3951 severity: Some(DiagnosticSeverity::HINT),
3952 message: "error 2 hint 1".to_string(),
3953 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3954 location: lsp::Location {
3955 uri: buffer_uri.clone(),
3956 range: lsp::Range::new(
3957 lsp::Position::new(2, 8),
3958 lsp::Position::new(2, 17),
3959 ),
3960 },
3961 message: "original diagnostic".to_string(),
3962 }]),
3963 ..Default::default()
3964 },
3965 lsp::Diagnostic {
3966 range: lsp::Range::new(lsp::Position::new(1, 13), lsp::Position::new(1, 15)),
3967 severity: Some(DiagnosticSeverity::HINT),
3968 message: "error 2 hint 2".to_string(),
3969 related_information: Some(vec![lsp::DiagnosticRelatedInformation {
3970 location: lsp::Location {
3971 uri: buffer_uri.clone(),
3972 range: lsp::Range::new(
3973 lsp::Position::new(2, 8),
3974 lsp::Position::new(2, 17),
3975 ),
3976 },
3977 message: "original diagnostic".to_string(),
3978 }]),
3979 ..Default::default()
3980 },
3981 ],
3982 version: None,
3983 };
3984
3985 worktree
3986 .update(&mut cx, |tree, cx| tree.update_diagnostics(message, cx))
3987 .unwrap();
3988 let buffer = buffer.read_with(&cx, |buffer, cx| buffer.snapshot());
3989
3990 assert_eq!(
3991 buffer
3992 .diagnostics_in_range::<_, Point>(0..buffer.len())
3993 .collect::<Vec<_>>(),
3994 &[
3995 DiagnosticEntry {
3996 range: Point::new(1, 8)..Point::new(1, 9),
3997 diagnostic: Diagnostic {
3998 severity: DiagnosticSeverity::WARNING,
3999 message: "error 1".to_string(),
4000 group_id: 0,
4001 is_primary: true,
4002 ..Default::default()
4003 }
4004 },
4005 DiagnosticEntry {
4006 range: Point::new(1, 8)..Point::new(1, 9),
4007 diagnostic: Diagnostic {
4008 severity: DiagnosticSeverity::HINT,
4009 message: "error 1 hint 1".to_string(),
4010 group_id: 0,
4011 is_primary: false,
4012 ..Default::default()
4013 }
4014 },
4015 DiagnosticEntry {
4016 range: Point::new(1, 13)..Point::new(1, 15),
4017 diagnostic: Diagnostic {
4018 severity: DiagnosticSeverity::HINT,
4019 message: "error 2 hint 1".to_string(),
4020 group_id: 1,
4021 is_primary: false,
4022 ..Default::default()
4023 }
4024 },
4025 DiagnosticEntry {
4026 range: Point::new(1, 13)..Point::new(1, 15),
4027 diagnostic: Diagnostic {
4028 severity: DiagnosticSeverity::HINT,
4029 message: "error 2 hint 2".to_string(),
4030 group_id: 1,
4031 is_primary: false,
4032 ..Default::default()
4033 }
4034 },
4035 DiagnosticEntry {
4036 range: Point::new(2, 8)..Point::new(2, 17),
4037 diagnostic: Diagnostic {
4038 severity: DiagnosticSeverity::ERROR,
4039 message: "error 2".to_string(),
4040 group_id: 1,
4041 is_primary: true,
4042 ..Default::default()
4043 }
4044 }
4045 ]
4046 );
4047
4048 assert_eq!(
4049 buffer.diagnostic_group::<Point>(0).collect::<Vec<_>>(),
4050 &[
4051 DiagnosticEntry {
4052 range: Point::new(1, 8)..Point::new(1, 9),
4053 diagnostic: Diagnostic {
4054 severity: DiagnosticSeverity::WARNING,
4055 message: "error 1".to_string(),
4056 group_id: 0,
4057 is_primary: true,
4058 ..Default::default()
4059 }
4060 },
4061 DiagnosticEntry {
4062 range: Point::new(1, 8)..Point::new(1, 9),
4063 diagnostic: Diagnostic {
4064 severity: DiagnosticSeverity::HINT,
4065 message: "error 1 hint 1".to_string(),
4066 group_id: 0,
4067 is_primary: false,
4068 ..Default::default()
4069 }
4070 },
4071 ]
4072 );
4073 assert_eq!(
4074 buffer.diagnostic_group::<Point>(1).collect::<Vec<_>>(),
4075 &[
4076 DiagnosticEntry {
4077 range: Point::new(1, 13)..Point::new(1, 15),
4078 diagnostic: Diagnostic {
4079 severity: DiagnosticSeverity::HINT,
4080 message: "error 2 hint 1".to_string(),
4081 group_id: 1,
4082 is_primary: false,
4083 ..Default::default()
4084 }
4085 },
4086 DiagnosticEntry {
4087 range: Point::new(1, 13)..Point::new(1, 15),
4088 diagnostic: Diagnostic {
4089 severity: DiagnosticSeverity::HINT,
4090 message: "error 2 hint 2".to_string(),
4091 group_id: 1,
4092 is_primary: false,
4093 ..Default::default()
4094 }
4095 },
4096 DiagnosticEntry {
4097 range: Point::new(2, 8)..Point::new(2, 17),
4098 diagnostic: Diagnostic {
4099 severity: DiagnosticSeverity::ERROR,
4100 message: "error 2".to_string(),
4101 group_id: 1,
4102 is_primary: true,
4103 ..Default::default()
4104 }
4105 }
4106 ]
4107 );
4108 }
4109
    #[gpui::test(iterations = 100)]
    // Randomized fuzz test for the background scanner: mutates a real temp
    // directory while delivering fs events in random batches, then checks that
    // (a) snapshot invariants hold throughout, (b) a fresh scan of the final
    // tree matches the incrementally-maintained snapshot, and (c) intermediate
    // snapshots can be caught up via `build_update`/`apply_update`.
    fn test_random(mut rng: StdRng) {
        // Both knobs are overridable from the environment for longer runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            // Probability 1.0 forces pure insertions while seeding the tree.
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        let mut initial_snapshot = Snapshot {
            id: 0,
            scan_id: 0,
            abs_path: root_dir.path().into(),
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            removed_entry_ids: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
            root_char_bag: Default::default(),
            next_entry_id: next_entry_id.clone(),
        };
        // Seed the snapshot with its root entry before handing it to a scanner.
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave tree mutations with randomly-sized event deliveries, so
        // the scanner sees events batched and reordered relative to the
        // mutations that produced them.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            // Occasionally capture a snapshot to later test incremental updates.
            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Drain all remaining events so the scanner reaches a settled state.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A from-scratch scan of the final tree must agree with the snapshot
        // that was maintained incrementally through event processing.
        let (notify_tx, _notify_rx) = smol::channel::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Each captured snapshot must be able to catch up to the final state
        // via build_update/apply_update, with and without ignored entries.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries first, since an update built with
                // include_ignored == false will not mention them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, include_ignored);
            prev_snapshot.apply_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
4226
    // Applies one random mutation to the directory tree rooted at `root_path`
    // and returns the fsevents that a file-system watcher would emit for it.
    //
    // With probability `insertion_probability` (or always, when the tree is
    // still empty apart from its root) a new file or directory is created;
    // otherwise, with a small probability a .gitignore is written, and
    // failing that an existing entry is renamed or deleted.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        // Fabricate an fsevent for `path`; the event id only needs to be
        // monotonic-ish, so the current unix time in seconds suffices here.
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            // Create a new file or directory under a randomly-chosen dir.
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Write a .gitignore into a random dir, ignoring a random subset
            // of that dir's descendant files and subdirectories.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // Exclusive upper bound: `subdirs[0]` is `ignore_dir_path`
                // itself, so at most len - 1 dirs are ever ignored.
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Entries are written relative to the .gitignore's directory.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Pick an existing file, or any dir except the root (dirs[0]).
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Move to a dir that is not inside the entry being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale instead of
                // moving under it with a fresh name.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                // Capture the dir's contents *before* deleting it, so events
                // can be emitted for everything that disappeared.
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
4349
4350 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
4351 let child_entries = std::fs::read_dir(&path).unwrap();
4352 let mut dirs = vec![path];
4353 let mut files = Vec::new();
4354 for child_entry in child_entries {
4355 let child_path = child_entry.unwrap().path();
4356 if child_path.is_dir() {
4357 let (child_dirs, child_files) = read_dir_recursive(child_path);
4358 dirs.extend(child_dirs);
4359 files.extend(child_files);
4360 } else {
4361 files.push(child_path);
4362 }
4363 }
4364 (dirs, files)
4365 }
4366
4367 fn gen_name(rng: &mut impl Rng) -> String {
4368 (0..6)
4369 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4370 .map(char::from)
4371 .collect()
4372 }
4373
    impl Snapshot {
        // Test-only helper: asserts the internal consistency of a snapshot.
        fn check_invariants(&self) {
            // `files(true, _)` must visit exactly the file entries stored in
            // `entries_by_path`, and `files(false, _)` must skip ignored ones.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree through `child_entries`, inserting each entry's
            // children at the stack position captured before the loop so they
            // are visited in their original order. NOTE(review): despite the
            // `bfs_paths` name, this pops parents before siblings' subtrees —
            // confirm the intended traversal order if renaming.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            // The order produced by walking `child_entries` must agree with
            // iterating `entries_by_path` directly.
            let dfs_paths = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every tracked ignore must still have both its parent directory
            // and its .gitignore file present as entries.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Test-only helper: flattens the snapshot into (path, inode, is_ignored)
        // tuples sorted by path, optionally excluding ignored entries, so two
        // snapshots can be compared with assert_eq!.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
4425}