1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{
10 repository::{GitFileStatus, GitRepository, RepoPath, RepoPathDescendants},
11 Fs, LineEnding,
12};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 select_biased,
19 task::Poll,
20 Stream, StreamExt,
21};
22use fuzzy::CharBag;
23use git::{DOT_GIT, GITIGNORE};
24use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
25use language::{
26 proto::{
27 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
28 serialize_version,
29 },
30 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
31};
32use lsp::LanguageServerId;
33use parking_lot::Mutex;
34use postage::{
35 barrier,
36 prelude::{Sink as _, Stream as _},
37 watch,
38};
39use smol::channel::{self, Sender};
40use std::{
41 any::Any,
42 cmp::{self, Ordering},
43 convert::TryFrom,
44 ffi::OsStr,
45 fmt,
46 future::Future,
47 mem,
48 ops::{Deref, DerefMut},
49 path::{Path, PathBuf},
50 pin::Pin,
51 sync::{
52 atomic::{AtomicUsize, Ordering::SeqCst},
53 Arc,
54 },
55 time::{Duration, SystemTime},
56};
57use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
58use util::{paths::HOME, ResultExt, TakeUntilExt, TryFutureExt};
59
/// Identifies a worktree within a project. Wraps the id of the worktree's
/// underlying model handle (see `WorktreeId::from_usize(cx.model_id())` below).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
62
/// A tree of files and directories in a project, backed either by the local
/// filesystem (`Local`) or by state replicated from a remote peer (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
67
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    /// Current snapshot of entries, ignores, and git repositories.
    snapshot: LocalSnapshot,
    /// Sends batches of paths to the background scanner for rescanning; the
    /// paired barrier sender is dropped once those paths have been processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Watch channel holding `true` while the background scanner is running.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    /// Present only while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Full diagnostic entries per path, grouped by language server id.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Per-path error/warning counts, grouped by language server id.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether this worktree is shown in the project panel.
    visible: bool,
}
86
/// A worktree replicated from a remote collaborator via RPC updates.
pub struct RemoteWorktree {
    /// Snapshot visible on the foreground thread; refreshed from
    /// `background_snapshot` after each applied update.
    snapshot: Snapshot,
    /// Snapshot mutated on a background task as `UpdateWorktree` messages arrive.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Sender feeding the background update-application task; `None` once updates stop.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters to notify once a given scan id has been observed, ordered by scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
99
/// An immutable view of a worktree's entries and git repositories at a
/// point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root on disk.
    abs_path: Arc<Path>,
    /// File name of the root, used for display and fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    /// Entries ordered by path, for traversal.
    entries_by_path: SumTree<Entry>,
    /// Entries ordered by id, for id-based lookup.
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
122
/// The state of a single git repository discovered within the worktree.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryEntry {
    /// Entry id of the repository's working directory (the folder containing .git).
    pub(crate) work_directory: WorkDirectoryEntry,
    /// Name of the checked-out branch, when one could be read.
    pub(crate) branch: Option<Arc<str>>,
    /// Git status per repository-relative path, ordered by path.
    pub(crate) statuses: TreeMap<RepoPath, GitFileStatus>,
}
129
130fn read_git_status(git_status: i32) -> Option<GitFileStatus> {
131 proto::GitStatus::from_i32(git_status).map(|status| match status {
132 proto::GitStatus::Added => GitFileStatus::Added,
133 proto::GitStatus::Modified => GitFileStatus::Modified,
134 proto::GitStatus::Conflict => GitFileStatus::Conflict,
135 })
136}
137
impl RepositoryEntry {
    /// Returns the current branch name, if one was detected.
    pub fn branch(&self) -> Option<Arc<str>> {
        self.branch.clone()
    }

    /// Returns the project entry id of this repository's working directory.
    pub fn work_directory_id(&self) -> ProjectEntryId {
        *self.work_directory
    }

    /// Resolves this repository's working-directory path within `snapshot`,
    /// if the corresponding entry still exists there.
    pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
        snapshot
            .entry_for_id(self.work_directory_id())
            .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
    }

    /// Returns an aggregate status for the worktree-relative `path`,
    /// combining the statuses of all recorded paths beneath it.
    /// Severity order: `Conflict` > `Modified` > `Added`.
    pub fn status_for_path(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| {
                self.statuses
                    .iter_from(&repo_path)
                    // Only consider entries under the requested path.
                    .take_while(|(key, _)| key.starts_with(&repo_path))
                    // Short-circuit once we've found the highest-severity status.
                    .take_until(|(_, status)| status == &&GitFileStatus::Conflict)
                    .map(|(_, status)| status)
                    // Fold the remaining statuses down to the most severe one.
                    .reduce(
                        |status_first, status_second| match (status_first, status_second) {
                            (GitFileStatus::Conflict, _) | (_, GitFileStatus::Conflict) => {
                                &GitFileStatus::Conflict
                            }
                            (GitFileStatus::Modified, _) | (_, GitFileStatus::Modified) => {
                                &GitFileStatus::Modified
                            }
                            _ => &GitFileStatus::Added,
                        },
                    )
                    .copied()
            })
    }

    /// Returns the status recorded for exactly `path`, with no aggregation
    /// over descendants. Test-only helper.
    #[cfg(any(test, feature = "test-support"))]
    pub fn status_for_file(&self, snapshot: &Snapshot, path: &Path) -> Option<GitFileStatus> {
        self.work_directory
            .relativize(snapshot, path)
            .and_then(|repo_path| (&self.statuses).get(&repo_path))
            .cloned()
    }

    /// Builds a protobuf delta that transforms `other` (the previously
    /// transmitted state of this repository) into `self`. Both status maps
    /// are walked in sorted order, merge-join style: paths present only in
    /// `self`, or whose status changed, become `updated_statuses`; paths
    /// present only in `other` become `removed_repo_paths`.
    pub fn build_update(&self, other: &Self) -> proto::RepositoryEntry {
        let mut updated_statuses: Vec<proto::StatusEntry> = Vec::new();
        let mut removed_statuses: Vec<String> = Vec::new();

        let mut self_statuses = self.statuses.iter().peekable();
        let mut other_statuses = other.statuses.iter().peekable();
        loop {
            match (self_statuses.peek(), other_statuses.peek()) {
                (Some((self_repo_path, self_status)), Some((other_repo_path, other_status))) => {
                    match Ord::cmp(self_repo_path, other_repo_path) {
                        // Path only in `self`: newly tracked status.
                        Ordering::Less => {
                            updated_statuses.push(make_status_entry(self_repo_path, self_status));
                            self_statuses.next();
                        }
                        // Path in both: emit only if the status changed.
                        Ordering::Equal => {
                            if self_status != other_status {
                                updated_statuses
                                    .push(make_status_entry(self_repo_path, self_status));
                            }

                            self_statuses.next();
                            other_statuses.next();
                        }
                        // Path only in `other`: status was removed.
                        Ordering::Greater => {
                            removed_statuses.push(make_repo_path(other_repo_path));
                            other_statuses.next();
                        }
                    }
                }
                (Some((self_repo_path, self_status)), None) => {
                    updated_statuses.push(make_status_entry(self_repo_path, self_status));
                    self_statuses.next();
                }
                (None, Some((other_repo_path, _))) => {
                    removed_statuses.push(make_repo_path(other_repo_path));
                    other_statuses.next();
                }
                (None, None) => break,
            }
        }

        proto::RepositoryEntry {
            work_directory_id: self.work_directory_id().to_proto(),
            branch: self.branch.as_ref().map(|str| str.to_string()),
            removed_repo_paths: removed_statuses,
            updated_statuses,
        }
    }
}
235
236fn make_repo_path(path: &RepoPath) -> String {
237 path.as_os_str().to_string_lossy().to_string()
238}
239
240fn make_status_entry(path: &RepoPath, status: &GitFileStatus) -> proto::StatusEntry {
241 proto::StatusEntry {
242 repo_path: make_repo_path(path),
243 status: match status {
244 GitFileStatus::Added => proto::GitStatus::Added.into(),
245 GitFileStatus::Modified => proto::GitStatus::Modified.into(),
246 GitFileStatus::Conflict => proto::GitStatus::Conflict.into(),
247 },
248 }
249}
250
251impl From<&RepositoryEntry> for proto::RepositoryEntry {
252 fn from(value: &RepositoryEntry) -> Self {
253 proto::RepositoryEntry {
254 work_directory_id: value.work_directory.to_proto(),
255 branch: value.branch.as_ref().map(|str| str.to_string()),
256 updated_statuses: value
257 .statuses
258 .iter()
259 .map(|(repo_path, status)| make_status_entry(repo_path, status))
260 .collect(),
261 removed_repo_paths: Default::default(),
262 }
263 }
264}
265
/// This path corresponds to the 'content path' (the folder that contains the .git),
/// stored relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
269
270impl Default for RepositoryWorkDirectory {
271 fn default() -> Self {
272 RepositoryWorkDirectory(Arc::from(Path::new("")))
273 }
274}
275
276impl AsRef<Path> for RepositoryWorkDirectory {
277 fn as_ref(&self) -> &Path {
278 self.0.as_ref()
279 }
280}
281
/// Newtype over the `ProjectEntryId` of a git repository's working-directory entry.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
284
285impl WorkDirectoryEntry {
286 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
287 worktree.entry_for_id(self.0).and_then(|entry| {
288 path.strip_prefix(&entry.path)
289 .ok()
290 .map(move |path| path.into())
291 })
292 }
293}
294
// Allow a `WorkDirectoryEntry` to be used wherever a `&ProjectEntryId` is expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
302
303impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
304 fn from(value: ProjectEntryId) -> Self {
305 WorkDirectoryEntry(value)
306 }
307}
308
/// A `Snapshot` augmented with the local-only state needed to scan the
/// filesystem: gitignore files and git repositories.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    snapshot: Snapshot,
    /// All of the gitignore files in the worktree, indexed by their relative path.
    /// The boolean indicates whether the gitignore needs to be updated.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, bool)>,
    /// All of the git repositories in the worktree, indexed by the project entry
    /// id of their parent directory.
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
}
319
/// A `LocalSnapshot` being mutated by the background scanner, tracking
/// removed entries so their ids can be reused within the same update.
pub struct LocalMutableSnapshot {
    snapshot: LocalSnapshot,
    /// The ids of all of the entries that were removed from the snapshot
    /// as part of the current update. These entry ids may be re-used
    /// if the same inode is discovered at a new path, or if the given
    /// path is re-created after being deleted.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
}
328
/// Local-only state for a git repository found in the worktree: a handle to
/// the repository plus scan bookkeeping.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Scan id at which this repository entry was last updated.
    pub(crate) scan_id: usize,
    /// Scan id at which the contents of the .git directory last changed;
    /// compared in `changed_repos` to detect repository updates.
    pub(crate) git_dir_scan_id: usize,
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
338
339impl LocalRepositoryEntry {
340 // Note that this path should be relative to the worktree root.
341 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
342 path.starts_with(self.git_dir_path.as_ref())
343 }
344}
345
// Expose the inner `Snapshot`'s API directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
353
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
359
// Expose the inner `LocalSnapshot`'s API directly on `LocalMutableSnapshot`.
impl Deref for LocalMutableSnapshot {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
367
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalMutableSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
373
/// Messages sent from the background scanner to the `LocalWorktree` model.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Paths (with their entry ids) that changed, and how.
        changes: HashMap<(Arc<Path>, ProjectEntryId), PathChange>,
        /// Dropped after the update is applied, releasing any waiter that
        /// requested this rescan.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}
383
/// State held while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    /// Publishes each new snapshot to the task that diffs and transmits updates.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
390
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// Entries changed; maps (path, entry id) to the kind of change.
    UpdatedEntries(HashMap<(Arc<Path>, ProjectEntryId), PathChange>),
    /// Git repositories changed; maps work-directory path to repository state.
    UpdatedGitRepositories(HashMap<Arc<Path>, LocalRepositoryEntry>),
}
395
// Register `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
399
impl Worktree {
    /// Creates a local worktree rooted at `path`: stats the root, seeds an
    /// initial snapshot, and spawns a background scanner that keeps the
    /// snapshot in sync with filesystem events.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // Start at scan 1 with no completed scan yet.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan-state messages from the background scanner into
            // this model, updating the snapshot and emitting events.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Releases any waiter blocked on this rescan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // The scanner runs on the background executor, watching the root
            // for filesystem events and processing explicit rescan requests.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        next_entry_id,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a remote worktree from collaborator-provided metadata. A
    /// background task applies incoming `UpdateWorktree` messages to a shared
    /// snapshot, and a foreground task mirrors that snapshot into the model.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates to the shared snapshot off the main thread,
            // then signal that a new snapshot is available.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Mirror each applied snapshot into the model and wake any
            // subscribers waiting for a particular scan id.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Resolve subscriptions whose scan id has now been observed.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Returns the local variant, or `None` if this worktree is remote.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, or `None` if this worktree is local.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of `as_local`.
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of `as_remote`.
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a clone of the current (plain) snapshot, regardless of variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// Local worktrees are always replica 0; remote worktrees carry the
    /// replica id assigned by the server.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries, flattened to
    /// (path, language server id, summary) tuples.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// Absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
693
694impl LocalWorktree {
695 pub fn contains_abs_path(&self, path: &Path) -> bool {
696 path.starts_with(&self.abs_path)
697 }
698
699 fn absolutize(&self, path: &Path) -> PathBuf {
700 if path.file_name().is_some() {
701 self.abs_path.join(path)
702 } else {
703 self.abs_path.to_path_buf()
704 }
705 }
706
    /// Loads the file at `path` into a new `Buffer` model with the given
    /// remote `id`, reading its contents and git diff base off the main thread.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Construct the text buffer on the background executor.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|_| Buffer::build(text_buffer, diff_base, Some(Arc::new(file)))))
        })
    }
725
726 pub fn diagnostics_for_path(
727 &self,
728 path: &Path,
729 ) -> Vec<(
730 LanguageServerId,
731 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
732 )> {
733 self.diagnostics.get(path).cloned().unwrap_or_default()
734 }
735
    /// Removes all diagnostics and summaries produced by `server_id`,
    /// notifying collaborators (when shared) with zeroed summaries so they
    /// clear their state too.
    pub fn clear_diagnostics_for_language_server(
        &mut self,
        server_id: LanguageServerId,
        _: &mut ModelContext<Worktree>,
    ) {
        let worktree_id = self.id().to_proto();
        self.diagnostic_summaries
            .retain(|path, summaries_by_server_id| {
                if summaries_by_server_id.remove(&server_id).is_some() {
                    if let Some(share) = self.share.as_ref() {
                        // Zero counts tell the remote side to drop this summary.
                        self.client
                            .send(proto::UpdateDiagnosticSummary {
                                project_id: share.project_id,
                                worktree_id,
                                summary: Some(proto::DiagnosticSummary {
                                    path: path.to_string_lossy().to_string(),
                                    language_server_id: server_id.0 as u64,
                                    error_count: 0,
                                    warning_count: 0,
                                }),
                            })
                            .log_err();
                    }
                    // Drop the path entirely once no server has summaries for it.
                    !summaries_by_server_id.is_empty()
                } else {
                    true
                }
            });

        self.diagnostics.retain(|_, diagnostics_by_server_id| {
            // Entries are kept sorted by server id, so binary search works here.
            if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                diagnostics_by_server_id.remove(ix);
                !diagnostics_by_server_id.is_empty()
            } else {
                true
            }
        });
    }
774
    /// Replaces the diagnostics for (`worktree_path`, `server_id`) with
    /// `diagnostics`, pruning empty entries and broadcasting the new summary
    /// to collaborators when shared. Returns `true` if the summary changed
    /// from or to a non-empty state.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: remove its entry, and
            // remove the whole path once no server has entries for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            // Keep the per-path list sorted by server id for binary search.
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
834
    /// Installs a new snapshot, publishes it to collaborators (when shared),
    /// and emits `UpdatedGitRepositories` if any repository state changed.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        // Diff repository state before the old snapshot is replaced.
        let updated_repos =
            self.changed_repos(&self.git_repositories, &new_snapshot.git_repositories);

        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
849
    /// Merge-joins two id-sorted repository maps and returns the repositories
    /// that were added, removed, or whose .git directory changed, keyed by the
    /// work-directory path resolved against the current snapshot.
    fn changed_repos(
        &self,
        old_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
        new_repos: &TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    ) -> HashMap<Arc<Path>, LocalRepositoryEntry> {
        let mut diff = HashMap::default();
        let mut old_repos = old_repos.iter().peekable();
        let mut new_repos = new_repos.iter().peekable();
        loop {
            match (old_repos.peek(), new_repos.peek()) {
                (Some((old_entry_id, old_repo)), Some((new_entry_id, new_repo))) => {
                    match Ord::cmp(old_entry_id, new_entry_id) {
                        // Repository removed: report its last known state.
                        Ordering::Less => {
                            if let Some(entry) = self.entry_for_id(**old_entry_id) {
                                diff.insert(entry.path.clone(), (*old_repo).clone());
                            }
                            old_repos.next();
                        }
                        // Present in both: report only if .git contents changed.
                        Ordering::Equal => {
                            if old_repo.git_dir_scan_id != new_repo.git_dir_scan_id {
                                if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                    diff.insert(entry.path.clone(), (*new_repo).clone());
                                }
                            }

                            old_repos.next();
                            new_repos.next();
                        }
                        // Repository added.
                        Ordering::Greater => {
                            if let Some(entry) = self.entry_for_id(**new_entry_id) {
                                diff.insert(entry.path.clone(), (*new_repo).clone());
                            }
                            new_repos.next();
                        }
                    }
                }
                (Some((old_entry_id, old_repo)), None) => {
                    if let Some(entry) = self.entry_for_id(**old_entry_id) {
                        diff.insert(entry.path.clone(), (*old_repo).clone());
                    }
                    old_repos.next();
                }
                (None, Some((new_entry_id, new_repo))) => {
                    if let Some(entry) = self.entry_for_id(**new_entry_id) {
                        diff.insert(entry.path.clone(), (*new_repo).clone());
                    }
                    new_repos.next();
                }
                (None, None) => break,
            }
        }
        diff
    }
903
904 pub fn scan_complete(&self) -> impl Future<Output = ()> {
905 let mut is_scanning_rx = self.is_scanning.1.clone();
906 async move {
907 let mut is_scanning = is_scanning_rx.borrow().clone();
908 while is_scanning {
909 if let Some(value) = is_scanning_rx.recv().await {
910 is_scanning = value;
911 } else {
912 break;
913 }
914 }
915 }
916 }
917
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
921
922 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
923 proto::WorktreeMetadata {
924 id: self.id().to_proto(),
925 root_name: self.root_name().to_string(),
926 visible: self.visible,
927 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
928 }
929 }
930
    /// Loads the file at the worktree-relative `path`, returning its `File`
    /// metadata, text contents, and (when the file is inside a git
    /// repository) the index text used as the diff base.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // If the file belongs to a git repository, read its index text on the
        // background executor, concurrently with loading the file itself.
        if let Some(repo) = snapshot.repository_for_path(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
985
986 pub fn save_buffer(
987 &self,
988 buffer_handle: ModelHandle<Buffer>,
989 path: Arc<Path>,
990 has_changed_file: bool,
991 cx: &mut ModelContext<Worktree>,
992 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
993 let handle = cx.handle();
994 let buffer = buffer_handle.read(cx);
995
996 let rpc = self.client.clone();
997 let buffer_id = buffer.remote_id();
998 let project_id = self.share.as_ref().map(|share| share.project_id);
999
1000 let text = buffer.as_rope().clone();
1001 let fingerprint = text.fingerprint();
1002 let version = buffer.version();
1003 let save = self.write_file(path, text, buffer.line_ending(), cx);
1004
1005 cx.as_mut().spawn(|mut cx| async move {
1006 let entry = save.await?;
1007
1008 if has_changed_file {
1009 let new_file = Arc::new(File {
1010 entry_id: entry.id,
1011 worktree: handle,
1012 path: entry.path,
1013 mtime: entry.mtime,
1014 is_local: true,
1015 is_deleted: false,
1016 });
1017
1018 if let Some(project_id) = project_id {
1019 rpc.send(proto::UpdateBufferFile {
1020 project_id,
1021 buffer_id,
1022 file: Some(new_file.to_proto()),
1023 })
1024 .log_err();
1025 }
1026
1027 buffer_handle.update(&mut cx, |buffer, cx| {
1028 if has_changed_file {
1029 buffer.file_updated(new_file, cx).detach();
1030 }
1031 });
1032 }
1033
1034 if let Some(project_id) = project_id {
1035 rpc.send(proto::BufferSaved {
1036 project_id,
1037 buffer_id,
1038 version: serialize_version(&version),
1039 mtime: Some(entry.mtime.into()),
1040 fingerprint: serialize_fingerprint(fingerprint),
1041 })?;
1042 }
1043
1044 buffer_handle.update(&mut cx, |buffer, cx| {
1045 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
1046 });
1047
1048 Ok((version, fingerprint, entry.mtime))
1049 })
1050 }
1051
    /// Creates a file (empty) or directory at the worktree-relative `path`
    /// on disk, then refreshes and returns the corresponding entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Create an empty file with default contents and line ending.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1078
    /// Writes `text` to the worktree-relative `path` with the given line
    /// ending, then refreshes and returns the corresponding entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
1101
    /// Deletes the entry with `entry_id` from disk (recursively for
    /// directories), then asks the background scanner to rescan the deleted
    /// path and waits for that rescan to complete. Returns `None` if the
    /// entry no longer exists in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize so the scanner sees the same path the watcher reports.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // The barrier resolves when the scanner finishes with this path.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
1144
1145 pub fn rename_entry(
1146 &self,
1147 entry_id: ProjectEntryId,
1148 new_path: impl Into<Arc<Path>>,
1149 cx: &mut ModelContext<Worktree>,
1150 ) -> Option<Task<Result<Entry>>> {
1151 let old_path = self.entry_for_id(entry_id)?.path.clone();
1152 let new_path = new_path.into();
1153 let abs_old_path = self.absolutize(&old_path);
1154 let abs_new_path = self.absolutize(&new_path);
1155 let fs = self.fs.clone();
1156 let rename = cx.background().spawn(async move {
1157 fs.rename(&abs_old_path, &abs_new_path, Default::default())
1158 .await
1159 });
1160
1161 Some(cx.spawn(|this, mut cx| async move {
1162 rename.await?;
1163 this.update(&mut cx, |this, cx| {
1164 this.as_local_mut()
1165 .unwrap()
1166 .refresh_entry(new_path.clone(), Some(old_path), cx)
1167 })
1168 .await
1169 }))
1170 }
1171
1172 pub fn copy_entry(
1173 &self,
1174 entry_id: ProjectEntryId,
1175 new_path: impl Into<Arc<Path>>,
1176 cx: &mut ModelContext<Worktree>,
1177 ) -> Option<Task<Result<Entry>>> {
1178 let old_path = self.entry_for_id(entry_id)?.path.clone();
1179 let new_path = new_path.into();
1180 let abs_old_path = self.absolutize(&old_path);
1181 let abs_new_path = self.absolutize(&new_path);
1182 let fs = self.fs.clone();
1183 let copy = cx.background().spawn(async move {
1184 copy_recursive(
1185 fs.as_ref(),
1186 &abs_old_path,
1187 &abs_new_path,
1188 Default::default(),
1189 )
1190 .await
1191 });
1192
1193 Some(cx.spawn(|this, mut cx| async move {
1194 copy.await?;
1195 this.update(&mut cx, |this, cx| {
1196 this.as_local_mut()
1197 .unwrap()
1198 .refresh_entry(new_path.clone(), None, cx)
1199 })
1200 .await
1201 }))
1202 }
1203
    /// Notifies the background scanner that `path` (and, for renames,
    /// `old_path`) changed on disk, waits for the rescan to complete, and
    /// returns the refreshed entry for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A relative path with no file name refers to the worktree root
            // itself, so use the canonicalized root directly.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Hand the paths to the scanner and wait on the barrier until it
            // has finished processing them.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            // The worktree is held weakly; it may have been dropped while the
            // scan was in flight.
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1241
    /// Starts (or re-confirms) sharing this worktree under `project_id`,
    /// streaming snapshot updates to the server until the share is dropped.
    /// The returned task resolves once the first full snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve the returned task immediately and nudge
            // the update loop in case it paused after a failed send.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send all known diagnostic summaries up front; bail out of
            // sharing entirely if any of them fails to send.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start diffing against an empty snapshot so the first
                    // update sent is a complete copy of the worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Use tiny chunks under test to exercise update splitting.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Discard any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                // Wait for a resume signal (sent by a later
                                // `share` call); stop if the channel closed.
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // The first snapshot has been fully sent; resolve the
                        // task returned from `share`.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1326
1327 pub fn unshare(&mut self) {
1328 self.share.take();
1329 }
1330
1331 pub fn is_shared(&self) -> bool {
1332 self.share.is_some()
1333 }
1334}
1335
impl RemoteWorktree {
    /// Returns a clone of the worktree's current snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks the worktree as disconnected from its host: stops accepting
    /// remote updates and drops all pending snapshot subscriptions (dropping
    /// their senders makes the corresponding `wait_for_snapshot` futures
    /// resolve with an error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer via RPC, then applies the
    /// returned version, fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Enqueues a worktree update received from the host. Silently ignored
    /// once the worktree has disconnected (the channel is dropped then).
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with at least the given scan id has been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once the worktree has observed a
    /// snapshot with the given scan id, or errs if the worktree disconnects
    /// before that happens.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender causes `rx.await` below to return an error.
            drop(tx);
        } else {
            // Keep `snapshot_subscriptions` sorted by scan id; both `Ok` and
            // `Err` yield a valid insertion index.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Records (or clears, when the summary is empty) the diagnostic summary
    /// reported by a language server for the given path.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            // Remove this server's entry; drop the whole path entry when no
            // server reports diagnostics for it anymore.
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry into the background snapshot once the snapshot for
    /// `scan_id` has been observed, then publishes the updated snapshot.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes an entry from the background snapshot once the snapshot for
    /// `scan_id` has been observed, then publishes the updated snapshot.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1477
impl Snapshot {
    /// The worktree's stable identifier.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree's root on the host's filesystem.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) an entry deserialized from the wire protocol,
    /// keeping the path-keyed and id-keyed trees consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If the id was previously mapped to a different path, drop the stale
        // path-keyed entry before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes an entry and all entries beneath its path, returning the
    /// removed entry's path (or `None` if the id is unknown).
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        // Rebuild the path tree: keep everything before the removed path,
        // skip the removed path and its descendants (removing their ids too),
        // then append the remaining suffix.
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies a worktree update received from the host: removed and updated
    /// entries, removed and updated repositories, and the new scan ids.
    pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, remove its old path from the path tree.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Sort so the retain below can binary-search removed work directories.
        update.removed_repositories.sort_unstable();
        self.repository_entries.retain(|_, entry| {
            if let Ok(_) = update
                .removed_repositories
                .binary_search(&entry.work_directory.to_proto())
            {
                false
            } else {
                true
            }
        });

        for repository in update.updated_repositories {
            let work_directory_entry: WorkDirectoryEntry =
                ProjectEntryId::from_proto(repository.work_directory_id).into();

            if let Some(entry) = self.entry_for_id(*work_directory_entry) {
                // Statuses whose value can't be decoded are skipped.
                let mut statuses = TreeMap::default();
                for status_entry in repository.updated_statuses {
                    let Some(git_file_status) = read_git_status(status_entry.status) else {
                        continue;
                    };

                    let repo_path = RepoPath::new(status_entry.repo_path.into());
                    statuses.insert(repo_path, git_file_status);
                }

                let work_directory = RepositoryWorkDirectory(entry.path.clone());
                if self.repository_entries.get(&work_directory).is_some() {
                    // Merge the incremental status update into the existing
                    // repository entry.
                    self.repository_entries.update(&work_directory, |repo| {
                        repo.branch = repository.branch.map(Into::into);
                        repo.statuses.insert_tree(statuses);

                        for repo_path in repository.removed_repo_paths {
                            let repo_path = RepoPath::new(repo_path.into());
                            repo.statuses.remove(&repo_path);
                        }
                    });
                } else {
                    self.repository_entries.insert(
                        work_directory,
                        RepositoryEntry {
                            work_directory: work_directory_entry,
                            branch: repository.branch.map(Into::into),
                            statuses,
                        },
                    )
                }
            } else {
                log::error!("no work directory entry for repository {:?}", repository)
            }
        }

        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Total number of file entries in this snapshot.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries in this snapshot.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Creates a traversal positioned at the `start_offset`-th entry, counting
    /// only entries matching the `include_dirs`/`include_ignored` filters.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Creates a traversal positioned at the given path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories).
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over all git repositories, as (work directory path, entry) pairs.
    pub fn repositories(&self) -> impl Iterator<Item = (&Arc<Path>, &RepositoryEntry)> {
        self.repository_entries
            .iter()
            .map(|(path, entry)| (&path.0, entry))
    }

    /// Get the repository whose work directory is exactly the given path.
    pub fn repository_for_work_directory(&self, path: &Path) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&RepositoryWorkDirectory(path.into()))
            .cloned()
    }

    /// Get the repository whose work directory contains the given path.
    pub fn repository_for_path(&self, path: &Path) -> Option<RepositoryEntry> {
        // Among work directories that are prefixes of `path`, pick the
        // longest (i.e. the innermost repository).
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if path.starts_with(&work_directory.0) {
                if work_directory.0.as_os_str().len() >= max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    break;
                }
            }
        }

        current_candidate.cloned()
    }

    /// Given an ordered iterator of entries, returns an iterator of those entries,
    /// along with their containing git repository.
    pub fn entries_with_repositories<'a>(
        &'a self,
        entries: impl 'a + Iterator<Item = &'a Entry>,
    ) -> impl 'a + Iterator<Item = (&'a Entry, Option<&'a RepositoryEntry>)> {
        // Maintains a stack of repositories whose work directories contain
        // the current entry; relies on `entries` being in path order.
        let mut containing_repos = Vec::<(&Arc<Path>, &RepositoryEntry)>::new();
        let mut repositories = self.repositories().peekable();
        entries.map(move |entry| {
            // Pop repositories that no longer contain the current entry.
            while let Some((repo_path, _)) = containing_repos.last() {
                if !entry.path.starts_with(repo_path) {
                    containing_repos.pop();
                } else {
                    break;
                }
            }
            // Push repositories whose work directory contains the entry.
            while let Some((repo_path, _)) = repositories.peek() {
                if entry.path.starts_with(repo_path) {
                    containing_repos.push(repositories.next().unwrap());
                } else {
                    break;
                }
            }
            // The innermost containing repository is at the top of the stack.
            let repo = containing_repos.last().map(|(_, repo)| *repo);
            (entry, repo)
        })
    }

    /// Iterates over all entry paths, excluding the root's empty path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// Iterates over all entries beneath `parent_path` (at any depth),
    /// excluding `parent_path` itself.
    fn descendent_entries<'a>(
        &'a self,
        include_dirs: bool,
        include_ignored: bool,
        parent_path: &'a Path,
    ) -> DescendentEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Left, &());
        let mut traversal = Traversal {
            cursor,
            include_dirs,
            include_ignored,
        };

        // If the seek didn't move the cursor onto a counted entry, step
        // forward so iteration starts at the first descendant.
        if traversal.end_offset() == traversal.start_offset() {
            traversal.advance();
        }

        DescendentEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if the snapshot has been scanned.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The repository whose work directory is the worktree root, if any.
    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&RepositoryWorkDirectory(Path::new("").into()))
            .map(|entry| entry.to_owned())
    }

    /// Iterates over all repository entries in this snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }

    /// The id of the most recently started scan reflected in this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at exactly the given worktree-relative path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on the next entry when `path` is
                // absent; only return an exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by its project entry id.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at the given path, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1821
impl LocalSnapshot {
    /// The local (on-disk) state for the given repository entry, if any.
    pub(crate) fn get_local_repo(&self, repo: &RepositoryEntry) -> Option<&LocalRepositoryEntry> {
        self.git_repositories.get(&repo.work_directory.0)
    }

    /// Finds the repository whose `.git` directory contains the given path.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
        self.git_repositories
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))
    }

    /// Builds an update message containing the entire snapshot (no diffing).
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
            removed_repositories: Default::default(),
        }
    }

    /// Builds an update message describing the difference between this
    /// snapshot and `other` (an older snapshot): entries and repositories
    /// that were added, changed, or removed.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        // Sorted merge join over both id-keyed trees; relies on the cursors
        // yielding entries in ascending id order.
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Only in `self`: a new entry.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // In both: changed only if the scan ids differ.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Only in `other`: the entry was removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        // Same merge-join scheme for repositories, keyed by work directory.
        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
        let mut removed_repositories = Vec::new();
        let mut self_repos = self.snapshot.repository_entries.iter().peekable();
        let mut other_repos = other.snapshot.repository_entries.iter().peekable();
        loop {
            match (self_repos.peek(), other_repos.peek()) {
                (Some((self_work_dir, self_repo)), Some((other_work_dir, other_repo))) => {
                    match Ord::cmp(self_work_dir, other_work_dir) {
                        Ordering::Less => {
                            updated_repositories.push((*self_repo).into());
                            self_repos.next();
                        }
                        Ordering::Equal => {
                            if self_repo != other_repo {
                                updated_repositories.push(self_repo.build_update(other_repo));
                            }

                            self_repos.next();
                            other_repos.next();
                        }
                        Ordering::Greater => {
                            removed_repositories.push(other_repo.work_directory.to_proto());
                            other_repos.next();
                        }
                    }
                }
                (Some((_, self_repo)), None) => {
                    updated_repositories.push((*self_repo).into());
                    self_repos.next();
                }
                (None, Some((_, other_repo))) => {
                    removed_repositories.push(other_repo.work_directory.to_proto());
                    other_repos.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }

    /// Inserts (or replaces) an entry, keeping both trees consistent. Also
    /// reloads gitignore rules when the entry is a `.gitignore` file, and
    /// preserves a previously-resolved directory kind for pending dirs.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // Parse the .gitignore synchronously; a failure only logs, the
            // entry itself is still inserted.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path
                        .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true));
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        // Don't downgrade an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If the path was previously occupied by a different entry, drop that
        // entry's id mapping.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Registers the git repository whose `.git` directory is at
    /// `parent_path`, reading its branch and statuses. Returns `None` if the
    /// repository is nested inside repository metadata or its work directory
    /// has no entry.
    fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
        let abs_path = self.abs_path.join(&parent_path);
        // The work directory is the parent of the `.git` directory.
        let work_dir: Arc<Path> = parent_path.parent().unwrap().into();

        // Guard against repositories inside the repository metadata
        if work_dir
            .components()
            .find(|component| component.as_os_str() == *DOT_GIT)
            .is_some()
        {
            return None;
        };

        let work_dir_id = self
            .entry_for_path(work_dir.clone())
            .map(|entry| entry.id)?;

        if self.git_repositories.get(&work_dir_id).is_none() {
            let repo = fs.open_repo(abs_path.as_path())?;
            let work_directory = RepositoryWorkDirectory(work_dir.clone());
            let scan_id = self.scan_id;

            let repo_lock = repo.lock();

            self.repository_entries.insert(
                work_directory,
                RepositoryEntry {
                    work_directory: work_dir_id.into(),
                    branch: repo_lock.branch_name().map(Into::into),
                    statuses: repo_lock.statuses().unwrap_or_default(),
                },
            );
            // Release the repository lock before storing the handle.
            drop(repo_lock);

            self.git_repositories.insert(
                work_dir_id,
                LocalRepositoryEntry {
                    scan_id,
                    git_dir_scan_id: scan_id,
                    repo_ptr: repo,
                    git_dir_path: parent_path.clone(),
                },
            )
        }

        Some(())
    }

    /// Collects the inodes of all known ancestor entries of `path`
    /// (excluding `path` itself).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignore rules that applies to `abs_path`,
    /// outermost ancestor first. Collapses to "ignore everything" as soon as
    /// any ancestor (or the path itself) is ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Apply the collected rules from the outermost ancestor inward.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
2092
impl LocalMutableSnapshot {
    /// Keeps entry ids stable across rescans: reuse the id of a previously
    /// removed entry with the same inode, or of an existing entry at the
    /// same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Inserts an entry into the snapshot, reusing a stable id when possible.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        self.reuse_entry_id(&mut entry);
        self.snapshot.insert_entry(entry, fs)
    }

    /// Records the scanned contents of a directory: marks the parent as fully
    /// scanned, registers its gitignore and git repository (if present), and
    /// inserts all child entries.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        // Only directories can be populated; a pending dir becomes a real one.
        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            let abs_parent_path = self.abs_path.join(&parent_path).into();
            self.ignores_by_parent_abs_path
                .insert(abs_parent_path, (ignore, false));
        }

        // Populating a `.git` directory means we discovered a repository.
        if parent_path.file_name() == Some(&DOT_GIT) {
            self.build_repo(parent_path, fs);
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Removes `path` and everything beneath it from the snapshot, recording
    /// the removed entry ids (keyed by inode) so they can be reused if the
    /// same files reappear. Also invalidates gitignore state when a
    /// `.gitignore` file is removed.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path tree into [before `path`], [`path` and its
            // descendants], and [after], keeping only the outer two.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the parent directory's ignore rules as needing an update.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, needs_update)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *needs_update = true;
            }
        }
    }
}
2195
2196async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
2197 let contents = fs.load(abs_path).await?;
2198 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
2199 let mut builder = GitignoreBuilder::new(parent);
2200 for line in contents.lines() {
2201 builder.add_line(Some(abs_path.into()), line)?;
2202 }
2203 Ok(builder.build()?)
2204}
2205
2206impl WorktreeId {
2207 pub fn from_usize(handle_id: usize) -> Self {
2208 Self(handle_id)
2209 }
2210
2211 pub(crate) fn from_proto(id: u64) -> Self {
2212 Self(id as usize)
2213 }
2214
2215 pub fn to_proto(&self) -> u64 {
2216 self.0 as u64
2217 }
2218
2219 pub fn to_usize(&self) -> usize {
2220 self.0
2221 }
2222}
2223
2224impl fmt::Display for WorktreeId {
2225 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2226 self.0.fmt(f)
2227 }
2228}
2229
2230impl Deref for Worktree {
2231 type Target = Snapshot;
2232
2233 fn deref(&self) -> &Self::Target {
2234 match self {
2235 Worktree::Local(worktree) => &worktree.snapshot,
2236 Worktree::Remote(worktree) => &worktree.snapshot,
2237 }
2238 }
2239}
2240
2241impl Deref for LocalWorktree {
2242 type Target = LocalSnapshot;
2243
2244 fn deref(&self) -> &Self::Target {
2245 &self.snapshot
2246 }
2247}
2248
2249impl Deref for RemoteWorktree {
2250 type Target = Snapshot;
2251
2252 fn deref(&self) -> &Self::Target {
2253 &self.snapshot
2254 }
2255}
2256
impl fmt::Debug for LocalWorktree {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the snapshot's `Debug` output, which carries all of
        // the worktree's interesting state.
        self.snapshot.fmt(f)
    }
}
2262
2263impl fmt::Debug for Snapshot {
2264 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2265 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2266 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2267
2268 impl<'a> fmt::Debug for EntriesByPath<'a> {
2269 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2270 f.debug_map()
2271 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2272 .finish()
2273 }
2274 }
2275
2276 impl<'a> fmt::Debug for EntriesById<'a> {
2277 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2278 f.debug_list().entries(self.0.iter()).finish()
2279 }
2280 }
2281
2282 f.debug_struct("Snapshot")
2283 .field("id", &self.id)
2284 .field("root_name", &self.root_name)
2285 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2286 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2287 .finish()
2288 }
2289}
2290
/// A handle to a file that lives inside a worktree.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file on disk.
    pub mtime: SystemTime,
    // Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the file belongs to a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    // Whether the file has been deleted from its worktree.
    pub(crate) is_deleted: bool,
}
2300
2301impl language::File for File {
2302 fn as_local(&self) -> Option<&dyn language::LocalFile> {
2303 if self.is_local {
2304 Some(self)
2305 } else {
2306 None
2307 }
2308 }
2309
2310 fn mtime(&self) -> SystemTime {
2311 self.mtime
2312 }
2313
2314 fn path(&self) -> &Arc<Path> {
2315 &self.path
2316 }
2317
2318 fn full_path(&self, cx: &AppContext) -> PathBuf {
2319 let mut full_path = PathBuf::new();
2320 let worktree = self.worktree.read(cx);
2321
2322 if worktree.is_visible() {
2323 full_path.push(worktree.root_name());
2324 } else {
2325 let path = worktree.abs_path();
2326
2327 if worktree.is_local() && path.starts_with(HOME.as_path()) {
2328 full_path.push("~");
2329 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2330 } else {
2331 full_path.push(path)
2332 }
2333 }
2334
2335 if self.path.components().next().is_some() {
2336 full_path.push(&self.path);
2337 }
2338
2339 full_path
2340 }
2341
2342 /// Returns the last component of this handle's absolute path. If this handle refers to the root
2343 /// of its worktree, then this method will return the name of the worktree itself.
2344 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2345 self.path
2346 .file_name()
2347 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2348 }
2349
2350 fn is_deleted(&self) -> bool {
2351 self.is_deleted
2352 }
2353
2354 fn as_any(&self) -> &dyn Any {
2355 self
2356 }
2357
2358 fn to_proto(&self) -> rpc::proto::File {
2359 rpc::proto::File {
2360 worktree_id: self.worktree.id() as u64,
2361 entry_id: self.entry_id.to_proto(),
2362 path: self.path.to_string_lossy().into(),
2363 mtime: Some(self.mtime.into()),
2364 is_deleted: self.is_deleted,
2365 }
2366 }
2367}
2368
2369impl language::LocalFile for File {
2370 fn abs_path(&self, cx: &AppContext) -> PathBuf {
2371 self.worktree
2372 .read(cx)
2373 .as_local()
2374 .unwrap()
2375 .abs_path
2376 .join(&self.path)
2377 }
2378
2379 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2380 let worktree = self.worktree.read(cx).as_local().unwrap();
2381 let abs_path = worktree.absolutize(&self.path);
2382 let fs = worktree.fs.clone();
2383 cx.background()
2384 .spawn(async move { fs.load(&abs_path).await })
2385 }
2386
2387 fn buffer_reloaded(
2388 &self,
2389 buffer_id: u64,
2390 version: &clock::Global,
2391 fingerprint: RopeFingerprint,
2392 line_ending: LineEnding,
2393 mtime: SystemTime,
2394 cx: &mut AppContext,
2395 ) {
2396 let worktree = self.worktree.read(cx).as_local().unwrap();
2397 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2398 worktree
2399 .client
2400 .send(proto::BufferReloaded {
2401 project_id,
2402 buffer_id,
2403 version: serialize_version(version),
2404 mtime: Some(mtime.into()),
2405 fingerprint: serialize_fingerprint(fingerprint),
2406 line_ending: serialize_line_ending(line_ending) as i32,
2407 })
2408 .log_err();
2409 }
2410 }
2411}
2412
2413impl File {
2414 pub fn from_proto(
2415 proto: rpc::proto::File,
2416 worktree: ModelHandle<Worktree>,
2417 cx: &AppContext,
2418 ) -> Result<Self> {
2419 let worktree_id = worktree
2420 .read(cx)
2421 .as_remote()
2422 .ok_or_else(|| anyhow!("not remote"))?
2423 .id();
2424
2425 if worktree_id.to_proto() != proto.worktree_id {
2426 return Err(anyhow!("worktree id does not match file"));
2427 }
2428
2429 Ok(Self {
2430 worktree,
2431 path: Path::new(&proto.path).into(),
2432 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2433 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2434 is_local: false,
2435 is_deleted: proto.is_deleted,
2436 })
2437 }
2438
2439 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2440 file.and_then(|f| f.as_any().downcast_ref())
2441 }
2442
2443 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2444 self.worktree.read(cx).id()
2445 }
2446
2447 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2448 if self.is_deleted {
2449 None
2450 } else {
2451 Some(self.entry_id)
2452 }
2453 }
2454}
2455
/// A single file or directory tracked in a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable id of this entry within the project.
    pub id: ProjectEntryId,
    /// Whether this is a file or a (possibly not-yet-scanned) directory.
    pub kind: EntryKind,
    /// Path of the entry, relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode of the entry.
    pub inode: u64,
    /// Modification time reported by the filesystem.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2466
/// The kind of a worktree [`Entry`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents have not yet been scanned.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, with a precomputed character bag for fuzzy matching.
    File(CharBag),
}
2473
/// How a path changed between two consecutive snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The entry was newly created.
    Added,
    /// The entry was deleted.
    Removed,
    /// The entry already existed and was modified.
    Updated,
    /// The entry was created or modified — used when the scanner cannot
    /// distinguish the two (e.g. for events received during the initial scan).
    AddedOrUpdated,
}
2481
2482impl Entry {
2483 fn new(
2484 path: Arc<Path>,
2485 metadata: &fs::Metadata,
2486 next_entry_id: &AtomicUsize,
2487 root_char_bag: CharBag,
2488 ) -> Self {
2489 Self {
2490 id: ProjectEntryId::new(next_entry_id),
2491 kind: if metadata.is_dir {
2492 EntryKind::PendingDir
2493 } else {
2494 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2495 },
2496 path,
2497 inode: metadata.inode,
2498 mtime: metadata.mtime,
2499 is_symlink: metadata.is_symlink,
2500 is_ignored: false,
2501 }
2502 }
2503
2504 pub fn is_dir(&self) -> bool {
2505 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2506 }
2507
2508 pub fn is_file(&self) -> bool {
2509 matches!(self.kind, EntryKind::File(_))
2510 }
2511}
2512
2513impl sum_tree::Item for Entry {
2514 type Summary = EntrySummary;
2515
2516 fn summary(&self) -> Self::Summary {
2517 let visible_count = if self.is_ignored { 0 } else { 1 };
2518 let file_count;
2519 let visible_file_count;
2520 if self.is_file() {
2521 file_count = 1;
2522 visible_file_count = visible_count;
2523 } else {
2524 file_count = 0;
2525 visible_file_count = 0;
2526 }
2527
2528 EntrySummary {
2529 max_path: self.path.clone(),
2530 count: 1,
2531 visible_count,
2532 file_count,
2533 visible_file_count,
2534 }
2535 }
2536}
2537
2538impl sum_tree::KeyedItem for Entry {
2539 type Key = PathKey;
2540
2541 fn key(&self) -> Self::Key {
2542 PathKey(self.path.clone())
2543 }
2544}
2545
/// Aggregated data for a subtree of [`Entry`] values in the path-ordered tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (rightmost) path in the summarized subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2554
2555impl Default for EntrySummary {
2556 fn default() -> Self {
2557 Self {
2558 max_path: Arc::from(Path::new("")),
2559 count: 0,
2560 visible_count: 0,
2561 file_count: 0,
2562 visible_file_count: 0,
2563 }
2564 }
2565}
2566
2567impl sum_tree::Summary for EntrySummary {
2568 type Context = ();
2569
2570 fn add_summary(&mut self, rhs: &Self, _: &()) {
2571 self.max_path = rhs.max_path.clone();
2572 self.count += rhs.count;
2573 self.visible_count += rhs.visible_count;
2574 self.file_count += rhs.file_count;
2575 self.visible_file_count += rhs.visible_file_count;
2576 }
2577}
2578
/// An entry in the id-ordered tree, mapping a stable [`ProjectEntryId`] back
/// to its current path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last updated.
    scan_id: usize,
}
2586
2587impl sum_tree::Item for PathEntry {
2588 type Summary = PathEntrySummary;
2589
2590 fn summary(&self) -> Self::Summary {
2591 PathEntrySummary { max_id: self.id }
2592 }
2593}
2594
2595impl sum_tree::KeyedItem for PathEntry {
2596 type Key = ProjectEntryId;
2597
2598 fn key(&self) -> Self::Key {
2599 self.id
2600 }
2601}
2602
/// Summary for the id-ordered tree: tracks the maximum id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2607
2608impl sum_tree::Summary for PathEntrySummary {
2609 type Context = ();
2610
2611 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2612 self.max_id = summary.max_id;
2613 }
2614}
2615
2616impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2617 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2618 *self = summary.max_id;
2619 }
2620}
2621
/// Key ordering [`Entry`] items by their worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2624
2625impl Default for PathKey {
2626 fn default() -> Self {
2627 Self(Path::new("").into())
2628 }
2629}
2630
2631impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2632 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2633 self.0 = summary.max_path.clone();
2634 }
2635}
2636
/// Scans a local worktree's directory tree on background threads, keeping its
/// snapshot up to date as filesystem events arrive.
struct BackgroundScanner {
    /// The mutable snapshot being built and updated by the scanner.
    snapshot: Mutex<LocalMutableSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to report scan progress and snapshot changes.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests to re-scan specific paths, answered via the barrier sender.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// Snapshot as of the last status update, plus the paths changed since
    /// then; used to compute change sets.
    prev_state: Mutex<BackgroundScannerState>,
    next_entry_id: Arc<AtomicUsize>,
    finished_initial_scan: bool,
}
2647
/// State carried between status updates: the snapshot as of the previous
/// update and the paths that have changed since then.
struct BackgroundScannerState {
    snapshot: Snapshot,
    event_paths: Vec<Arc<Path>>,
}
2652
2653impl BackgroundScanner {
2654 fn new(
2655 snapshot: LocalSnapshot,
2656 next_entry_id: Arc<AtomicUsize>,
2657 fs: Arc<dyn Fs>,
2658 status_updates_tx: UnboundedSender<ScanState>,
2659 executor: Arc<executor::Background>,
2660 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2661 ) -> Self {
2662 Self {
2663 fs,
2664 status_updates_tx,
2665 executor,
2666 refresh_requests_rx,
2667 next_entry_id,
2668 prev_state: Mutex::new(BackgroundScannerState {
2669 snapshot: snapshot.snapshot.clone(),
2670 event_paths: Default::default(),
2671 }),
2672 snapshot: Mutex::new(LocalMutableSnapshot {
2673 snapshot,
2674 removed_entry_ids: Default::default(),
2675 }),
2676 finished_initial_scan: false,
2677 }
2678 }
2679
    /// Main loop of the scanner: performs the initial recursive scan of the
    /// worktree, then processes filesystem events and refresh requests until
    /// the event stream or request channel closes.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        // (`ignore_stack` is initialized below, once all ancestor gitignores
        // have been loaded.)
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), false));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the root itself, mark the root
            // entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Coalesce any further events that are already pending.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths.clone(), barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Batch up any additional events that are already pending.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths.clone()).await;
                }
            }
        }
    }
2771
2772 async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
2773 if let Some(mut paths) = self.reload_entries_for_paths(paths, None).await {
2774 paths.sort_unstable();
2775 util::extend_sorted(
2776 &mut self.prev_state.lock().event_paths,
2777 paths,
2778 usize::MAX,
2779 Ord::cmp,
2780 );
2781 }
2782 self.send_status_update(false, Some(barrier))
2783 }
2784
    /// Handles a batch of filesystem events: reloads the affected entries,
    /// rescans affected directories, refreshes ignore and git state, and
    /// broadcasts a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        let paths = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await;
        if let Some(paths) = &paths {
            // Record the changed paths for the upcoming status update.
            util::extend_sorted(
                &mut self.prev_state.lock().event_paths,
                paths.iter().cloned(),
                usize::MAX,
                Ord::cmp,
            );
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        if let Some(paths) = paths {
            for path in paths {
                self.reload_repo_for_file_path(&path, &mut *snapshot, self.fs.as_ref());
            }
        }

        // Drop git repositories whose work-directory entry or `.git` entry no
        // longer exists in the worktree.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Likewise, drop repository entries whose backing repository was
        // removed above.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;
        snapshot.completed_scan_id = snapshot.scan_id;
        // Release the snapshot lock before sending the status update.
        drop(snapshot);

        self.send_status_update(false, None);
        self.prev_state.lock().event_paths.clear();
    }
2835
    /// Drains the scan-job queue using one worker task per CPU, emitting
    /// periodic progress updates while still servicing path refresh requests.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiving side is gone there is nobody to scan for.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker won the race; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2908
2909 fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2910 let mut prev_state = self.prev_state.lock();
2911 let new_snapshot = self.snapshot.lock().clone();
2912 let old_snapshot = mem::replace(&mut prev_state.snapshot, new_snapshot.snapshot.clone());
2913
2914 let changes = self.build_change_set(
2915 &old_snapshot,
2916 &new_snapshot.snapshot,
2917 &prev_state.event_paths,
2918 );
2919
2920 self.status_updates_tx
2921 .unbounded_send(ScanState::Updated {
2922 snapshot: new_snapshot,
2923 changes,
2924 scanning,
2925 barrier,
2926 })
2927 .is_ok()
2928 }
2929
    /// Scans the directory described by `job`: reads its children from the
    /// filesystem, builds entries for them (honoring any `.gitignore` found
    /// along the way), and enqueues scan jobs for subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry; `None` marks a directory we refuse to
        // recurse into (a recursive symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                self.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between the directory read and the
                // metadata call; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed
                // to reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it tends to be encountered early, so there should
                // rarely be many already-processed entries to fix up. Update the ignore
                // stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // `new_jobs` is parallel to the directory entries, so
                        // each directory entry has a corresponding slot.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        // Commit all child entries (and the directory's gitignore, if any) to
        // the snapshot in one step.
        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
3055
    /// Refreshes the entries for the given absolute paths: removes entries
    /// for deleted paths, re-inserts entries for existing ones, and — when
    /// `scan_queue_tx` is provided — enqueues recursive scans for affected
    /// directories. Returns the affected worktree-relative paths, or `None`
    /// if the worktree root could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Drop any path whose ancestor is also in the list: refreshing the
        // ancestor covers it.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A recursive update leaves the scan incomplete until the queued scan
        // jobs have been processed.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        self.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // Queue a recursive scan of any refreshed directory,
                    // guarding against recursive symlinks via ancestor inodes.
                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {
                    // The path is gone; clean up any associated git state.
                    self.remove_repo_path(&path, &mut snapshot);
                }
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
3145
    /// Updates git state after `path` was removed: if `path` was a repository
    /// work directory, drops that repository; otherwise clears the statuses
    /// of the removed path and its descendants within its containing
    /// repository. Paths inside a `.git` directory are skipped.
    fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> {
        if !path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let scan_id = snapshot.scan_id;

            // The removed path was itself a repository work directory.
            if let Some(repository) = snapshot.repository_for_work_directory(path) {
                let entry = repository.work_directory.0;
                snapshot.git_repositories.remove(&entry);
                snapshot
                    .snapshot
                    .repository_entries
                    .remove(&RepositoryWorkDirectory(path.into()));
                return Some(());
            }

            let repo = snapshot.repository_for_path(&path)?;

            let repo_path = repo.work_directory.relativize(&snapshot, &path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory;

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            // Drop the statuses for the removed path and everything below it.
            snapshot.repository_entries.update(&work_dir, |entry| {
                entry
                    .statuses
                    .remove_range(&repo_path, &RepoPathDescendants(&repo_path))
            });
        }

        Some(())
    }
3183
    /// Refreshes git repository state affected by a change at `path`. Changes
    /// inside a `.git` directory reload that repository's index, branch, and
    /// statuses; changes to ordinary paths refresh the statuses of the
    /// affected entries in their containing repository.
    fn reload_repo_for_file_path(
        &self,
        path: &Path,
        snapshot: &mut LocalSnapshot,
        fs: &dyn Fs,
    ) -> Option<()> {
        let scan_id = snapshot.scan_id;

        if path
            .components()
            .any(|component| component.as_os_str() == *DOT_GIT)
        {
            let (entry_id, repo_ptr) = {
                let Some((entry_id, repo)) = snapshot.repo_for_metadata(&path) else {
                    // Unknown repository: find the containing `.git`
                    // directory and register it.
                    let dot_git_dir = path.ancestors()
                        .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT))
                        .next()?;

                    snapshot.build_repo(dot_git_dir.into(), fs);
                    return None;
                };
                // Already reloaded during this scan.
                if repo.git_dir_scan_id == scan_id {
                    return None;
                }
                (*entry_id, repo.repo_ptr.to_owned())
            };

            let work_dir = snapshot
                .entry_for_id(entry_id)
                .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?;

            let repo = repo_ptr.lock();
            repo.reload_index();
            let branch = repo.branch_name();
            let statuses = repo.statuses().unwrap_or_default();

            snapshot.git_repositories.update(&entry_id, |entry| {
                entry.scan_id = scan_id;
                entry.git_dir_scan_id = scan_id;
            });

            snapshot.repository_entries.update(&work_dir, |entry| {
                entry.branch = branch.map(Into::into);
                entry.statuses = statuses;
            });
        } else {
            // Ignored paths don't participate in git status; clean up any
            // stale state for them instead.
            if snapshot
                .entry_for_path(&path)
                .map(|entry| entry.is_ignored)
                .unwrap_or(false)
            {
                self.remove_repo_path(&path, snapshot);
                return None;
            }

            let repo = snapshot.repository_for_path(&path)?;

            let work_dir = repo.work_directory(snapshot)?;
            let work_dir_id = repo.work_directory.clone();

            snapshot
                .git_repositories
                .update(&work_dir_id, |entry| entry.scan_id = scan_id);

            let local_repo = snapshot.get_local_repo(&repo)?.to_owned();

            // Short circuit if we've already scanned everything
            if local_repo.git_dir_scan_id == scan_id {
                return None;
            }

            let mut repository = snapshot.repository_entries.remove(&work_dir)?;

            // Recompute the status of every entry under the changed path.
            for entry in snapshot.descendent_entries(false, false, path) {
                let Some(repo_path) = repo.work_directory.relativize(snapshot, &entry.path) else {
                    continue;
                };

                let status = local_repo.repo_ptr.lock().status(&repo_path);
                if let Some(status) = status {
                    repository.statuses.insert(repo_path.clone(), status);
                } else {
                    repository.statuses.remove(&repo_path);
                }
            }

            snapshot.repository_entries.insert(work_dir, repository)
        }

        Some(())
    }
3275
    /// Re-evaluates ignore status for directories whose `.gitignore` files
    /// changed, and prunes cached gitignores whose files were deleted.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        // Work against a clone of the snapshot so the shared one isn't locked
        // for the duration of the traversal.
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        let abs_path = snapshot.abs_path.clone();
        for (parent_abs_path, (_, needs_update)) in &mut snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&abs_path) {
                if *needs_update {
                    *needs_update = false;
                    if snapshot.snapshot.entry_for_path(parent_path).is_some() {
                        ignores_to_update.push(parent_abs_path.clone());
                    }
                }

                // The `.gitignore` file itself no longer exists in the tree.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip paths nested under `parent_abs_path`: the recursive update
            // job for the ancestor will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
3355
    /// Recomputes the ignore status of the entries directly inside
    /// `job.abs_path` after a relevant `.gitignore` changed.
    ///
    /// Entries whose status flipped are edited into the shared snapshot;
    /// child directories are re-enqueued on `job.ignore_queue` so the
    /// recomputation proceeds recursively (worker tasks drain that queue).
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory itself contains a .gitignore, push it onto the
        // stack so it applies to the children examined below.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // An ignored directory short-circuits all of its descendants
                // to "ignored"; otherwise the current stack is inherited.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose ignore status actually
            // changed, keeping both indices (by path and by id) in sync.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
3398
    /// Computes the per-entry differences between two snapshots, restricted
    /// to the subtrees rooted at `event_paths`.
    ///
    /// Both snapshots' entry cursors are seeked to each event path and then
    /// walked in lockstep: entries present only in the old snapshot are
    /// `Removed`, only in the new one `Added`, and entries present in both
    /// are `Updated` when their mtime differs.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: &[Arc<Path>],
    ) -> HashMap<(Arc<Path>, ProjectEntryId), PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        // Events generated before the initial scan finished cannot
        // distinguish "added" from "updated" (see the Equal branch below).
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path.clone());
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path's subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Path exists only in the old snapshot.
                            Ordering::Less => {
                                changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(
                                        (new_entry.path.clone(), new_entry.id),
                                        AddedOrUpdated,
                                    );
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert((new_entry.path.clone(), new_entry.id), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Path exists only in the new snapshot.
                            Ordering::Greater => {
                                changes.insert((new_entry.path.clone(), new_entry.id), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    // Old snapshot has trailing entries: all removed.
                    (Some(old_entry), None) => {
                        changes.insert((old_entry.path.clone(), old_entry.id), Removed);
                        old_paths.next(&());
                    }
                    // New snapshot has trailing entries: all added.
                    (None, Some(new_entry)) => {
                        changes.insert((new_entry.path.clone(), new_entry.id), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }

        changes
    }
3469
    /// Delay between scan-progress notifications (~100ms).
    ///
    /// When `running` is false this pends forever, so a caller racing this
    /// future against other work never sees it complete. Under tests with a
    /// fake filesystem, a simulated random delay replaces the real timer.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
3482}
3483
3484fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3485 let mut result = root_char_bag;
3486 result.extend(
3487 path.to_string_lossy()
3488 .chars()
3489 .map(|c| c.to_ascii_lowercase()),
3490 );
3491 result
3492}
3493
/// A unit of work for the background scanner: scan the directory at
/// `abs_path`, applying `ignore_stack`.
struct ScanJob {
    abs_path: Arc<Path>,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for newly discovered subdirectories are pushed.
    scan_queue: Sender<ScanJob>,
    // Inodes of this job's ancestor directories — presumably used to avoid
    // descending into symlink cycles (see test_circular_symlinks); confirm
    // at the scan site.
    ancestor_inodes: TreeSet<u64>,
}
3501
/// A unit of work for re-evaluating ignore statuses: recompute the entries
/// directly inside `abs_path` against `ignore_stack`, re-enqueueing child
/// directories on `ignore_queue`. Processed by `update_ignore_status`.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3507
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all FS events buffered before the call have been
    /// processed by the worktree's scanner (see the impl for details).
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3515
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the scanner to see it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it and wait for that to be seen as well.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-progress scan to complete.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3556
/// Seek dimension accumulated while traversing the entry tree: running entry
/// counts up to `max_path`. "Visible" counts exclude ignored entries (see
/// `count`, where `include_ignored == false` selects the visible variants).
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// Maximum path summarized so far, in traversal order.
    max_path: &'a Path,
    /// All entries.
    count: usize,
    /// Entries that are not ignored.
    visible_count: usize,
    /// File (non-directory) entries.
    file_count: usize,
    /// Non-ignored file entries.
    visible_file_count: usize,
}
3565
3566impl<'a> TraversalProgress<'a> {
3567 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3568 match (include_ignored, include_dirs) {
3569 (true, true) => self.count,
3570 (true, false) => self.file_count,
3571 (false, true) => self.visible_count,
3572 (false, false) => self.visible_file_count,
3573 }
3574 }
3575}
3576
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // `max_path` is replaced by the summary's maximum; the counts are
        // additive across summaries.
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3586
3587impl<'a> Default for TraversalProgress<'a> {
3588 fn default() -> Self {
3589 Self {
3590 max_path: Path::new(""),
3591 count: 0,
3592 visible_count: 0,
3593 file_count: 0,
3594 visible_file_count: 0,
3595 }
3596 }
3597}
3598
/// An iterator-like cursor over a snapshot's entries in path order,
/// optionally filtering out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether ignored entries are yielded.
    include_ignored: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
3604
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                // One past the filtered count at the current position.
                count: self.end_offset() + 1,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Left,
            &(),
        )
    }

    /// Skips the current entry's entire subtree, stopping at the next
    /// filter-matching entry outside it. Returns true if one was found.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // If the landing entry doesn't match the filters, loop and skip
            // its subtree too.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Filtered entry count up to (excluding) the current position.
    pub fn start_offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }

    /// Filtered entry count up to and including the current position.
    pub fn end_offset(&self) -> usize {
        self.cursor
            .end(&())
            .count(self.include_dirs, self.include_ignored)
    }
}
3652
3653impl<'a> Iterator for Traversal<'a> {
3654 type Item = &'a Entry;
3655
3656 fn next(&mut self) -> Option<Self::Item> {
3657 if let Some(item) = self.entry() {
3658 self.advance();
3659 Some(item)
3660 } else {
3661 None
3662 }
3663 }
3664}
3665
/// Seek targets used to position a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first path that is neither the given path nor within it.
    PathSuccessor(&'a Path),
    /// Seek to the position with the given filtered entry count
    /// (see `TraversalProgress::count`).
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3676
3677impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3678 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3679 match self {
3680 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3681 TraversalTarget::PathSuccessor(path) => {
3682 if !cursor_location.max_path.starts_with(path) {
3683 Ordering::Equal
3684 } else {
3685 Ordering::Greater
3686 }
3687 }
3688 TraversalTarget::Count {
3689 count,
3690 include_dirs,
3691 include_ignored,
3692 } => Ord::cmp(
3693 count,
3694 &cursor_location.count(*include_dirs, *include_ignored),
3695 ),
3696 }
3697 }
3698}
3699
/// Iterator over the direct children of `parent_path` (subtrees of each
/// child are skipped via `advance_to_sibling`).
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3704
3705impl<'a> Iterator for ChildEntriesIter<'a> {
3706 type Item = &'a Entry;
3707
3708 fn next(&mut self) -> Option<Self::Item> {
3709 if let Some(item) = self.traversal.entry() {
3710 if item.path.starts_with(&self.parent_path) {
3711 self.traversal.advance_to_sibling();
3712 return Some(item);
3713 }
3714 }
3715 None
3716 }
3717}
3718
/// Iterator over all entries beneath `parent_path`, recursively.
struct DescendentEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3723
3724impl<'a> Iterator for DescendentEntriesIter<'a> {
3725 type Item = &'a Entry;
3726
3727 fn next(&mut self) -> Option<Self::Item> {
3728 if let Some(item) = self.traversal.entry() {
3729 if item.path.starts_with(&self.parent_path) {
3730 self.traversal.advance();
3731 return Some(item);
3732 }
3733 }
3734 None
3735 }
3736}
3737
/// Serializes an entry for the wire. The path is converted lossily to a
/// string; `kind`'s char bag is not transmitted (it is rebuilt on the
/// receiving side — see the `TryFrom` impl below).
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3751
3752impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3753 type Error = anyhow::Error;
3754
3755 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3756 if let Some(mtime) = entry.mtime {
3757 let kind = if entry.is_dir {
3758 EntryKind::Dir
3759 } else {
3760 let mut char_bag = *root_char_bag;
3761 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3762 EntryKind::File(char_bag)
3763 };
3764 let path: Arc<Path> = PathBuf::from(entry.path).into();
3765 Ok(Entry {
3766 id: ProjectEntryId::from_proto(entry.id),
3767 kind,
3768 path,
3769 inode: entry.inode,
3770 mtime: mtime.into(),
3771 is_symlink: entry.is_symlink,
3772 is_ignored: entry.is_ignored,
3773 })
3774 } else {
3775 Err(anyhow!(
3776 "missing mtime in remote worktree entry {:?}",
3777 entry.path
3778 ))
3779 }
3780 }
3781}
3782
3783#[cfg(test)]
3784mod tests {
3785 use super::*;
3786 use fs::{FakeFs, RealFs};
3787 use gpui::{executor::Deterministic, TestAppContext};
3788 use pretty_assertions::assert_eq;
3789 use rand::prelude::*;
3790 use serde_json::json;
3791 use std::{env, fmt::Write};
3792 use util::{http::FakeHttpClient, test::temp_tree};
3793
    /// Scans a tree whose root `.gitignore` excludes `a/b`, then checks that
    /// `entries(false)` omits the ignored file while `entries(true)`
    /// includes it.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan before asserting on entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries: `a/b` is filtered out.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries: `a/b` appears.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3851
    /// Exercises `descendent_entries` with every combination of the
    /// include-dirs / include-ignored flags, including subtrees that contain
    /// only directories and a gitignored subtree (`i/j`).
    #[gpui::test]
    async fn test_descendent_entries(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "a": "",
                "b": {
                    "c": {
                        "d": ""
                    },
                    "e": {}
                },
                "f": "",
                "g": {
                    "h": {}
                },
                "i": {
                    "j": {
                        "k": ""
                    },
                    "l": {

                    }
                },
                ".gitignore": "i/j\n",
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Files only: just the leaf under b/c.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("b/c/d"),]
            );
            // Including dirs: b itself plus all nested dirs.
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("b"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new("b"),
                    Path::new("b/c"),
                    Path::new("b/c/d"),
                    Path::new("b/e"),
                ]
            );

            // `g` contains no files, so files-only yields nothing.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("g"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("g"), Path::new("g/h"),]
            );

            // `i`'s only file lives in the gitignored `i/j` subtree.
            assert_eq!(
                tree.descendent_entries(false, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                Vec::<PathBuf>::new()
            );
            // Including ignored entries reveals it.
            assert_eq!(
                tree.descendent_entries(false, true, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i/j/k")]
            );
            // Dirs without ignored: the ignored `i/j` subtree is skipped.
            assert_eq!(
                tree.descendent_entries(true, false, Path::new("i"))
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![Path::new("i"), Path::new("i/l"),]
            );
        })
    }
3950
    /// Creates symlinks that point back at an ancestor directory and checks
    /// that the scanner lists the symlink entries themselves without
    /// descending into them (no infinite recursion), and that renaming one
    /// of them is picked up on rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink resolves to its own parent's parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // The symlinks appear as entries but have no children.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Rename one symlink and let the event loop settle.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
4030
    /// On a real filesystem, checks that ignore statuses (from both an
    /// ancestor .gitignore outside the worktree and one inside it) are
    /// correct after the initial scan AND after files are added later.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // .gitignores are handled explicitly by Zed and do not use the git
        // machinery that the git_tests module checks
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Statuses after the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Add new files matching the same three cases and rescan.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The .git directory itself is always treated as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
4111
    /// Writes files through the worktree API into tracked and gitignored
    /// directories, and checks the resulting entries' ignore statuses.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        // Both writes produce entries; only the one under ignored-dir is
        // marked ignored.
        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
4165
    /// Creates a directory through the worktree API while the initial scan
    /// may still be running, then verifies the entry exists and that a
    /// remote snapshot can be brought up to date via `build_update`.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken before the mutation; used as the "remote" baseline.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Note: scan_complete is deliberately NOT awaited before this.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the diff between snapshots must reproduce the new state.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
4214
    /// Randomized test: mutates the worktree while its initial scan is in
    /// progress, repeatedly checking snapshot invariants and that remote
    /// updates built from snapshot diffs reproduce the local state.
    /// Operation counts are tunable via OPERATIONS / INITIAL_ENTRIES env
    /// vars.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot that will be kept in sync via remote updates.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally sync the baseline snapshot and verify it matches.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Final check after the scan finishes.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
4291
    /// Randomized test of event-driven snapshot maintenance: after the
    /// initial scan, mutates the FS and worktree with paused/partially
    /// flushed FS events, verifying that (a) `UpdatedEntries` events are
    /// sufficient to mirror the entry list, (b) the quiesced snapshot
    /// matches a fresh scan, and (c) remote updates from any stored
    /// intermediate snapshot converge to the final one.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            // Maintain a sorted mirror of the path list purely from change
            // events, asserting it matches the real list after every batch.
            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for ((path, _), change_type) in changes.iter() {
                        let path = path.clone();
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }

                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }

                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }

                            PathChange::AddedOrUpdated => {
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }

                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Pause event delivery so mutations and event processing interleave
        // randomly.
        fs.as_fake().pause_events();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if rng.gen_bool(0.2) {
                worktree
                    .update(cx, |worktree, cx| {
                        randomly_mutate_worktree(worktree, &mut rng, cx)
                    })
                    .await
                    .log_err();
            } else {
                randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            }

            // Either release a random prefix of buffered events or mutate
            // again.
            let buffered_event_count = fs.as_fake().buffered_event_count();
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len);
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Flush everything and let the worktree settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX);
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A fresh worktree scanning the same FS must agree with the evolved
        // snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored intermediate snapshot must converge to the final one
        // when the corresponding update is applied, with or without ignored
        // entries.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries to simulate a remote that never
                // received them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4461
    /// Applies one random mutation through the worktree API: roughly a third
    /// deletes, a third renames, and the rest creates a child entry (for
    /// directories) or overwrites the file's contents. The root entry is
    /// never deleted or renamed.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        log::info!("mutating worktree");
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // Delete (guard skips the root entry).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // Rename into some other directory; fall back to a root-level
            // name if the destination would be inside the moved entry.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Create a child (dir with probability 0.3) or overwrite a file.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4522
/// Applies one random mutation to the fake filesystem under `root_path`:
/// creates a file or directory, writes a `.gitignore`, or renames/deletes an
/// existing entry. Test-only helper driven by a seeded `rng`, so the order of
/// rng draws below is load-bearing for reproducibility.
///
/// `insertion_probability` biases the choice toward creating new entries;
/// creation is forced when nothing but the root directory exists.
async fn randomly_mutate_fs(
    fs: &Arc<dyn Fs>,
    root_path: &Path,
    insertion_probability: f64,
    rng: &mut impl Rng,
) {
    log::info!("mutating fs");
    // Partition every existing path under the root into files vs. directories.
    let mut files = Vec::new();
    let mut dirs = Vec::new();
    for path in fs.as_fake().paths() {
        if path.starts_with(root_path) {
            if fs.is_file(&path).await {
                files.push(path);
            } else {
                dirs.push(path);
            }
        }
    }

    // Create a new entry when the tree is empty (only the root dir remains),
    // or with probability `insertion_probability`.
    if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
        let path = dirs.choose(rng).unwrap();
        let new_path = path.join(gen_name(rng));

        if rng.gen() {
            log::info!(
                "creating dir {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_dir(&new_path).await.unwrap();
        } else {
            log::info!(
                "creating file {:?}",
                new_path.strip_prefix(root_path).unwrap()
            );
            fs.create_file(&new_path, Default::default()).await.unwrap();
        }
    } else if rng.gen_bool(0.05) {
        // Write a `.gitignore` into a random directory, ignoring a random
        // subset of the files and directories beneath it.
        let ignore_dir_path = dirs.choose(rng).unwrap();
        let ignore_path = ignore_dir_path.join(&*GITIGNORE);

        // Both lists include `ignore_dir_path` itself (`starts_with` matches
        // the path itself), so `subdirs` is always non-empty.
        let subdirs = dirs
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let subfiles = files
            .iter()
            .filter(|d| d.starts_with(&ignore_dir_path))
            .cloned()
            .collect::<Vec<_>>();
        let files_to_ignore = {
            // Inclusive bound: possibly ignore *all* files under this dir.
            let len = rng.gen_range(0..=subfiles.len());
            subfiles.choose_multiple(rng, len)
        };
        let dirs_to_ignore = {
            // NOTE(review): exclusive bound, asymmetric with the file range
            // above — presumably to always leave at least one subdir (and
            // thus the ignore file's own dir) unignored; confirm intent.
            let len = rng.gen_range(0..subdirs.len());
            subdirs.choose_multiple(rng, len)
        };

        // One ignore rule per chosen path, written relative to the ignore dir.
        let mut ignore_contents = String::new();
        for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
            writeln!(
                ignore_contents,
                "{}",
                path_to_ignore
                    .strip_prefix(&ignore_dir_path)
                    .unwrap()
                    .to_str()
                    .unwrap()
            )
            .unwrap();
        }
        log::info!(
            "creating gitignore {:?} with contents:\n{}",
            ignore_path.strip_prefix(&root_path).unwrap(),
            ignore_contents
        );
        fs.save(
            &ignore_path,
            &ignore_contents.as_str().into(),
            Default::default(),
        )
        .await
        .unwrap();
    } else {
        // Pick an existing entry to rename or delete. `dirs[1..]` skips the
        // root so it is never moved or removed. At least one candidate exists
        // here because the insertion branch above is forced when the root is
        // the only directory and there are no files.
        let old_path = {
            let file_path = files.choose(rng);
            let dir_path = dirs[1..].choose(rng);
            file_path.into_iter().chain(dir_path).choose(rng).unwrap()
        };

        let is_rename = rng.gen();
        if is_rename {
            // Never move an entry into itself or one of its descendants.
            let new_path_parent = dirs
                .iter()
                .filter(|d| !d.starts_with(old_path))
                .choose(rng)
                .unwrap();

            // Occasionally replace an existing directory wholesale instead of
            // creating a fresh name beneath it.
            let overwrite_existing_dir =
                !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
            let new_path = if overwrite_existing_dir {
                fs.remove_dir(
                    &new_path_parent,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
                new_path_parent.to_path_buf()
            } else {
                new_path_parent.join(gen_name(rng))
            };

            log::info!(
                "renaming {:?} to {}{:?}",
                old_path.strip_prefix(&root_path).unwrap(),
                if overwrite_existing_dir {
                    "overwrite "
                } else {
                    ""
                },
                new_path.strip_prefix(&root_path).unwrap()
            );
            fs.rename(
                &old_path,
                &new_path,
                fs::RenameOptions {
                    overwrite: true,
                    ignore_if_exists: true,
                },
            )
            .await
            .unwrap();
        } else if fs.is_file(&old_path).await {
            log::info!(
                "deleting file {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_file(old_path, Default::default()).await.unwrap();
        } else {
            log::info!(
                "deleting dir {:?}",
                old_path.strip_prefix(&root_path).unwrap()
            );
            fs.remove_dir(
                &old_path,
                RemoveOptions {
                    recursive: true,
                    ignore_if_not_exists: true,
                },
            )
            .await
            .unwrap();
        }
    }
}
4682
4683 fn gen_name(rng: &mut impl Rng) -> String {
4684 (0..6)
4685 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4686 .map(char::from)
4687 .collect()
4688 }
4689
impl LocalSnapshot {
    /// Test-only sanity check: panics if any of the snapshot's internal
    /// invariants is violated.
    fn check_invariants(&self) {
        // `entries_by_path` and `entries_by_id` must hold exactly the same
        // (path, id) pairs; the id-keyed side is sorted (via BTreeSet) so it
        // can be compared against the path-ordered cursor.
        assert_eq!(
            self.entries_by_path
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<Vec<_>>(),
            self.entries_by_id
                .cursor::<()>()
                .map(|e| (&e.path, e.id))
                .collect::<collections::BTreeSet<_>>()
                .into_iter()
                .collect::<Vec<_>>(),
            "entries_by_path and entries_by_id are inconsistent"
        );

        // `files(true, ..)` must yield every file entry in traversal order,
        // and `files(false, ..)` every non-ignored file, with no extras.
        let mut files = self.files(true, 0);
        let mut visible_files = self.files(false, 0);
        for entry in self.entries_by_path.cursor::<()>() {
            if entry.is_file() {
                assert_eq!(files.next().unwrap().inode, entry.inode);
                if !entry.is_ignored {
                    assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                }
            }
        }

        // Both iterators must be exhausted — no files beyond the entries.
        assert!(files.next().is_none());
        assert!(visible_files.next().is_none());

        // Walk the tree via `child_entries`. Despite the variable name,
        // inserting each entry's children at the stack's pre-loop length and
        // popping from the end yields a depth-first preorder — which is what
        // the equality with the flat cursor order below asserts.
        let mut bfs_paths = Vec::new();
        let mut stack = vec![Path::new("")];
        while let Some(path) = stack.pop() {
            bfs_paths.push(path);
            let ix = stack.len();
            for child_entry in self.child_entries(path) {
                stack.insert(ix, &child_entry.path);
            }
        }

        let dfs_paths_via_iter = self
            .entries_by_path
            .cursor::<()>()
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(bfs_paths, dfs_paths_via_iter);

        // The public traversal API must agree with the raw cursor order.
        let dfs_paths_via_traversal = self
            .entries(true)
            .map(|e| e.path.as_ref())
            .collect::<Vec<_>>();
        assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

        // Every tracked gitignore parent must still exist as an entry and
        // still contain a `.gitignore` entry.
        for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
            let ignore_parent_path =
                ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
            assert!(self.entry_for_path(&ignore_parent_path).is_some());
            assert!(self
                .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                .is_some());
        }
    }

    /// Flattens the snapshot into `(path, inode, is_ignored)` tuples sorted
    /// by path, skipping ignored entries unless `include_ignored` is set.
    fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
        let mut paths = Vec::new();
        for entry in self.entries_by_path.cursor::<()>() {
            if include_ignored || !entry.is_ignored {
                paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
            }
        }
        // NOTE(review): the cursor already iterates in path order, so this
        // sort looks defensive — confirm before removing.
        paths.sort_by(|a, b| a.0.cmp(b.0));
        paths
    }
}
4764
4765 mod git_tests {
4766 use super::*;
4767 use pretty_assertions::assert_eq;
4768
/// Verifies that a git repository's work directory, and the statuses of the
/// files inside it, follow the directory when it is renamed on disk.
#[gpui::test]
async fn test_rename_work_directory(cx: &mut TestAppContext) {
    // project1 holds two files; only `a` gets committed below.
    let root = temp_tree(json!({
        "projects": {
            "project1": {
                "a": "",
                "b": "",
            }
        },

    }));
    let root_path = root.path();

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root_path,
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    // Commit `a`, then modify it on disk so it reads as Modified; `b` is
    // never staged, so it reads as Added.
    let repo = git_init(&root_path.join("projects/project1"));
    git_add("a", &repo);
    git_commit("init", &repo);
    // NOTE(review): `.ok()` swallows a possible write failure, which would
    // only surface as a confusing assertion failure below.
    std::fs::write(root_path.join("projects/project1/a"), "aa").ok();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;

    tree.flush_fs_events(cx).await;

    // Initial state: one repository rooted at project1, statuses as set up.
    cx.read(|cx| {
        let tree = tree.read(cx);
        let (work_dir, repo) = tree.repositories().next().unwrap();
        assert_eq!(work_dir.as_ref(), Path::new("projects/project1"));
        assert_eq!(
            repo.status_for_file(tree, Path::new("projects/project1/a")),
            Some(GitFileStatus::Modified)
        );
        assert_eq!(
            repo.status_for_file(tree, Path::new("projects/project1/b")),
            Some(GitFileStatus::Added)
        );
    });

    // Rename the entire work directory on disk.
    std::fs::rename(
        root_path.join("projects/project1"),
        root_path.join("projects/project2"),
    )
    .ok();
    tree.flush_fs_events(cx).await;

    // The repository and its file statuses must now be reported under the
    // renamed work-directory path.
    cx.read(|cx| {
        let tree = tree.read(cx);
        let (work_dir, repo) = tree.repositories().next().unwrap();
        assert_eq!(work_dir.as_ref(), Path::new("projects/project2"));
        assert_eq!(
            repo.status_for_file(tree, Path::new("projects/project2/a")),
            Some(GitFileStatus::Modified)
        );
        assert_eq!(
            repo.status_for_file(tree, Path::new("projects/project2/b")),
            Some(GitFileStatus::Added)
        );
    });
}
4840
/// Verifies `repository_for_path` resolution with nested repositories, that
/// changes inside a `.git` directory emit `UpdatedGitRepositories` events,
/// and that deleting `.git` unregisters the repository.
#[gpui::test]
async fn test_git_repository_for_path(cx: &mut TestAppContext) {
    // Outer repo at `dir1`, nested repo at `dir1/deps/dep1`; `c.txt` lies
    // outside any repository.
    let root = temp_tree(json!({
        "c.txt": "",
        "dir1": {
            ".git": {},
            "deps": {
                "dep1": {
                    ".git": {},
                    "src": {
                        "a.txt": ""
                    }
                }
            },
            "src": {
                "b.txt": ""
            }
        },
    }));

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root.path(),
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;
    tree.flush_fs_events(cx).await;

    // Paths must resolve to the *innermost* enclosing repository.
    tree.read_with(cx, |tree, _cx| {
        let tree = tree.as_local().unwrap();

        assert!(tree.repository_for_path("c.txt".as_ref()).is_none());

        let entry = tree.repository_for_path("dir1/src/b.txt".as_ref()).unwrap();
        assert_eq!(
            entry
                .work_directory(tree)
                .map(|directory| directory.as_ref().to_owned()),
            Some(Path::new("dir1").to_owned())
        );

        let entry = tree
            .repository_for_path("dir1/deps/dep1/src/a.txt".as_ref())
            .unwrap();
        assert_eq!(
            entry
                .work_directory(tree)
                .map(|directory| directory.as_ref().to_owned()),
            Some(Path::new("dir1/deps/dep1").to_owned())
        );

        // Pairing every visible file with its repository must agree with the
        // per-path lookups above.
        let entries = tree.files(false, 0);

        let paths_with_repos = tree
            .entries_with_repositories(entries)
            .map(|(entry, repo)| {
                (
                    entry.path.as_ref(),
                    repo.and_then(|repo| {
                        repo.work_directory(&tree)
                            .map(|work_directory| work_directory.0.to_path_buf())
                    }),
                )
            })
            .collect::<Vec<_>>();

        assert_eq!(
            paths_with_repos,
            &[
                (Path::new("c.txt"), None),
                (
                    Path::new("dir1/deps/dep1/src/a.txt"),
                    Some(Path::new("dir1/deps/dep1").into())
                ),
                (Path::new("dir1/src/b.txt"), Some(Path::new("dir1").into())),
            ]
        );
    });

    // Record repository-update events emitted by the worktree.
    let repo_update_events = Arc::new(Mutex::new(vec![]));
    tree.update(cx, |_, cx| {
        let repo_update_events = repo_update_events.clone();
        cx.subscribe(&tree, move |_, _, event, _| {
            if let Event::UpdatedGitRepositories(update) = event {
                repo_update_events.lock().push(update.clone());
            }
        })
        .detach();
    });

    // Touching a file inside `.git` must report the containing repo updated.
    std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
    tree.flush_fs_events(cx).await;

    assert_eq!(
        repo_update_events.lock()[0]
            .keys()
            .cloned()
            .collect::<Vec<Arc<Path>>>(),
        vec![Path::new("dir1").into()]
    );

    // Deleting `.git` must unregister the repository entirely.
    std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let tree = tree.as_local().unwrap();

        assert!(tree
            .repository_for_path("dir1/src/b.txt".as_ref())
            .is_none());
    });
}
4962
/// End-to-end exercise of git status tracking: the initial scan, commits,
/// resets/stashes, `.gitignore` changes, deletions, and directory renames
/// must all be reflected in the repository entry's `statuses`.
#[gpui::test]
async fn test_git_status(cx: &mut TestAppContext) {
    const IGNORE_RULE: &'static str = "**/target";

    let root = temp_tree(json!({
        "project": {
            "a.txt": "a",
            "b.txt": "bb",
            "c": {
                "d": {
                    "e.txt": "eee"
                }
            },
            "f.txt": "ffff",
            "target": {
                "build_file": "???"
            },
            ".gitignore": IGNORE_RULE
        },

    }));

    let http_client = FakeHttpClient::with_404_response();
    let client = cx.read(|cx| Client::new(http_client, cx));
    let tree = Worktree::local(
        client,
        root.path(),
        true,
        Arc::new(RealFs),
        Default::default(),
        &mut cx.to_async(),
    )
    .await
    .unwrap();

    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
        .await;

    const A_TXT: &'static str = "a.txt";
    const B_TXT: &'static str = "b.txt";
    const E_TXT: &'static str = "c/d/e.txt";
    const F_TXT: &'static str = "f.txt";
    const DOTGITIGNORE: &'static str = ".gitignore";
    const BUILD_FILE: &'static str = "target/build_file";

    // Commit a, e, and .gitignore, then modify `a` on disk: `a` should show
    // Modified, while uncommitted `b` and `f` show Added.
    let work_dir = root.path().join("project");
    let mut repo = git_init(work_dir.as_path());
    repo.add_ignore_rule(IGNORE_RULE).unwrap();
    git_add(Path::new(A_TXT), &repo);
    git_add(Path::new(E_TXT), &repo);
    git_add(Path::new(DOTGITIGNORE), &repo);
    git_commit("Initial commit", &repo);

    std::fs::write(work_dir.join(A_TXT), "aa").unwrap();

    tree.flush_fs_events(cx).await;

    // Check that the right git state is observed on startup
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        assert_eq!(snapshot.repository_entries.iter().count(), 1);
        let (dir, repo) = snapshot.repository_entries.iter().next().unwrap();
        assert_eq!(dir.0.as_ref(), Path::new("project"));

        // `target/build_file` is ignored, hence absent from the statuses.
        assert_eq!(repo.statuses.iter().count(), 3);
        assert_eq!(
            repo.statuses.get(&Path::new(A_TXT).into()),
            Some(&GitFileStatus::Modified)
        );
        assert_eq!(
            repo.statuses.get(&Path::new(B_TXT).into()),
            Some(&GitFileStatus::Added)
        );
        assert_eq!(
            repo.statuses.get(&Path::new(F_TXT).into()),
            Some(&GitFileStatus::Added)
        );
    });

    // Commit `a` and `b` without touching the worktree: only repo-internal
    // changes happened, yet the statuses must update.
    git_add(Path::new(A_TXT), &repo);
    git_add(Path::new(B_TXT), &repo);
    git_commit("Committing modified and added", &repo);
    tree.flush_fs_events(cx).await;

    // Check that repo only changes are tracked
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        assert_eq!(repo.statuses.iter().count(), 1);
        assert_eq!(
            repo.statuses.get(&Path::new(F_TXT).into()),
            Some(&GitFileStatus::Added)
        );
    });

    // Undo the last commit, unstage `b`, stash, then modify `e` on disk and
    // write an ignored file under `target/`.
    git_reset(0, &repo);
    git_remove_index(Path::new(B_TXT), &repo);
    git_stash(&mut repo);
    std::fs::write(work_dir.join(E_TXT), "eeee").unwrap();
    std::fs::write(work_dir.join(BUILD_FILE), "this should be ignored").unwrap();
    tree.flush_fs_events(cx).await;

    // Check that more complex repo changes are tracked
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        assert_eq!(repo.statuses.iter().count(), 3);
        assert_eq!(repo.statuses.get(&Path::new(A_TXT).into()), None);
        assert_eq!(
            repo.statuses.get(&Path::new(B_TXT).into()),
            Some(&GitFileStatus::Added)
        );
        assert_eq!(
            repo.statuses.get(&Path::new(E_TXT).into()),
            Some(&GitFileStatus::Modified)
        );
        assert_eq!(
            repo.statuses.get(&Path::new(F_TXT).into()),
            Some(&GitFileStatus::Added)
        );
    });

    // Delete `b` and the `c` directory, and additionally ignore `f.txt`;
    // after committing the new ignore file no statuses should remain.
    std::fs::remove_file(work_dir.join(B_TXT)).unwrap();
    std::fs::remove_dir_all(work_dir.join("c")).unwrap();
    std::fs::write(
        work_dir.join(DOTGITIGNORE),
        [IGNORE_RULE, "f.txt"].join("\n"),
    )
    .unwrap();

    git_add(Path::new(DOTGITIGNORE), &repo);
    git_commit("Committing modified git ignore", &repo);

    tree.flush_fs_events(cx).await;

    // Check that non-repo behavior is tracked
    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        assert_eq!(repo.statuses.iter().count(), 0);
    });

    // A newly-created nested file must show as Added under its full path.
    let mut renamed_dir_name = "first_directory/second_directory";
    const RENAMED_FILE: &'static str = "rf.txt";

    std::fs::create_dir_all(work_dir.join(renamed_dir_name)).unwrap();
    std::fs::write(
        work_dir.join(renamed_dir_name).join(RENAMED_FILE),
        "new-contents",
    )
    .unwrap();

    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        assert_eq!(repo.statuses.iter().count(), 1);
        assert_eq!(
            repo.statuses
                .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
            Some(&GitFileStatus::Added)
        );
    });

    // Renaming the ancestor directory must move the status to the new path.
    renamed_dir_name = "new_first_directory/second_directory";

    std::fs::rename(
        work_dir.join("first_directory"),
        work_dir.join("new_first_directory"),
    )
    .unwrap();

    tree.flush_fs_events(cx).await;

    tree.read_with(cx, |tree, _cx| {
        let snapshot = tree.snapshot();
        let (_, repo) = snapshot.repository_entries.iter().next().unwrap();

        assert_eq!(repo.statuses.iter().count(), 1);
        assert_eq!(
            repo.statuses
                .get(&Path::new(renamed_dir_name).join(RENAMED_FILE).into()),
            Some(&GitFileStatus::Added)
        );
    });
}
5154
5155 #[track_caller]
5156 fn git_init(path: &Path) -> git2::Repository {
5157 git2::Repository::init(path).expect("Failed to initialize git repository")
5158 }
5159
5160 #[track_caller]
5161 fn git_add<P: AsRef<Path>>(path: P, repo: &git2::Repository) {
5162 let path = path.as_ref();
5163 let mut index = repo.index().expect("Failed to get index");
5164 index.add_path(path).expect("Failed to add a.txt");
5165 index.write().expect("Failed to write index");
5166 }
5167
5168 #[track_caller]
5169 fn git_remove_index(path: &Path, repo: &git2::Repository) {
5170 let mut index = repo.index().expect("Failed to get index");
5171 index.remove_path(path).expect("Failed to add a.txt");
5172 index.write().expect("Failed to write index");
5173 }
5174
5175 #[track_caller]
5176 fn git_commit(msg: &'static str, repo: &git2::Repository) {
5177 use git2::Signature;
5178
5179 let signature = Signature::now("test", "test@zed.dev").unwrap();
5180 let oid = repo.index().unwrap().write_tree().unwrap();
5181 let tree = repo.find_tree(oid).unwrap();
5182 if let Some(head) = repo.head().ok() {
5183 let parent_obj = head.peel(git2::ObjectType::Commit).unwrap();
5184
5185 let parent_commit = parent_obj.as_commit().unwrap();
5186
5187 repo.commit(
5188 Some("HEAD"),
5189 &signature,
5190 &signature,
5191 msg,
5192 &tree,
5193 &[parent_commit],
5194 )
5195 .expect("Failed to commit with parent");
5196 } else {
5197 repo.commit(Some("HEAD"), &signature, &signature, msg, &tree, &[])
5198 .expect("Failed to commit");
5199 }
5200 }
5201
5202 #[track_caller]
5203 fn git_stash(repo: &mut git2::Repository) {
5204 use git2::Signature;
5205
5206 let signature = Signature::now("test", "test@zed.dev").unwrap();
5207 repo.stash_save(&signature, "N/A", None)
5208 .expect("Failed to stash");
5209 }
5210
5211 #[track_caller]
5212 fn git_reset(offset: usize, repo: &git2::Repository) {
5213 let head = repo.head().expect("Couldn't get repo head");
5214 let object = head.peel(git2::ObjectType::Commit).unwrap();
5215 let commit = object.as_commit().unwrap();
5216 let new_head = commit
5217 .parents()
5218 .inspect(|parnet| {
5219 parnet.message();
5220 })
5221 .skip(offset)
5222 .next()
5223 .expect("Not enough history");
5224 repo.reset(&new_head.as_object(), git2::ResetType::Soft, None)
5225 .expect("Could not reset");
5226 }
5227
5228 #[allow(dead_code)]
5229 #[track_caller]
5230 fn git_status(repo: &git2::Repository) -> HashMap<String, git2::Status> {
5231 repo.statuses(None)
5232 .unwrap()
5233 .iter()
5234 .map(|status| (status.path().unwrap().to_string(), status.status()))
5235 .collect()
5236 }
5237 }
5238}