1pub mod fs_watcher;
2
3use parking_lot::Mutex;
4use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};
5use std::time::Instant;
6use util::maybe;
7
8use anyhow::{Context as _, Result, anyhow};
9#[cfg(any(target_os = "linux", target_os = "freebsd"))]
10use ashpd::desktop::trash;
11use futures::stream::iter;
12use gpui::App;
13use gpui::BackgroundExecutor;
14use gpui::Global;
15use gpui::ReadGlobal as _;
16use gpui::SharedString;
17use std::borrow::Cow;
18#[cfg(unix)]
19use std::ffi::CString;
20use util::command::new_command;
21
22#[cfg(unix)]
23use std::os::fd::{AsFd, AsRawFd};
24#[cfg(unix)]
25use std::os::unix::ffi::OsStrExt;
26
27#[cfg(unix)]
28use std::os::unix::fs::{FileTypeExt, MetadataExt};
29
30#[cfg(any(target_os = "macos", target_os = "freebsd"))]
31use std::mem::MaybeUninit;
32
33use async_tar::Archive;
34use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture};
35use git::repository::{GitRepository, RealGitRepository};
36use is_executable::IsExecutable;
37use rope::Rope;
38use serde::{Deserialize, Serialize};
39use smol::io::AsyncWriteExt;
40#[cfg(feature = "test-support")]
41use std::path::Component;
42use std::{
43 io::{self, Write},
44 path::{Path, PathBuf},
45 pin::Pin,
46 sync::Arc,
47 time::{Duration, SystemTime, UNIX_EPOCH},
48};
49use tempfile::TempDir;
50use text::LineEnding;
51
52#[cfg(feature = "test-support")]
53mod fake_git_repo;
54#[cfg(feature = "test-support")]
55use collections::{BTreeMap, btree_map};
56#[cfg(feature = "test-support")]
57use fake_git_repo::FakeGitRepositoryState;
58#[cfg(feature = "test-support")]
59use git::{
60 repository::{InitialGraphCommitData, RepoPath, Worktree, repo_path},
61 status::{FileStatus, StatusCode, TrackedStatus, UnmergedStatus},
62};
63#[cfg(feature = "test-support")]
64use util::normalize_path;
65
66#[cfg(feature = "test-support")]
67use smol::io::AsyncReadExt;
68#[cfg(feature = "test-support")]
69use std::ffi::OsStr;
70
/// Registers and unregisters paths with a running file-system watch.
pub trait Watcher: Send + Sync {
    /// Start watching `path`.
    fn add(&self, path: &Path) -> Result<()>;
    /// Stop watching `path`.
    fn remove(&self, path: &Path) -> Result<()>;
}
75
/// The kind of change observed for a watched path.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum PathEventKind {
    Removed,
    Created,
    Changed,
    // NOTE(review): presumably signals that the watcher lost track of changes
    // and consumers should rescan — confirm against the watcher backends.
    Rescan,
}

/// A single change reported by a file-system watch.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct PathEvent {
    pub path: PathBuf,
    // NOTE(review): `None` appears to mean the backend couldn't classify the
    // change — confirm in `fs_watcher`.
    pub kind: Option<PathEventKind>,
}

/// Lets a `PathEvent` be used wherever a bare path is expected.
impl From<PathEvent> for PathBuf {
    fn from(event: PathEvent) -> Self {
        event.path
    }
}
95
/// File-system abstraction used throughout the app, so that tests can
/// substitute an in-memory implementation (`FakeFs`).
#[async_trait::async_trait]
pub trait Fs: Send + Sync {
    async fn create_dir(&self, path: &Path) -> Result<()>;
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()>;
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
    /// Creates a file at `path` and streams `content` into it.
    async fn create_file_with(
        &self,
        path: &Path,
        content: Pin<&mut (dyn AsyncRead + Send)>,
    ) -> Result<()>;
    /// Unpacks the tar archive `content` into the directory at `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()>;
    async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;
    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;
    /// Moves the directory to the system trash; the default implementation
    /// falls back to permanent removal (platform implementations override).
    async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.remove_dir(path, options).await
    }
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;
    /// Moves the file to the system trash; the default implementation falls
    /// back to permanent removal (platform implementations override).
    async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.remove_file(path, options).await
    }
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>>;
    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>>;
    /// Reads the file as UTF-8; fails on invalid UTF-8.
    async fn load(&self, path: &Path) -> Result<String> {
        Ok(String::from_utf8(self.load_bytes(path).await?)?)
    }
    async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>>;
    /// Replaces the contents of `path` with `text` without readers ever
    /// observing a partially-written file.
    async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()>;
    async fn write(&self, path: &Path, content: &[u8]) -> Result<()>;
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
    async fn is_file(&self, path: &Path) -> bool;
    async fn is_dir(&self, path: &Path) -> bool;
    /// Returns `Ok(None)` when the path does not exist.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
    async fn read_link(&self, path: &Path) -> Result<PathBuf>;
    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;

    /// Watches `path` for changes, batching events with the given `latency`.
    /// The returned [`Watcher`] can register additional paths with the watch.
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    );

    fn open_repo(
        &self,
        abs_dot_git: &Path,
        system_git_binary_path: Option<&Path>,
    ) -> Result<Arc<dyn GitRepository>>;
    async fn git_init(&self, abs_work_directory: &Path, fallback_branch_name: String)
    -> Result<()>;
    async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()>;
    fn is_fake(&self) -> bool;
    async fn is_case_sensitive(&self) -> bool;
    /// Subscribes to start/completion events for long-running fs jobs.
    fn subscribe_to_jobs(&self) -> JobEventReceiver;

    #[cfg(feature = "test-support")]
    fn as_fake(&self) -> Arc<FakeFs> {
        panic!("called as_fake on a real fs");
    }
}
166
/// Process-wide [`Fs`] instance, stored in the GPUI global map.
struct GlobalFs(Arc<dyn Fs>);

impl Global for GlobalFs {}

impl dyn Fs {
    /// Returns the global [`Fs`].
    pub fn global(cx: &App) -> Arc<Self> {
        GlobalFs::global(cx).0.clone()
    }

    /// Sets the global [`Fs`].
    pub fn set_global(fs: Arc<Self>, cx: &mut App) {
        cx.set_global(GlobalFs(fs));
    }
}
182
/// Options for [`Fs::create_file`].
#[derive(Copy, Clone, Default)]
pub struct CreateOptions {
    /// Truncate the file if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed as a no-op if the file already exists.
    pub ignore_if_exists: bool,
}

/// Options for [`Fs::copy_file`].
#[derive(Copy, Clone, Default)]
pub struct CopyOptions {
    pub overwrite: bool,
    /// When not overwriting, succeed as a no-op if the target already exists.
    pub ignore_if_exists: bool,
}

/// Options for [`Fs::rename`].
#[derive(Copy, Clone, Default)]
pub struct RenameOptions {
    pub overwrite: bool,
    /// When not overwriting, succeed as a no-op if the target already exists.
    pub ignore_if_exists: bool,
    /// Whether to create parent directories if they do not exist.
    pub create_parents: bool,
}

/// Options for [`Fs::remove_dir`] and [`Fs::remove_file`].
#[derive(Copy, Clone, Default)]
pub struct RemoveOptions {
    pub recursive: bool,
    /// Treat a missing path as success instead of an error.
    pub ignore_if_not_exists: bool,
}
208
/// A file's metadata, as reported by [`Fs::metadata`].
#[derive(Copy, Clone, Debug)]
pub struct Metadata {
    /// Inode number (on Windows, a file id derived from the path).
    pub inode: u64,
    /// Last-modification time.
    pub mtime: MTime,
    /// Whether the path itself is a symlink; when it is, the remaining fields
    /// describe the link target if that target exists.
    pub is_symlink: bool,
    pub is_dir: bool,
    /// Length in bytes.
    pub len: u64,
    /// Whether the file is a FIFO (always `false` on Windows).
    pub is_fifo: bool,
    pub is_executable: bool,
}

/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
///
/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
// Serialized transparently, i.e. as a bare `SystemTime`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(transparent)]
pub struct MTime(SystemTime);

/// Identifier for a long-running filesystem job.
pub type JobId = usize;

/// Describes a running job; sent with [`JobEvent::Started`].
#[derive(Clone, Debug)]
pub struct JobInfo {
    /// When the job began.
    pub start: Instant,
    /// Human-readable description of the job.
    pub message: SharedString,
    pub id: JobId,
}

/// Lifecycle notification for a filesystem job.
#[derive(Debug, Clone)]
pub enum JobEvent {
    Started { info: JobInfo },
    Completed { id: JobId },
}

pub type JobEventSender = futures::channel::mpsc::UnboundedSender<JobEvent>;
pub type JobEventReceiver = futures::channel::mpsc::UnboundedReceiver<JobEvent>;

/// RAII guard: broadcasts `Started` on construction and `Completed` on drop.
struct JobTracker {
    id: JobId,
    subscribers: Arc<Mutex<Vec<JobEventSender>>>,
}
252
253impl JobTracker {
254 fn new(info: JobInfo, subscribers: Arc<Mutex<Vec<JobEventSender>>>) -> Self {
255 let id = info.id;
256 {
257 let mut subs = subscribers.lock();
258 subs.retain(|sender| {
259 sender
260 .unbounded_send(JobEvent::Started { info: info.clone() })
261 .is_ok()
262 });
263 }
264 Self { id, subscribers }
265 }
266}
267
268impl Drop for JobTracker {
269 fn drop(&mut self) {
270 let mut subs = self.subscribers.lock();
271 subs.retain(|sender| {
272 sender
273 .unbounded_send(JobEvent::Completed { id: self.id })
274 .is_ok()
275 });
276 }
277}
278
279impl MTime {
280 /// Conversion intended for persistence and testing.
281 pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
282 MTime(UNIX_EPOCH + Duration::new(secs, nanos))
283 }
284
285 /// Conversion intended for persistence.
286 pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
287 self.0
288 .duration_since(UNIX_EPOCH)
289 .ok()
290 .map(|duration| (duration.as_secs(), duration.subsec_nanos()))
291 }
292
293 /// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
294 /// "_for_user" is to discourage misuse - this method should not be used when making decisions
295 /// about file dirtiness.
296 pub fn timestamp_for_user(self) -> SystemTime {
297 self.0
298 }
299
300 /// Temporary method to split out the behavior changes from introduction of this newtype.
301 pub fn bad_is_greater_than(self, other: MTime) -> bool {
302 self.0 > other.0
303 }
304}
305
/// Converts from the `proto` wire-format timestamp.
impl From<proto::Timestamp> for MTime {
    fn from(timestamp: proto::Timestamp) -> Self {
        MTime(timestamp.into())
    }
}

/// Converts to the `proto` wire-format timestamp.
impl From<MTime> for proto::Timestamp {
    fn from(mtime: MTime) -> Self {
        mtime.0.into()
    }
}
317
/// The disk-backed [`Fs`] implementation used in production.
pub struct RealFs {
    /// Bundled `git` binary, when one was provided at construction.
    bundled_git_binary_path: Option<PathBuf>,
    /// Background executor used to offload blocking filesystem calls.
    executor: BackgroundExecutor,
    /// Source of unique [`JobId`]s.
    next_job_id: Arc<AtomicUsize>,
    job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
    // NOTE(review): cache for `is_case_sensitive`; the value encoding isn't
    // visible in this chunk — confirm against the `is_case_sensitive` impl.
    is_case_sensitive: AtomicU8,
}

/// An open file handle that can report the file's current path.
pub trait FileHandle: Send + Sync + std::fmt::Debug {
    fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf>;
}
329
330impl FileHandle for std::fs::File {
331 #[cfg(target_os = "macos")]
332 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
333 use std::{
334 ffi::{CStr, OsStr},
335 os::unix::ffi::OsStrExt,
336 };
337
338 let fd = self.as_fd();
339 let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit();
340
341 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) };
342 anyhow::ensure!(result != -1, "fcntl returned -1");
343
344 // SAFETY: `fcntl` will initialize the path buffer.
345 let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) };
346 anyhow::ensure!(!c_str.is_empty(), "Could find a path for the file handle");
347 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
348 Ok(path)
349 }
350
351 #[cfg(target_os = "linux")]
352 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
353 let fd = self.as_fd();
354 let fd_path = format!("/proc/self/fd/{}", fd.as_raw_fd());
355 let new_path = std::fs::read_link(fd_path)?;
356 if new_path
357 .file_name()
358 .is_some_and(|f| f.to_string_lossy().ends_with(" (deleted)"))
359 {
360 anyhow::bail!("file was deleted")
361 };
362
363 Ok(new_path)
364 }
365
366 #[cfg(target_os = "freebsd")]
367 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
368 use std::{
369 ffi::{CStr, OsStr},
370 os::unix::ffi::OsStrExt,
371 };
372
373 let fd = self.as_fd();
374 let mut kif = MaybeUninit::<libc::kinfo_file>::uninit();
375 kif.kf_structsize = libc::KINFO_FILE_SIZE;
376
377 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) };
378 anyhow::ensure!(result != -1, "fcntl returned -1");
379
380 // SAFETY: `fcntl` will initialize the kif.
381 let c_str = unsafe { CStr::from_ptr(kif.assume_init().kf_path.as_ptr()) };
382 anyhow::ensure!(!c_str.is_empty(), "Could find a path for the file handle");
383 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
384 Ok(path)
385 }
386
387 #[cfg(target_os = "windows")]
388 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
389 use std::ffi::OsString;
390 use std::os::windows::ffi::OsStringExt;
391 use std::os::windows::io::AsRawHandle;
392
393 use windows::Win32::Foundation::HANDLE;
394 use windows::Win32::Storage::FileSystem::{
395 FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW,
396 };
397
398 let handle = HANDLE(self.as_raw_handle() as _);
399
400 // Query required buffer size (in wide chars)
401 let required_len =
402 unsafe { GetFinalPathNameByHandleW(handle, &mut [], FILE_NAME_NORMALIZED) };
403 anyhow::ensure!(
404 required_len != 0,
405 "GetFinalPathNameByHandleW returned 0 length"
406 );
407
408 // Allocate buffer and retrieve the path
409 let mut buf: Vec<u16> = vec![0u16; required_len as usize + 1];
410 let written = unsafe { GetFinalPathNameByHandleW(handle, &mut buf, FILE_NAME_NORMALIZED) };
411 anyhow::ensure!(
412 written != 0,
413 "GetFinalPathNameByHandleW failed to write path"
414 );
415
416 let os_str: OsString = OsString::from_wide(&buf[..written as usize]);
417 anyhow::ensure!(!os_str.is_empty(), "Could find a path for the file handle");
418 Ok(PathBuf::from(os_str))
419 }
420}
421
// NOTE(review): empty placeholder type; its role is not visible in this chunk.
pub struct RealWatcher {}

impl RealFs {
    /// Creates a disk-backed [`Fs`]. `git_binary_path` optionally points at a
    /// bundled `git` executable.
    pub fn new(git_binary_path: Option<PathBuf>, executor: BackgroundExecutor) -> Self {
        Self {
            bundled_git_binary_path: git_binary_path,
            executor,
            next_job_id: Arc::new(AtomicUsize::new(0)),
            job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
            is_case_sensitive: Default::default(),
        }
    }

    /// Canonicalizes `path` while preserving the drive letter or mount point
    /// the caller used, rather than resolving to a UNC path.
    #[cfg(target_os = "windows")]
    fn canonicalize(path: &Path) -> Result<PathBuf> {
        use std::ffi::OsString;
        use std::os::windows::ffi::OsStringExt;
        use windows::Win32::Storage::FileSystem::GetVolumePathNameW;
        use windows::core::HSTRING;

        // std::fs::canonicalize resolves mapped network paths to UNC paths, which can
        // confuse some software. To mitigate this, we canonicalize the input, then rebase
        // the result onto the input's original volume root if both paths are on the same
        // volume. This keeps the same drive letter or mount point the caller used.

        let abs_path = if path.is_relative() {
            std::env::current_dir()?.join(path)
        } else {
            path.to_path_buf()
        };

        let path_hstring = HSTRING::from(abs_path.as_os_str());
        let mut vol_buf = vec![0u16; abs_path.as_os_str().len() + 2];
        unsafe { GetVolumePathNameW(&path_hstring, &mut vol_buf)? };
        let volume_root = {
            // The buffer is NUL-terminated; trim at the first NUL.
            let len = vol_buf
                .iter()
                .position(|&c| c == 0)
                .unwrap_or(vol_buf.len());
            PathBuf::from(OsString::from_wide(&vol_buf[..len]))
        };

        let resolved_path = dunce::canonicalize(&abs_path)?;
        let resolved_root = dunce::canonicalize(&volume_root)?;

        // Rebase onto the original volume root when both resolve to the same
        // volume; otherwise return the fully resolved path.
        if let Ok(relative) = resolved_path.strip_prefix(&resolved_root) {
            let mut result = volume_root;
            result.push(relative);
            Ok(result)
        } else {
            Ok(resolved_path)
        }
    }
}
476
/// Renames `source` to `target`, failing (instead of replacing) if `target`
/// already exists, using the platform's atomic no-replace rename.
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn rename_without_replace(source: &Path, target: &Path) -> io::Result<()> {
    let source = path_to_c_string(source)?;
    let target = path_to_c_string(target)?;

    // macOS: RENAME_EXCL makes the rename fail if the target exists.
    #[cfg(target_os = "macos")]
    let result = unsafe { libc::renamex_np(source.as_ptr(), target.as_ptr(), libc::RENAME_EXCL) };

    // Linux: renameat2 + RENAME_NOREPLACE, invoked via syscall() since libc
    // may not expose a renameat2 wrapper.
    #[cfg(target_os = "linux")]
    let result = unsafe {
        libc::syscall(
            libc::SYS_renameat2,
            libc::AT_FDCWD,
            source.as_ptr(),
            libc::AT_FDCWD,
            target.as_ptr(),
            libc::RENAME_NOREPLACE,
        )
    };

    if result == 0 {
        Ok(())
    } else {
        Err(io::Error::last_os_error())
    }
}

/// Renames `source` to `target`, failing (instead of replacing) if `target`
/// already exists.
#[cfg(target_os = "windows")]
fn rename_without_replace(source: &Path, target: &Path) -> io::Result<()> {
    use std::os::windows::ffi::OsStrExt;

    use windows::Win32::Storage::FileSystem::{MOVE_FILE_FLAGS, MoveFileExW};
    use windows::core::PCWSTR;

    let source: Vec<u16> = source.as_os_str().encode_wide().chain(Some(0)).collect();
    let target: Vec<u16> = target.as_os_str().encode_wide().chain(Some(0)).collect();

    // Default flags omit MOVEFILE_REPLACE_EXISTING, so the move fails when
    // the target already exists.
    unsafe {
        MoveFileExW(
            PCWSTR(source.as_ptr()),
            PCWSTR(target.as_ptr()),
            MOVE_FILE_FLAGS::default(),
        )
    }
    .map_err(|_| io::Error::last_os_error())
}
523
/// Converts a `Path` to a NUL-terminated C string for passing to libc calls.
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn path_to_c_string(path: &Path) -> io::Result<CString> {
    match CString::new(path.as_os_str().as_bytes()) {
        Ok(c_path) => Ok(c_path),
        // CString::new only fails when the bytes contain an interior NUL.
        Err(_) => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("path contains interior NUL: {}", path.display()),
        )),
    }
}
533
534#[async_trait::async_trait]
535impl Fs for RealFs {
536 async fn create_dir(&self, path: &Path) -> Result<()> {
537 Ok(smol::fs::create_dir_all(path).await?)
538 }
539
    /// Creates a symlink at `path` pointing to `target`. On Windows, directory
    /// targets are linked with an NTFS junction (`mklink /J`); file targets
    /// get a regular file symlink.
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
        #[cfg(unix)]
        smol::fs::unix::symlink(target, path).await?;

        #[cfg(windows)]
        if smol::fs::metadata(&target).await?.is_dir() {
            // mklink expects the link name first, then the target.
            let status = new_command("cmd")
                .args(["/C", "mklink", "/J"])
                .args([path, target.as_path()])
                .status()
                .await?;

            if !status.success() {
                return Err(anyhow::anyhow!(
                    "Failed to create junction from {:?} to {:?}",
                    path,
                    target
                ));
            }
        } else {
            smol::fs::windows::symlink_file(target, path).await?
        }

        Ok(())
    }
565
566 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
567 let mut open_options = smol::fs::OpenOptions::new();
568 open_options.write(true).create(true);
569 if options.overwrite {
570 open_options.truncate(true);
571 } else if !options.ignore_if_exists {
572 open_options.create_new(true);
573 }
574 open_options
575 .open(path)
576 .await
577 .with_context(|| format!("Failed to create file at {:?}", path))?;
578 Ok(())
579 }
580
    /// Creates (or truncates) the file at `path` and streams `content` into it.
    async fn create_file_with(
        &self,
        path: &Path,
        content: Pin<&mut (dyn AsyncRead + Send)>,
    ) -> Result<()> {
        let mut file = smol::fs::File::create(&path)
            .await
            .with_context(|| format!("Failed to create file at {:?}", path))?;
        futures::io::copy(content, &mut file).await?;
        Ok(())
    }

    /// Unpacks the tar archive `content` into the directory at `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()> {
        content.unpack(path).await?;
        Ok(())
    }
601
602 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
603 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
604 if options.ignore_if_exists {
605 return Ok(());
606 } else {
607 anyhow::bail!("{target:?} already exists");
608 }
609 }
610
611 smol::fs::copy(source, target).await?;
612 Ok(())
613 }
614
    /// Renames `source` to `target`. Without `overwrite`, first attempts the
    /// platform's atomic no-replace rename; falls back to a (racy) metadata
    /// check plus plain rename on filesystems that don't support it.
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
        if options.create_parents {
            if let Some(parent) = target.parent() {
                self.create_dir(parent).await?;
            }
        }

        if options.overwrite {
            smol::fs::rename(source, target).await?;
            return Ok(());
        }

        let use_metadata_fallback = {
            #[cfg(any(target_os = "macos", target_os = "linux", target_os = "windows"))]
            {
                let source = source.to_path_buf();
                let target = target.to_path_buf();
                match self
                    .executor
                    .spawn(async move { rename_without_replace(&source, &target) })
                    .await
                {
                    Ok(()) => return Ok(()),
                    Err(error) if error.kind() == io::ErrorKind::AlreadyExists => {
                        if options.ignore_if_exists {
                            return Ok(());
                        }
                        return Err(error.into());
                    }
                    Err(error)
                        if error.raw_os_error().is_some_and(|code| {
                            code == libc::ENOSYS
                                || code == libc::ENOTSUP
                                || code == libc::EOPNOTSUPP
                                || code == libc::EINVAL
                        }) =>
                    {
                        // For case when filesystem or kernel does not support atomic no-overwrite rename.
                        // EINVAL is returned by FUSE-based filesystems (e.g. NTFS via ntfs-3g)
                        // that don't support RENAME_NOREPLACE.
                        true
                    }
                    Err(error) => return Err(error.into()),
                }
            }

            #[cfg(not(any(target_os = "macos", target_os = "linux", target_os = "windows")))]
            {
                // For platforms which do not have an atomic no-overwrite rename yet.
                true
            }
        };

        // Non-atomic fallback: there is a window between the existence check
        // and the rename, but it's the best available without RENAME_NOREPLACE.
        if use_metadata_fallback && smol::fs::metadata(target).await.is_ok() {
            if options.ignore_if_exists {
                return Ok(());
            } else {
                anyhow::bail!("{target:?} already exists");
            }
        }

        smol::fs::rename(source, target).await?;
        Ok(())
    }
679
680 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
681 let result = if options.recursive {
682 smol::fs::remove_dir_all(path).await
683 } else {
684 smol::fs::remove_dir(path).await
685 };
686 match result {
687 Ok(()) => Ok(()),
688 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
689 Ok(())
690 }
691 Err(err) => Err(err)?,
692 }
693 }
694
    /// Removes the file at `path`. On Windows, a symlink that points at a
    /// directory is deleted through the directory removal path instead.
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        #[cfg(windows)]
        if let Ok(Some(metadata)) = self.metadata(path).await
            && metadata.is_symlink
            && metadata.is_dir
        {
            self.remove_dir(
                path,
                RemoveOptions {
                    recursive: false,
                    ignore_if_not_exists: true,
                },
            )
            .await?;
            return Ok(());
        }

        match smol::fs::remove_file(path).await {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
                Ok(())
            }
            Err(err) => Err(err)?,
        }
    }
720
    /// Moves the file to the macOS Trash via `NSWorkspace recycleURLs:`.
    #[cfg(target_os = "macos")]
    async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
        use cocoa::{
            base::{id, nil},
            foundation::{NSAutoreleasePool, NSString},
        };
        use objc::{class, msg_send, sel, sel_impl};

        unsafe {
            /// Allow NSString::alloc use here because it sets autorelease
            #[allow(clippy::disallowed_methods)]
            unsafe fn ns_string(string: &str) -> id {
                unsafe { NSString::alloc(nil).init_str(string).autorelease() }
            }

            let url: id = msg_send![class!(NSURL), fileURLWithPath: ns_string(path.to_string_lossy().as_ref())];
            let array: id = msg_send![class!(NSArray), arrayWithObject: url];
            let workspace: id = msg_send![class!(NSWorkspace), sharedWorkspace];

            // Fire-and-forget: a nil completion handler, so failures are not observed.
            let _: id = msg_send![workspace, recycleURLs: array completionHandler: nil];
        }
        Ok(())
    }

    /// Moves the file to the trash via the desktop portal (ashpd). Symlinks
    /// and portal failures fall back to permanent removal.
    #[cfg(any(target_os = "linux", target_os = "freebsd"))]
    async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
        if let Ok(Some(metadata)) = self.metadata(path).await
            && metadata.is_symlink
        {
            // TODO: trash_file does not support trashing symlinks yet - https://github.com/bilelmoussaoui/ashpd/issues/255
            return self.remove_file(path, RemoveOptions::default()).await;
        }
        let file = smol::fs::File::open(path).await?;
        match trash::trash_file(&file.as_fd()).await {
            Ok(_) => Ok(()),
            Err(err) => {
                log::error!("Failed to trash file: {}", err);
                // Trashing files can fail if you don't have a trashing dbus service configured.
                // In that case, delete the file directly instead.
                return self.remove_file(path, RemoveOptions::default()).await;
            }
        }
    }

    /// Moves the file to the trash via the WinRT `StorageFile::DeleteAsync`
    /// API with its default delete option.
    #[cfg(target_os = "windows")]
    async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
        use util::paths::SanitizedPath;
        use windows::{
            Storage::{StorageDeleteOption, StorageFile},
            core::HSTRING,
        };
        // todo(windows)
        // When new version of `windows-rs` release, make this operation `async`
        let path = path.canonicalize()?;
        let path = SanitizedPath::new(&path);
        let path_string = path.to_string();
        let file = StorageFile::GetFileFromPathAsync(&HSTRING::from(path_string))?.get()?;
        file.DeleteAsync(StorageDeleteOption::Default)?.get()?;
        Ok(())
    }
781
    /// Directories are trashed the same way as files on macOS.
    #[cfg(target_os = "macos")]
    async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.trash_file(path, options).await
    }

    /// Directories are trashed the same way as files via the desktop portal.
    #[cfg(any(target_os = "linux", target_os = "freebsd"))]
    async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.trash_file(path, options).await
    }

    /// Moves the directory to the trash via the WinRT
    /// `StorageFolder::DeleteAsync` API with its default delete option.
    #[cfg(target_os = "windows")]
    async fn trash_dir(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
        use util::paths::SanitizedPath;
        use windows::{
            Storage::{StorageDeleteOption, StorageFolder},
            core::HSTRING,
        };

        // todo(windows)
        // When new version of `windows-rs` release, make this operation `async`
        let path = path.canonicalize()?;
        let path = SanitizedPath::new(&path);
        let path_string = path.to_string();
        let folder = StorageFolder::GetFolderFromPathAsync(&HSTRING::from(path_string))?.get()?;
        folder.DeleteAsync(StorageDeleteOption::Default)?.get()?;
        Ok(())
    }
809
810 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
811 Ok(Box::new(std::fs::File::open(path)?))
812 }
813
    /// Opens a read-only [`FileHandle`] for `path`.
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
        let mut options = std::fs::OpenOptions::new();
        options.read(true);
        #[cfg(windows)]
        {
            use std::os::windows::fs::OpenOptionsExt;
            // NOTE(review): presumably set so that directories can be opened
            // as handles too — confirm against callers.
            options.custom_flags(windows::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS.0);
        }
        Ok(Arc::new(options.open(path)?))
    }
824
825 async fn load(&self, path: &Path) -> Result<String> {
826 let path = path.to_path_buf();
827 self.executor
828 .spawn(async move {
829 std::fs::read_to_string(&path)
830 .with_context(|| format!("Failed to read file {}", path.display()))
831 })
832 .await
833 }
834
835 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
836 let path = path.to_path_buf();
837 let bytes = self
838 .executor
839 .spawn(async move { std::fs::read(path) })
840 .await?;
841 Ok(bytes)
842 }
843
    /// Atomically replaces the contents of `path` with `data` by writing a
    /// temporary file in the destination's directory and renaming it over.
    #[cfg(not(target_os = "windows"))]
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        smol::unblock(move || {
            // Use the directory of the destination as temp dir to avoid
            // invalid cross-device link error, and XDG_CACHE_DIR for fallback.
            // See https://github.com/zed-industries/zed/pull/8437 for more details.
            let mut tmp_file =
                tempfile::NamedTempFile::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
            tmp_file.write_all(data.as_bytes())?;
            tmp_file.persist(path)?;
            anyhow::Ok(())
        })
        .await?;

        Ok(())
    }

    /// Atomically replaces the contents of `path` with `data` on Windows.
    #[cfg(target_os = "windows")]
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        smol::unblock(move || {
            // If temp dir is set to a different drive than the destination,
            // we receive error:
            //
            // failed to persist temporary file:
            // The system cannot move the file to a different disk drive. (os error 17)
            //
            // This is because `ReplaceFileW` does not support cross volume moves.
            // See the remark section: "The backup file, replaced file, and replacement file must all reside on the same volume."
            // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-replacefilew#remarks
            //
            // So we use the directory of the destination as a temp dir to avoid it.
            // https://github.com/zed-industries/zed/issues/16571
            let temp_dir = TempDir::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
            let temp_file = {
                let temp_file_path = temp_dir.path().join("temp_file");
                let mut file = std::fs::File::create_new(&temp_file_path)?;
                file.write_all(data.as_bytes())?;
                temp_file_path
            };
            atomic_replace(path.as_path(), temp_file.as_path())?;
            anyhow::Ok(())
        })
        .await?;
        Ok(())
    }
889
890 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
891 let buffer_size = text.summary().len.min(10 * 1024);
892 if let Some(path) = path.parent() {
893 self.create_dir(path)
894 .await
895 .with_context(|| format!("Failed to create directory at {:?}", path))?;
896 }
897 let file = smol::fs::File::create(path)
898 .await
899 .with_context(|| format!("Failed to create file at {:?}", path))?;
900 let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
901 for chunk in text::chunks_with_line_ending(text, line_ending) {
902 writer.write_all(chunk.as_bytes()).await?;
903 }
904 writer.flush().await?;
905 Ok(())
906 }
907
908 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
909 if let Some(path) = path.parent() {
910 self.create_dir(path)
911 .await
912 .with_context(|| format!("Failed to create directory at {:?}", path))?;
913 }
914 let path = path.to_owned();
915 let contents = content.to_owned();
916 self.executor
917 .spawn(async move {
918 std::fs::write(path, contents)?;
919 Ok(())
920 })
921 .await
922 }
923
    /// Canonicalizes `path` on the background executor. On Windows this uses
    /// the UNC-avoiding `RealFs::canonicalize` helper.
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
        let path = path.to_owned();
        self.executor
            .spawn(async move {
                #[cfg(target_os = "windows")]
                let result = Self::canonicalize(&path);

                #[cfg(not(target_os = "windows"))]
                let result = std::fs::canonicalize(&path);

                result.with_context(|| format!("canonicalizing {path:?}"))
            })
            .await
    }
938
939 async fn is_file(&self, path: &Path) -> bool {
940 let path = path.to_owned();
941 self.executor
942 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) })
943 .await
944 }
945
946 async fn is_dir(&self, path: &Path) -> bool {
947 let path = path.to_owned();
948 self.executor
949 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) })
950 .await
951 }
952
    /// Stats `path`, following symlinks when possible.
    ///
    /// Returns `Ok(None)` if the path does not exist. For symlinks, the
    /// returned metadata describes the link target when it exists; for broken
    /// or cyclic links it falls back to the link's own metadata. `is_symlink`
    /// reflects the link itself either way.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        let path_buf = path.to_owned();
        // lstat-equivalent: does not follow a final symlink.
        let symlink_metadata = match self
            .executor
            .spawn(async move { std::fs::symlink_metadata(&path_buf) })
            .await
        {
            Ok(metadata) => metadata,
            Err(err) => {
                return match err.kind() {
                    // A missing ancestor directory also counts as "not found".
                    io::ErrorKind::NotFound | io::ErrorKind::NotADirectory => Ok(None),
                    _ => Err(anyhow::Error::new(err)),
                };
            }
        };

        let is_symlink = symlink_metadata.file_type().is_symlink();
        let metadata = if is_symlink {
            let path_buf = path.to_path_buf();
            // Read target metadata, if the target exists
            match self
                .executor
                .spawn(async move { std::fs::metadata(path_buf) })
                .await
            {
                Ok(target_metadata) => target_metadata,
                Err(err) => {
                    if err.kind() != io::ErrorKind::NotFound {
                        // TODO: Also FilesystemLoop when that's stable
                        log::warn!(
                            "Failed to read symlink target metadata for path {path:?}: {err}"
                        );
                    }
                    // For a broken or recursive symlink, return the symlink metadata. (Or
                    // as edge cases, a symlink into a directory we can't read, which is hard
                    // to distinguish from just being broken.)
                    symlink_metadata
                }
            }
        } else {
            symlink_metadata
        };

        #[cfg(unix)]
        let inode = metadata.ino();

        #[cfg(windows)]
        let inode = file_id(path).await?;

        #[cfg(windows)]
        let is_fifo = false;

        #[cfg(unix)]
        let is_fifo = metadata.file_type().is_fifo();

        let path_buf = path.to_path_buf();
        let is_executable = self
            .executor
            .spawn(async move { path_buf.is_executable() })
            .await;

        Ok(Some(Metadata {
            inode,
            // Files whose mtime cannot be read report the Unix epoch.
            mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)),
            len: metadata.len(),
            is_symlink,
            is_dir: metadata.file_type().is_dir(),
            is_fifo,
            is_executable,
        }))
    }
1024
1025 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
1026 let path = path.to_owned();
1027 let path = self
1028 .executor
1029 .spawn(async move { std::fs::read_link(&path) })
1030 .await?;
1031 Ok(path)
1032 }
1033
1034 async fn read_dir(
1035 &self,
1036 path: &Path,
1037 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
1038 let path = path.to_owned();
1039 let result = iter(
1040 self.executor
1041 .spawn(async move { std::fs::read_dir(path) })
1042 .await?,
1043 )
1044 .map(|entry| match entry {
1045 Ok(entry) => Ok(entry.path()),
1046 Err(error) => Err(anyhow!("failed to read dir entry {error:?}")),
1047 });
1048 Ok(Box::pin(result))
1049 }
1050
1051 async fn watch(
1052 &self,
1053 path: &Path,
1054 latency: Duration,
1055 ) -> (
1056 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
1057 Arc<dyn Watcher>,
1058 ) {
1059 use util::{ResultExt as _, paths::SanitizedPath};
1060 let executor = self.executor.clone();
1061
1062 let (tx, rx) = smol::channel::unbounded();
1063 let pending_paths: Arc<Mutex<Vec<PathEvent>>> = Default::default();
1064 let watcher = Arc::new(fs_watcher::FsWatcher::new(tx, pending_paths.clone()));
1065
1066 // If the path doesn't exist yet (e.g. settings.json), watch the parent dir to learn when it's created.
1067 if let Err(e) = watcher.add(path)
1068 && let Some(parent) = path.parent()
1069 && let Err(parent_e) = watcher.add(parent)
1070 {
1071 log::warn!(
1072 "Failed to watch {} and its parent directory {}:\n{e}\n{parent_e}",
1073 path.display(),
1074 parent.display()
1075 );
1076 }
1077
1078 // Check if path is a symlink and follow the target parent
1079 if let Some(mut target) = self.read_link(path).await.ok() {
1080 log::trace!("watch symlink {path:?} -> {target:?}");
1081 // Check if symlink target is relative path, if so make it absolute
1082 if target.is_relative()
1083 && let Some(parent) = path.parent()
1084 {
1085 target = parent.join(target);
1086 if let Ok(canonical) = self.canonicalize(&target).await {
1087 target = SanitizedPath::new(&canonical).as_path().to_path_buf();
1088 }
1089 }
1090 watcher.add(&target).ok();
1091 if let Some(parent) = target.parent() {
1092 watcher.add(parent).log_err();
1093 }
1094 }
1095
1096 (
1097 Box::pin(rx.filter_map({
1098 let watcher = watcher.clone();
1099 let executor = executor.clone();
1100 move |_| {
1101 let _ = watcher.clone();
1102 let pending_paths = pending_paths.clone();
1103 let executor = executor.clone();
1104 async move {
1105 executor.timer(latency).await;
1106 let paths = std::mem::take(&mut *pending_paths.lock());
1107 (!paths.is_empty()).then_some(paths)
1108 }
1109 }
1110 })),
1111 watcher,
1112 )
1113 }
1114
1115 fn open_repo(
1116 &self,
1117 dotgit_path: &Path,
1118 system_git_binary_path: Option<&Path>,
1119 ) -> Result<Arc<dyn GitRepository>> {
1120 Ok(Arc::new(RealGitRepository::new(
1121 dotgit_path,
1122 self.bundled_git_binary_path.clone(),
1123 system_git_binary_path.map(|path| path.to_path_buf()),
1124 self.executor.clone(),
1125 )?))
1126 }
1127
1128 async fn git_init(
1129 &self,
1130 abs_work_directory_path: &Path,
1131 fallback_branch_name: String,
1132 ) -> Result<()> {
1133 let config = new_command("git")
1134 .current_dir(abs_work_directory_path)
1135 .args(&["config", "--global", "--get", "init.defaultBranch"])
1136 .output()
1137 .await?;
1138
1139 let branch_name;
1140
1141 if config.status.success() && !config.stdout.is_empty() {
1142 branch_name = String::from_utf8_lossy(&config.stdout);
1143 } else {
1144 branch_name = Cow::Borrowed(fallback_branch_name.as_str());
1145 }
1146
1147 new_command("git")
1148 .current_dir(abs_work_directory_path)
1149 .args(&["init", "-b"])
1150 .arg(branch_name.trim())
1151 .output()
1152 .await?;
1153
1154 Ok(())
1155 }
1156
1157 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()> {
1158 let job_id = self.next_job_id.fetch_add(1, Ordering::SeqCst);
1159 let job_info = JobInfo {
1160 id: job_id,
1161 start: Instant::now(),
1162 message: SharedString::from(format!("Cloning {}", repo_url)),
1163 };
1164
1165 let _job_tracker = JobTracker::new(job_info, self.job_event_subscribers.clone());
1166
1167 let output = new_command("git")
1168 .current_dir(abs_work_directory)
1169 .args(&["clone", repo_url])
1170 .output()
1171 .await?;
1172
1173 if !output.status.success() {
1174 anyhow::bail!(
1175 "git clone failed: {}",
1176 String::from_utf8_lossy(&output.stderr)
1177 );
1178 }
1179
1180 Ok(())
1181 }
1182
    /// Always `false`: this is the real-filesystem implementation.
    fn is_fake(&self) -> bool {
        false
    }
1186
1187 fn subscribe_to_jobs(&self) -> JobEventReceiver {
1188 let (sender, receiver) = futures::channel::mpsc::unbounded();
1189 self.job_event_subscribers.lock().push(sender);
1190 receiver
1191 }
1192
    /// Checks whether the file system is case sensitive by attempting to create two files
    /// that have the same name except for the casing.
    ///
    /// It creates both files in a temporary directory it removes at the end.
    async fn is_case_sensitive(&self) -> bool {
        // Tri-state cache stored in `self.is_case_sensitive`.
        const UNINITIALIZED: u8 = 0;
        const CASE_SENSITIVE: u8 = 1;
        const NOT_CASE_SENSITIVE: u8 = 2;

        // Note we could CAS here, but really, if we race we do this work twice at worst which isn't a big deal.
        let load = self.is_case_sensitive.load(Ordering::Acquire);
        if load != UNINITIALIZED {
            return load == CASE_SENSITIVE;
        }
        let temp_dir = self.executor.spawn(async { TempDir::new() });
        let res = maybe!(async {
            let temp_dir = temp_dir.await?;
            let test_file_1 = temp_dir.path().join("case_sensitivity_test.tmp");
            let test_file_2 = temp_dir.path().join("CASE_SENSITIVITY_TEST.TMP");

            // `overwrite: false` makes the second create fail with
            // `AlreadyExists` on a case-insensitive filesystem.
            let create_opts = CreateOptions {
                overwrite: false,
                ignore_if_exists: false,
            };

            // Create file1
            self.create_file(&test_file_1, create_opts).await?;

            // Now check whether it's possible to create file2
            let case_sensitive = match self.create_file(&test_file_2, create_opts).await {
                Ok(_) => Ok(true),
                Err(e) => {
                    // `AlreadyExists` means the filesystem treated both
                    // spellings as the same file; any other error is real.
                    if let Some(io_error) = e.downcast_ref::<io::Error>() {
                        if io_error.kind() == io::ErrorKind::AlreadyExists {
                            Ok(false)
                        } else {
                            Err(e)
                        }
                    } else {
                        Err(e)
                    }
                }
            };

            temp_dir.close()?;
            case_sensitive
        }).await.unwrap_or_else(|e| {
            // On error, assume case-sensitive (the more conservative answer).
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });
        // Cache the result for subsequent calls.
        self.is_case_sensitive.store(
            if res {
                CASE_SENSITIVE
            } else {
                NOT_CASE_SENSITIVE
            },
            Ordering::Release,
        );
        res
    }
1255}
1256
#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
impl Watcher for RealWatcher {
    // On these platforms per-path registration is a no-op.
    // NOTE(review): presumably the platform watcher (macOS/Windows) observes
    // paths without explicit registration — confirm against fs_watcher.
    fn add(&self, _: &Path) -> Result<()> {
        Ok(())
    }

    fn remove(&self, _: &Path) -> Result<()> {
        Ok(())
    }
}
1267
#[cfg(feature = "test-support")]
/// An in-memory filesystem used by tests.
pub struct FakeFs {
    // Weak handle to the owning `Arc` (set via `Arc::new_cyclic` in `new`).
    this: std::sync::Weak<Self>,
    // Use an unfair lock to ensure tests are deterministic.
    state: Arc<Mutex<FakeFsState>>,
    executor: gpui::BackgroundExecutor,
}
1275
#[cfg(feature = "test-support")]
/// The mutable state behind [`FakeFs`]'s lock.
struct FakeFsState {
    // The root directory entry of the fake tree.
    root: FakeFsEntry,
    // Monotonic counters used to fabricate inodes and mtimes.
    next_inode: u64,
    next_mtime: SystemTime,
    // Fake git repositories report changes through this channel.
    git_event_tx: smol::channel::Sender<PathBuf>,
    // (path, sender) pairs; `flush_events` currently sends every batch to all
    // of them and drops senders whose receiver has closed.
    event_txs: Vec<(PathBuf, smol::channel::Sender<Vec<PathEvent>>)>,
    // While true, events accumulate in `buffered_events` instead of being sent.
    events_paused: bool,
    buffered_events: Vec<PathEvent>,
    // Call counters for test assertions.
    metadata_call_count: usize,
    read_dir_call_count: usize,
    // Number of writes per path (incremented in `write_file_internal`).
    path_write_counts: std::collections::HashMap<PathBuf, usize>,
    // NOTE(review): appears to map inodes to destination paths during
    // renames — confirm against the rename implementation.
    moves: std::collections::HashMap<u64, PathBuf>,
    job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
}
1291
#[cfg(feature = "test-support")]
/// A node in the fake filesystem tree.
#[derive(Clone, Debug)]
enum FakeFsEntry {
    File {
        inode: u64,
        mtime: MTime,
        len: u64,
        content: Vec<u8>,
        // The path to the repository state directory, if this is a gitfile.
        git_dir_path: Option<PathBuf>,
    },
    Dir {
        inode: u64,
        mtime: MTime,
        len: u64,
        // Children keyed by file name.
        entries: BTreeMap<String, FakeFsEntry>,
        // Lazily-created fake git state, when this directory is a `.git` dir.
        git_repo_state: Option<Arc<Mutex<FakeGitRepositoryState>>>,
    },
    Symlink {
        target: PathBuf,
    },
}
1314
#[cfg(feature = "test-support")]
impl PartialEq for FakeFsEntry {
    // Hand-written because `git_repo_state` holds an `Arc` without a
    // `PartialEq` impl: two dirs are equal only when they share the same
    // repo-state allocation (pointer equality) or both have none.
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (
                Self::File {
                    inode: a_inode,
                    mtime: a_mtime,
                    len: a_len,
                    content: a_content,
                    git_dir_path: a_git_dir_path,
                },
                Self::File {
                    inode: b_inode,
                    mtime: b_mtime,
                    len: b_len,
                    content: b_content,
                    git_dir_path: b_git_dir_path,
                },
            ) => {
                (a_inode, a_mtime, a_len) == (b_inode, b_mtime, b_len)
                    && a_content == b_content
                    && a_git_dir_path == b_git_dir_path
            }
            (
                Self::Dir {
                    inode: a_inode,
                    mtime: a_mtime,
                    len: a_len,
                    entries: a_entries,
                    git_repo_state: a_repo_state,
                },
                Self::Dir {
                    inode: b_inode,
                    mtime: b_mtime,
                    len: b_len,
                    entries: b_entries,
                    git_repo_state: b_repo_state,
                },
            ) => {
                let same_repo_state = match (a_repo_state.as_ref(), b_repo_state.as_ref()) {
                    (Some(a), Some(b)) => Arc::ptr_eq(a, b),
                    (None, None) => true,
                    _ => false,
                };
                same_repo_state
                    && (a_inode, a_mtime, a_len) == (b_inode, b_mtime, b_len)
                    && a_entries == b_entries
            }
            (Self::Symlink { target: a_target }, Self::Symlink { target: b_target }) => {
                a_target == b_target
            }
            _ => false,
        }
    }
}
1375
#[cfg(feature = "test-support")]
impl FakeFsState {
    /// Returns the next simulated mtime, advancing the fake clock by
    /// `FakeFs::SYSTEMTIME_INTERVAL`.
    fn get_and_increment_mtime(&mut self) -> MTime {
        let mtime = self.next_mtime;
        self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
        MTime(mtime)
    }

    /// Returns the next unused inode number, advancing the counter.
    fn get_and_increment_inode(&mut self) -> u64 {
        let inode = self.next_inode;
        self.next_inode += 1;
        inode
    }

    /// Resolves `target` to a canonical absolute path within the fake tree,
    /// expanding `.`/`..` and symlinks. Symlinks in intermediate components
    /// are always followed; the final component is followed only when
    /// `follow_symlink` is true. Returns `None` if any component is missing.
    fn canonicalize(&self, target: &Path, follow_symlink: bool) -> Option<PathBuf> {
        let mut canonical_path = PathBuf::new();
        let mut path = target.to_path_buf();
        // Entries from the root down to the component currently being visited.
        let mut entry_stack = Vec::new();
        'outer: loop {
            let mut path_components = path.components().peekable();
            let mut prefix = None;
            while let Some(component) = path_components.next() {
                match component {
                    Component::Prefix(prefix_component) => prefix = Some(prefix_component),
                    Component::RootDir => {
                        // Restart resolution from the fake root.
                        entry_stack.clear();
                        entry_stack.push(&self.root);
                        canonical_path.clear();
                        match prefix {
                            Some(prefix_component) => {
                                canonical_path = PathBuf::from(prefix_component.as_os_str());
                                // Prefixes like `C:\\` are represented without their trailing slash, so we have to re-add it.
                                canonical_path.push(std::path::MAIN_SEPARATOR_STR);
                            }
                            None => canonical_path = PathBuf::from(std::path::MAIN_SEPARATOR_STR),
                        }
                    }
                    Component::CurDir => {}
                    Component::ParentDir => {
                        entry_stack.pop()?;
                        canonical_path.pop();
                    }
                    Component::Normal(name) => {
                        let current_entry = *entry_stack.last()?;
                        if let FakeFsEntry::Dir { entries, .. } = current_entry {
                            let entry = entries.get(name.to_str().unwrap())?;
                            if (path_components.peek().is_some() || follow_symlink)
                                && let FakeFsEntry::Symlink { target, .. } = entry
                            {
                                // Splice the symlink target plus the remaining
                                // components into a new path and start over.
                                let mut target = target.clone();
                                target.extend(path_components);
                                path = target;
                                continue 'outer;
                            }
                            entry_stack.push(entry);
                            canonical_path = canonical_path.join(name);
                        } else {
                            return None;
                        }
                    }
                }
            }
            break;
        }

        if entry_stack.is_empty() {
            None
        } else {
            Some(canonical_path)
        }
    }

    /// Looks up the entry at `target` (canonicalizing first), returning a
    /// mutable reference to it plus its canonical path, or `None` if absent.
    fn try_entry(
        &mut self,
        target: &Path,
        follow_symlink: bool,
    ) -> Option<(&mut FakeFsEntry, PathBuf)> {
        let canonical_path = self.canonicalize(target, follow_symlink)?;

        let mut components = canonical_path
            .components()
            .skip_while(|component| matches!(component, Component::Prefix(_)));
        let Some(Component::RootDir) = components.next() else {
            panic!(
                "the path {:?} was not canonicalized properly {:?}",
                target, canonical_path
            )
        };

        let mut entry = &mut self.root;
        for component in components {
            match component {
                Component::Normal(name) => {
                    if let FakeFsEntry::Dir { entries, .. } = entry {
                        entry = entries.get_mut(name.to_str().unwrap())?;
                    } else {
                        return None;
                    }
                }
                _ => {
                    // A canonical path contains only the root plus normal components.
                    panic!(
                        "the path {:?} was not canonicalized properly {:?}",
                        target, canonical_path
                    )
                }
            }
        }

        Some((entry, canonical_path))
    }

    /// Like `try_entry` (following symlinks), but converts a missing entry
    /// into an `io::ErrorKind::NotFound` error.
    fn entry(&mut self, target: &Path) -> Result<&mut FakeFsEntry> {
        Ok(self
            .try_entry(target, true)
            .ok_or_else(|| {
                anyhow!(io::Error::new(
                    io::ErrorKind::NotFound,
                    format!("not found: {target:?}")
                ))
            })?
            .0)
    }

    /// Runs `callback` with the btree entry slot for `path`'s file name inside
    /// its parent directory, letting callers insert or mutate in place.
    /// Fails if `path` is the root or its parent doesn't exist.
    fn write_path<Fn, T>(&mut self, path: &Path, callback: Fn) -> Result<T>
    where
        Fn: FnOnce(btree_map::Entry<String, FakeFsEntry>) -> Result<T>,
    {
        let path = normalize_path(path);
        let filename = path.file_name().context("cannot overwrite the root")?;
        let parent_path = path.parent().unwrap();

        let parent = self.entry(parent_path)?;
        let new_entry = parent
            .dir_entries(parent_path)?
            .entry(filename.to_str().unwrap().into());
        callback(new_entry)
    }

    /// Queues the given path events, delivering them immediately unless
    /// events are currently paused.
    fn emit_event<I, T>(&mut self, paths: I)
    where
        I: IntoIterator<Item = (T, Option<PathEventKind>)>,
        T: Into<PathBuf>,
    {
        self.buffered_events
            .extend(paths.into_iter().map(|(path, kind)| PathEvent {
                path: path.into(),
                kind,
            }));

        if !self.events_paused {
            self.flush_events(self.buffered_events.len());
        }
    }

    /// Sends up to `count` buffered events to every subscriber, dropping
    /// subscribers whose receiving end has been closed.
    fn flush_events(&mut self, mut count: usize) {
        count = count.min(self.buffered_events.len());
        let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
        self.event_txs.retain(|(_, tx)| {
            let _ = tx.try_send(events.clone());
            !tx.is_closed()
        });
    }
}
1539
#[cfg(feature = "test-support")]
/// The `.git` directory name as a lazily-initialized static `OsStr`.
pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
    std::sync::LazyLock::new(|| OsStr::new(".git"));
1543
1544#[cfg(feature = "test-support")]
1545impl FakeFs {
    /// We need to use something large enough for Windows and Unix to consider this a new file.
    /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
    ///
    /// The fake clock advances by this amount for each fabricated mtime.
    const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);
1549
    /// Creates an empty fake filesystem rooted at `/`.
    ///
    /// Also spawns a background task that forwards git-state change
    /// notifications (sent by fake repositories) into the event stream.
    pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
        let (tx, rx) = smol::channel::bounded::<PathBuf>(10);

        // `new_cyclic` lets the struct hold a weak reference to itself.
        let this = Arc::new_cyclic(|this| Self {
            this: this.clone(),
            executor: executor.clone(),
            state: Arc::new(Mutex::new(FakeFsState {
                root: FakeFsEntry::Dir {
                    inode: 0,
                    mtime: MTime(UNIX_EPOCH),
                    len: 0,
                    entries: Default::default(),
                    git_repo_state: None,
                },
                git_event_tx: tx,
                next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
                next_inode: 1,
                event_txs: Default::default(),
                buffered_events: Vec::new(),
                events_paused: false,
                read_dir_call_count: 0,
                metadata_call_count: 0,
                path_write_counts: Default::default(),
                moves: Default::default(),
                job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
            })),
        });

        executor.spawn({
            let this = this.clone();
            async move {
                while let Ok(git_event) = rx.recv().await {
                    // `try_lock` instead of `lock`: blocking here while a test
                    // already holds the state lock would hang the executor.
                    if let Some(mut state) = this.state.try_lock() {
                        state.emit_event([(git_event, Some(PathEventKind::Changed))]);
                    } else {
                        panic!("Failed to lock file system state, this execution would have caused a test hang");
                    }
                }
            }
        }).detach();

        this
    }
1593
1594 pub fn set_next_mtime(&self, next_mtime: SystemTime) {
1595 let mut state = self.state.lock();
1596 state.next_mtime = next_mtime;
1597 }
1598
1599 pub fn get_and_increment_mtime(&self) -> MTime {
1600 let mut state = self.state.lock();
1601 state.get_and_increment_mtime()
1602 }
1603
1604 pub async fn touch_path(&self, path: impl AsRef<Path>) {
1605 let mut state = self.state.lock();
1606 let path = path.as_ref();
1607 let new_mtime = state.get_and_increment_mtime();
1608 let new_inode = state.get_and_increment_inode();
1609 state
1610 .write_path(path, move |entry| {
1611 match entry {
1612 btree_map::Entry::Vacant(e) => {
1613 e.insert(FakeFsEntry::File {
1614 inode: new_inode,
1615 mtime: new_mtime,
1616 content: Vec::new(),
1617 len: 0,
1618 git_dir_path: None,
1619 });
1620 }
1621 btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut() {
1622 FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
1623 FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
1624 FakeFsEntry::Symlink { .. } => {}
1625 },
1626 }
1627 Ok(())
1628 })
1629 .unwrap();
1630 state.emit_event([(path.to_path_buf(), Some(PathEventKind::Changed))]);
1631 }
1632
    /// Inserts (or overwrites) the file at `path` with `content`, assigning a
    /// fresh inode. Panics on failure.
    pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
        self.write_file_internal(path, content, true).unwrap()
    }
1636
1637 pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
1638 let mut state = self.state.lock();
1639 let path = path.as_ref();
1640 let file = FakeFsEntry::Symlink { target };
1641 state
1642 .write_path(path.as_ref(), move |e| match e {
1643 btree_map::Entry::Vacant(e) => {
1644 e.insert(file);
1645 Ok(())
1646 }
1647 btree_map::Entry::Occupied(mut e) => {
1648 *e.get_mut() = file;
1649 Ok(())
1650 }
1651 })
1652 .unwrap();
1653 state.emit_event([(path, Some(PathEventKind::Created))]);
1654 }
1655
    /// Writes `new_content` to `path`, creating the file if needed. When
    /// `recreate_inode` is true an existing file also receives a fresh inode,
    /// making it look like a brand-new file to observers.
    fn write_file_internal(
        &self,
        path: impl AsRef<Path>,
        new_content: Vec<u8>,
        recreate_inode: bool,
    ) -> Result<()> {
        // Non-generic inner function keeps the generic shim small.
        fn inner(
            this: &FakeFs,
            path: &Path,
            new_content: Vec<u8>,
            recreate_inode: bool,
        ) -> Result<()> {
            let mut state = this.state.lock();
            let path_buf = path.to_path_buf();
            // Track per-path write counts for tests that assert on write traffic.
            *state.path_write_counts.entry(path_buf).or_insert(0) += 1;
            let new_inode = state.get_and_increment_inode();
            let new_mtime = state.get_and_increment_mtime();
            let new_len = new_content.len() as u64;
            // Records whether this write created or changed the entry, for
            // the event emitted below.
            let mut kind = None;
            state.write_path(path, |entry| {
                match entry {
                    btree_map::Entry::Vacant(e) => {
                        kind = Some(PathEventKind::Created);
                        e.insert(FakeFsEntry::File {
                            inode: new_inode,
                            mtime: new_mtime,
                            len: new_len,
                            content: new_content,
                            git_dir_path: None,
                        });
                    }
                    btree_map::Entry::Occupied(mut e) => {
                        kind = Some(PathEventKind::Changed);
                        if let FakeFsEntry::File {
                            inode,
                            mtime,
                            len,
                            content,
                            ..
                        } = e.get_mut()
                        {
                            *mtime = new_mtime;
                            *content = new_content;
                            *len = new_len;
                            if recreate_inode {
                                *inode = new_inode;
                            }
                        } else {
                            // The path exists but is a directory or symlink.
                            anyhow::bail!("not a file")
                        }
                    }
                }
                Ok(())
            })?;
            state.emit_event([(path, kind)]);
            Ok(())
        }
        inner(self, path.as_ref(), new_content, recreate_inode)
    }
1715
1716 pub fn read_file_sync(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1717 let path = path.as_ref();
1718 let path = normalize_path(path);
1719 let mut state = self.state.lock();
1720 let entry = state.entry(&path)?;
1721 entry.file_content(&path).cloned()
1722 }
1723
1724 async fn load_internal(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1725 let path = path.as_ref();
1726 let path = normalize_path(path);
1727 self.simulate_random_delay().await;
1728 let mut state = self.state.lock();
1729 let entry = state.entry(&path)?;
1730 entry.file_content(&path).cloned()
1731 }
1732
    /// Stops delivering events to watchers; they accumulate in the buffer
    /// until flushed.
    pub fn pause_events(&self) {
        self.state.lock().events_paused = true;
    }

    /// Re-enables event delivery and flushes everything buffered so far.
    pub fn unpause_events_and_flush(&self) {
        self.state.lock().events_paused = false;
        self.flush_events(usize::MAX);
    }

    /// Number of events currently buffered (not yet delivered).
    pub fn buffered_event_count(&self) -> usize {
        self.state.lock().buffered_events.len()
    }

    /// Discards all buffered events without delivering them.
    pub fn clear_buffered_events(&self) {
        self.state.lock().buffered_events.clear();
    }

    /// Delivers up to `count` buffered events to all watchers.
    pub fn flush_events(&self, count: usize) {
        self.state.lock().flush_events(count);
    }
1753
    /// Returns a clone of the entry at `target`, following symlinks.
    pub(crate) fn entry(&self, target: &Path) -> Result<FakeFsEntry> {
        self.state.lock().entry(target).cloned()
    }
1757
1758 pub(crate) fn insert_entry(&self, target: &Path, new_entry: FakeFsEntry) -> Result<()> {
1759 let mut state = self.state.lock();
1760 state.write_path(target, |entry| {
1761 match entry {
1762 btree_map::Entry::Vacant(vacant_entry) => {
1763 vacant_entry.insert(new_entry);
1764 }
1765 btree_map::Entry::Occupied(mut occupied_entry) => {
1766 occupied_entry.insert(new_entry);
1767 }
1768 }
1769 Ok(())
1770 })
1771 }
1772
    /// Builds a directory tree at `path` from a JSON value: objects become
    /// directories (keys are child names), `null` becomes an empty directory,
    /// and strings become file contents. Panics on any other JSON type.
    #[must_use]
    pub fn insert_tree<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        tree: serde_json::Value,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;
        use serde_json::Value::*;

        // Non-generic inner function; boxing the future enables recursion.
        fn inner<'a>(
            this: &'a FakeFs,
            path: Arc<Path>,
            tree: serde_json::Value,
        ) -> futures::future::BoxFuture<'a, ()> {
            async move {
                match tree {
                    Object(map) => {
                        this.create_dir(&path).await.unwrap();
                        for (name, contents) in map {
                            let mut path = PathBuf::from(path.as_ref());
                            path.push(name);
                            this.insert_tree(&path, contents).await;
                        }
                    }
                    Null => {
                        this.create_dir(&path).await.unwrap();
                    }
                    String(contents) => {
                        this.insert_file(&path, contents.into_bytes()).await;
                    }
                    _ => {
                        panic!("JSON object must contain only objects, strings, or null");
                    }
                }
            }
            .boxed()
        }
        inner(self, Arc::from(path.as_ref()), tree)
    }
1812
    /// Recursively copies the file or directory at `src_path` on the real
    /// filesystem into the fake filesystem at `path`. Panics on I/O errors.
    pub fn insert_tree_from_real_fs<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        src_path: impl 'a + AsRef<Path> + Send,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;

        async move {
            let path = path.as_ref();
            if std::fs::metadata(&src_path).unwrap().is_file() {
                let contents = std::fs::read(src_path).unwrap();
                self.insert_file(path, contents).await;
            } else {
                self.create_dir(path).await.unwrap();
                for entry in std::fs::read_dir(&src_path).unwrap() {
                    let entry = entry.unwrap();
                    self.insert_tree_from_real_fs(path.join(entry.file_name()), entry.path())
                        .await;
                }
            }
        }
        .boxed()
    }
1836
    /// Runs `f` against the fake git repository reachable from `dot_git`,
    /// which may be either a `.git` directory or a gitfile (a file containing
    /// `gitdir: <path>`, as used by linked worktrees).
    ///
    /// `f` receives the repository state plus two paths: the repository's
    /// state directory and its common directory (shared across worktrees).
    /// When `emit_git_event` is true, a `Changed` event is emitted afterwards.
    pub fn with_git_state_and_paths<T, F>(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        f: F,
    ) -> Result<T>
    where
        F: FnOnce(&mut FakeGitRepositoryState, &Path, &Path) -> T,
    {
        let mut state = self.state.lock();
        let git_event_tx = state.git_event_tx.clone();
        let entry = state.entry(dot_git).context("open .git")?;

        if let FakeFsEntry::Dir { git_repo_state, .. } = entry {
            // Ordinary repository: `.git` is a directory. Lazily create its state.
            let repo_state = git_repo_state.get_or_insert_with(|| {
                log::debug!("insert git state for {dot_git:?}");
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            let result = f(&mut repo_state, dot_git, dot_git);

            drop(repo_state);
            if emit_git_event {
                state.emit_event([(dot_git, Some(PathEventKind::Changed))]);
            }

            Ok(result)
        } else if let FakeFsEntry::File {
            content,
            git_dir_path,
            ..
        } = &mut *entry
        {
            // Linked worktree: `.git` is a file pointing at the state dir.
            // Parse and cache the `gitdir:` target on first use.
            let path = match git_dir_path {
                Some(path) => path,
                None => {
                    let path = std::str::from_utf8(content)
                        .ok()
                        .and_then(|content| content.strip_prefix("gitdir:"))
                        .context("not a valid gitfile")?
                        .trim();
                    git_dir_path.insert(normalize_path(&dot_git.parent().unwrap().join(path)))
                }
            }
            .clone();
            let Some((git_dir_entry, canonical_path)) = state.try_entry(&path, true) else {
                anyhow::bail!("pointed-to git dir {path:?} not found")
            };
            let FakeFsEntry::Dir {
                git_repo_state,
                entries,
                ..
            } = git_dir_entry
            else {
                anyhow::bail!("gitfile points to a non-directory")
            };
            // The common dir defaults to the git dir itself, unless a
            // `commondir` file redirects it (possibly via a relative path).
            let common_dir = if let Some(child) = entries.get("commondir") {
                let raw = std::str::from_utf8(child.file_content("commondir".as_ref())?)
                    .context("commondir content")?
                    .trim();
                let raw_path = Path::new(raw);
                if raw_path.is_relative() {
                    normalize_path(&canonical_path.join(raw_path))
                } else {
                    raw_path.to_owned()
                }
            } else {
                canonical_path.clone()
            };
            let repo_state = git_repo_state.get_or_insert_with(|| {
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            let result = f(&mut repo_state, &canonical_path, &common_dir);

            if emit_git_event {
                drop(repo_state);
                state.emit_event([(canonical_path, Some(PathEventKind::Changed))]);
            }

            Ok(result)
        } else {
            // The entry is a symlink or some other non-repository node.
            anyhow::bail!("not a valid git repository");
        }
    }
1924
    /// Like [`Self::with_git_state_and_paths`], for callers that don't need
    /// the repository's directory paths.
    pub fn with_git_state<T, F>(&self, dot_git: &Path, emit_git_event: bool, f: F) -> Result<T>
    where
        F: FnOnce(&mut FakeGitRepositoryState) -> T,
    {
        self.with_git_state_and_paths(dot_git, emit_git_event, |state, _, _| f(state))
    }
1931
1932 pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
1933 self.with_git_state(dot_git, true, |state| {
1934 let branch = branch.map(Into::into);
1935 state.branches.extend(branch.clone());
1936 state.current_branch_name = branch
1937 })
1938 .unwrap();
1939 }
1940
1941 pub fn set_remote_for_repo(
1942 &self,
1943 dot_git: &Path,
1944 name: impl Into<String>,
1945 url: impl Into<String>,
1946 ) {
1947 self.with_git_state(dot_git, true, |state| {
1948 state.remotes.insert(name.into(), url.into());
1949 })
1950 .unwrap();
1951 }
1952
1953 pub fn insert_branches(&self, dot_git: &Path, branches: &[&str]) {
1954 self.with_git_state(dot_git, true, |state| {
1955 if let Some(first) = branches.first()
1956 && state.current_branch_name.is_none()
1957 {
1958 state.current_branch_name = Some(first.to_string())
1959 }
1960 state
1961 .branches
1962 .extend(branches.iter().map(ToString::to_string));
1963 })
1964 .unwrap();
1965 }
1966
    /// Simulates `git worktree add`: records the ref in the repository state,
    /// creates the `.git/worktrees/<branch>/` bookkeeping entries (`HEAD`,
    /// `commondir`, `gitdir`), and creates the checkout directory with a
    /// gitfile pointing back at the bookkeeping directory.
    pub async fn add_linked_worktree_for_repo(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        worktree: Worktree,
    ) {
        let ref_name = worktree
            .ref_name
            .as_ref()
            .expect("linked worktree must have a ref_name");
        let branch_name = ref_name
            .strip_prefix("refs/heads/")
            .unwrap_or(ref_name.as_ref());

        // Create ref in git state.
        self.with_git_state(dot_git, false, |state| {
            state
                .refs
                .insert(ref_name.to_string(), worktree.sha.to_string());
        })
        .unwrap();

        // Create .git/worktrees/<name>/ directory with HEAD, commondir, and gitdir.
        let worktrees_entry_dir = dot_git.join("worktrees").join(branch_name);
        self.create_dir(&worktrees_entry_dir).await.unwrap();

        self.write_file_internal(
            worktrees_entry_dir.join("HEAD"),
            format!("ref: {ref_name}").into_bytes(),
            false,
        )
        .unwrap();

        self.write_file_internal(
            worktrees_entry_dir.join("commondir"),
            dot_git.to_string_lossy().into_owned().into_bytes(),
            false,
        )
        .unwrap();

        let worktree_dot_git = worktree.path.join(".git");
        self.write_file_internal(
            worktrees_entry_dir.join("gitdir"),
            worktree_dot_git.to_string_lossy().into_owned().into_bytes(),
            false,
        )
        .unwrap();

        // Create the worktree checkout directory with a .git file pointing back.
        self.create_dir(&worktree.path).await.unwrap();

        self.write_file_internal(
            &worktree_dot_git,
            format!("gitdir: {}", worktrees_entry_dir.display()).into_bytes(),
            false,
        )
        .unwrap();

        if emit_git_event {
            // No-op closure; used purely to trigger the change event.
            self.with_git_state(dot_git, true, |_| {}).unwrap();
        }
    }
2029
    /// Simulates `git worktree remove`: deletes the worktree checkout
    /// directory (located via the `gitdir` bookkeeping file) and the
    /// `.git/worktrees/<branch>/` entry itself. Panics on failure.
    pub async fn remove_worktree_for_repo(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        ref_name: &str,
    ) {
        let branch_name = ref_name.strip_prefix("refs/heads/").unwrap_or(ref_name);
        let worktrees_entry_dir = dot_git.join("worktrees").join(branch_name);

        // Read gitdir to find the worktree checkout path.
        let gitdir_content = self
            .load_internal(worktrees_entry_dir.join("gitdir"))
            .await
            .unwrap();
        let gitdir_str = String::from_utf8(gitdir_content).unwrap();
        // `gitdir` points at the checkout's `.git` file; its parent is the
        // checkout directory.
        let worktree_path = PathBuf::from(gitdir_str.trim())
            .parent()
            .map(PathBuf::from)
            .unwrap_or_default();

        // Remove the worktree checkout directory.
        self.remove_dir(
            &worktree_path,
            RemoveOptions {
                recursive: true,
                ignore_if_not_exists: true,
            },
        )
        .await
        .unwrap();

        // Remove the .git/worktrees/<name>/ directory.
        self.remove_dir(
            &worktrees_entry_dir,
            RemoveOptions {
                recursive: true,
                ignore_if_not_exists: false,
            },
        )
        .await
        .unwrap();

        if emit_git_event {
            // No-op closure; used purely to trigger the change event.
            self.with_git_state(dot_git, true, |_| {}).unwrap();
        }
    }
2076
2077 pub fn set_unmerged_paths_for_repo(
2078 &self,
2079 dot_git: &Path,
2080 unmerged_state: &[(RepoPath, UnmergedStatus)],
2081 ) {
2082 self.with_git_state(dot_git, true, |state| {
2083 state.unmerged_paths.clear();
2084 state.unmerged_paths.extend(
2085 unmerged_state
2086 .iter()
2087 .map(|(path, content)| (path.clone(), *content)),
2088 );
2089 })
2090 .unwrap();
2091 }
2092
2093 pub fn set_index_for_repo(&self, dot_git: &Path, index_state: &[(&str, String)]) {
2094 self.with_git_state(dot_git, true, |state| {
2095 state.index_contents.clear();
2096 state.index_contents.extend(
2097 index_state
2098 .iter()
2099 .map(|(path, content)| (repo_path(path), content.clone())),
2100 );
2101 })
2102 .unwrap();
2103 }
2104
2105 pub fn set_head_for_repo(
2106 &self,
2107 dot_git: &Path,
2108 head_state: &[(&str, String)],
2109 sha: impl Into<String>,
2110 ) {
2111 self.with_git_state(dot_git, true, |state| {
2112 state.head_contents.clear();
2113 state.head_contents.extend(
2114 head_state
2115 .iter()
2116 .map(|(path, content)| (repo_path(path), content.clone())),
2117 );
2118 state.refs.insert("HEAD".into(), sha.into());
2119 })
2120 .unwrap();
2121 }
2122
2123 pub fn set_head_and_index_for_repo(&self, dot_git: &Path, contents_by_path: &[(&str, String)]) {
2124 self.with_git_state(dot_git, true, |state| {
2125 state.head_contents.clear();
2126 state.head_contents.extend(
2127 contents_by_path
2128 .iter()
2129 .map(|(path, contents)| (repo_path(path), contents.clone())),
2130 );
2131 state.index_contents = state.head_contents.clone();
2132 })
2133 .unwrap();
2134 }
2135
    /// Sets the merge-base contents, fabricating a deterministic OID for each
    /// path and registering the corresponding blob content under it.
    pub fn set_merge_base_content_for_repo(
        &self,
        dot_git: &Path,
        contents_by_path: &[(&str, String)],
    ) {
        self.with_git_state(dot_git, true, |state| {
            use git::Oid;

            state.merge_base_contents.clear();
            // Deterministic OIDs built by repeating the counter's decimal
            // digits 20 times ("111…", "222…", …).
            let oids = (1..)
                .map(|n| n.to_string())
                .map(|n| Oid::from_bytes(n.repeat(20).as_bytes()).unwrap());
            for ((path, content), oid) in contents_by_path.iter().zip(oids) {
                state.merge_base_contents.insert(repo_path(path), oid);
                state.oids.insert(oid, content.clone());
            }
        })
        .unwrap();
    }
2155
    /// Replaces all stored blame results with `blames`.
    pub fn set_blame_for_repo(&self, dot_git: &Path, blames: Vec<(RepoPath, git::blame::Blame)>) {
        self.with_git_state(dot_git, true, |state| {
            state.blames.clear();
            state.blames.extend(blames);
        })
        .unwrap();
    }
2163
    /// Replaces the commit-graph data served by the fake repository.
    pub fn set_graph_commits(&self, dot_git: &Path, commits: Vec<Arc<InitialGraphCommitData>>) {
        self.with_git_state(dot_git, true, |state| {
            state.graph_commits = commits;
        })
        .unwrap();
    }
2170
    /// Put the given git repository into a state with the given status,
    /// by mutating the head, index, and unmerged state.
    ///
    /// `statuses` maps repo-relative unix-style paths to the desired
    /// `FileStatus`; files under the work directory that are absent from
    /// `statuses` get identical head and index content (i.e. unmodified).
    /// Panics for status combinations that cannot be fabricated for a file
    /// that exists in the working copy (e.g. `Deleted` in the worktree).
    pub fn set_status_for_repo(&self, dot_git: &Path, statuses: &[(&str, FileStatus)]) {
        let workdir_path = dot_git.parent().unwrap();
        // Snapshot the working-copy files before taking the git-state lock.
        let workdir_contents = self.files_with_contents(workdir_path);
        self.with_git_state(dot_git, true, |state| {
            state.index_contents.clear();
            state.head_contents.clear();
            state.unmerged_paths.clear();
            for (path, content) in workdir_contents {
                use util::{paths::PathStyle, rel_path::RelPath};

                // Convert the absolute path into a repo-relative path.
                let repo_path = RelPath::new(path.strip_prefix(&workdir_path).unwrap(), PathStyle::local()).unwrap();
                let repo_path = RepoPath::from_rel_path(&repo_path);
                // The status requested for this file, if any.
                let status = statuses
                    .iter()
                    .find_map(|(p, status)| (*p == repo_path.as_unix_str()).then_some(status));
                let mut content = String::from_utf8_lossy(&content).to_string();

                // Derive index and head contents such that diffing them
                // against the working copy reproduces the requested status.
                let mut index_content = None;
                let mut head_content = None;
                match status {
                    None => {
                        // No status requested: unmodified everywhere.
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    Some(FileStatus::Untracked | FileStatus::Ignored) => {}
                    Some(FileStatus::Unmerged(unmerged_status)) => {
                        state
                            .unmerged_paths
                            .insert(repo_path.clone(), *unmerged_status);
                        // Make head/index differ from the working copy.
                        content.push_str(" (unmerged)");
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    Some(FileStatus::Tracked(TrackedStatus {
                        index_status,
                        worktree_status,
                    })) => {
                        // First derive the index content from the worktree status...
                        match worktree_status {
                            StatusCode::Modified => {
                                let mut content = content.clone();
                                content.push_str(" (modified in working copy)");
                                index_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                index_content = Some(content.clone());
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted | StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                        // ...then derive the head content from the index status.
                        match index_status {
                            StatusCode::Modified => {
                                let mut content = index_content.clone().expect(
                                    "file cannot be both modified in index and created in working copy",
                                );
                                content.push_str(" (modified in index)");
                                head_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                head_content = Some(index_content.clone().expect("file cannot be both unmodified in index and created in working copy"));
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted => {
                                head_content = Some("".into());
                            }
                            StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                    }
                };

                if let Some(content) = index_content {
                    state.index_contents.insert(repo_path.clone(), content);
                }
                if let Some(content) = head_content {
                    state.head_contents.insert(repo_path.clone(), content);
                }
            }
        }).unwrap();
    }
2255
2256 pub fn set_error_message_for_index_write(&self, dot_git: &Path, message: Option<String>) {
2257 self.with_git_state(dot_git, true, |state| {
2258 state.simulated_index_write_error_message = message;
2259 })
2260 .unwrap();
2261 }
2262
2263 pub fn set_create_worktree_error(&self, dot_git: &Path, message: Option<String>) {
2264 self.with_git_state(dot_git, true, |state| {
2265 state.simulated_create_worktree_error = message;
2266 })
2267 .unwrap();
2268 }
2269
2270 pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
2271 let mut result = Vec::new();
2272 let mut queue = collections::VecDeque::new();
2273 let state = &*self.state.lock();
2274 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2275 while let Some((path, entry)) = queue.pop_front() {
2276 if let FakeFsEntry::Dir { entries, .. } = entry {
2277 for (name, entry) in entries {
2278 queue.push_back((path.join(name), entry));
2279 }
2280 }
2281 if include_dot_git
2282 || !path
2283 .components()
2284 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2285 {
2286 result.push(path);
2287 }
2288 }
2289 result
2290 }
2291
2292 pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
2293 let mut result = Vec::new();
2294 let mut queue = collections::VecDeque::new();
2295 let state = &*self.state.lock();
2296 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2297 while let Some((path, entry)) = queue.pop_front() {
2298 if let FakeFsEntry::Dir { entries, .. } = entry {
2299 for (name, entry) in entries {
2300 queue.push_back((path.join(name), entry));
2301 }
2302 if include_dot_git
2303 || !path
2304 .components()
2305 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2306 {
2307 result.push(path);
2308 }
2309 }
2310 }
2311 result
2312 }
2313
2314 pub fn files(&self) -> Vec<PathBuf> {
2315 let mut result = Vec::new();
2316 let mut queue = collections::VecDeque::new();
2317 let state = &*self.state.lock();
2318 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2319 while let Some((path, entry)) = queue.pop_front() {
2320 match entry {
2321 FakeFsEntry::File { .. } => result.push(path),
2322 FakeFsEntry::Dir { entries, .. } => {
2323 for (name, entry) in entries {
2324 queue.push_back((path.join(name), entry));
2325 }
2326 }
2327 FakeFsEntry::Symlink { .. } => {}
2328 }
2329 }
2330 result
2331 }
2332
2333 pub fn files_with_contents(&self, prefix: &Path) -> Vec<(PathBuf, Vec<u8>)> {
2334 let mut result = Vec::new();
2335 let mut queue = collections::VecDeque::new();
2336 let state = &*self.state.lock();
2337 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2338 while let Some((path, entry)) = queue.pop_front() {
2339 match entry {
2340 FakeFsEntry::File { content, .. } => {
2341 if path.starts_with(prefix) {
2342 result.push((path, content.clone()));
2343 }
2344 }
2345 FakeFsEntry::Dir { entries, .. } => {
2346 for (name, entry) in entries {
2347 queue.push_back((path.join(name), entry));
2348 }
2349 }
2350 FakeFsEntry::Symlink { .. } => {}
2351 }
2352 }
2353 result
2354 }
2355
    /// How many `read_dir` calls have been issued.
    ///
    /// The counter is incremented by the `Fs::read_dir` implementation below.
    pub fn read_dir_call_count(&self) -> usize {
        self.state.lock().read_dir_call_count
    }
2360
2361 pub fn watched_paths(&self) -> Vec<PathBuf> {
2362 let state = self.state.lock();
2363 state
2364 .event_txs
2365 .iter()
2366 .filter_map(|(path, tx)| Some(path.clone()).filter(|_| !tx.is_closed()))
2367 .collect()
2368 }
2369
    /// How many `metadata` calls have been issued.
    ///
    /// The counter is incremented by the `Fs::metadata` implementation below.
    pub fn metadata_call_count(&self) -> usize {
        self.state.lock().metadata_call_count
    }
2374
2375 /// How many write operations have been issued for a specific path.
2376 pub fn write_count_for_path(&self, path: impl AsRef<Path>) -> usize {
2377 let path = path.as_ref().to_path_buf();
2378 self.state
2379 .lock()
2380 .path_write_counts
2381 .get(&path)
2382 .copied()
2383 .unwrap_or(0)
2384 }
2385
    /// Emit a single synthetic filesystem event for `path` to all watchers.
    pub fn emit_fs_event(&self, path: impl Into<PathBuf>, event: Option<PathEventKind>) {
        self.state.lock().emit_event(std::iter::once((path, event)));
    }
2389
    /// Delegate to the test executor's random-delay facility; used by the
    /// async `Fs` methods below to perturb operation ordering in tests.
    fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
        self.executor.simulate_random_delay()
    }
2393}
2394
2395#[cfg(feature = "test-support")]
2396impl FakeFsEntry {
2397 fn is_file(&self) -> bool {
2398 matches!(self, Self::File { .. })
2399 }
2400
2401 fn is_symlink(&self) -> bool {
2402 matches!(self, Self::Symlink { .. })
2403 }
2404
2405 fn file_content(&self, path: &Path) -> Result<&Vec<u8>> {
2406 if let Self::File { content, .. } = self {
2407 Ok(content)
2408 } else {
2409 anyhow::bail!("not a file: {path:?}");
2410 }
2411 }
2412
2413 fn dir_entries(&mut self, path: &Path) -> Result<&mut BTreeMap<String, FakeFsEntry>> {
2414 if let Self::Dir { entries, .. } = self {
2415 Ok(entries)
2416 } else {
2417 anyhow::bail!("not a directory: {path:?}");
2418 }
2419 }
2420}
2421
2422#[cfg(feature = "test-support")]
2423struct FakeWatcher {
2424 tx: smol::channel::Sender<Vec<PathEvent>>,
2425 original_path: PathBuf,
2426 fs_state: Arc<Mutex<FakeFsState>>,
2427 prefixes: Mutex<Vec<PathBuf>>,
2428}
2429
2430#[cfg(feature = "test-support")]
2431impl Watcher for FakeWatcher {
2432 fn add(&self, path: &Path) -> Result<()> {
2433 if path.starts_with(&self.original_path) {
2434 return Ok(());
2435 }
2436 self.fs_state
2437 .try_lock()
2438 .unwrap()
2439 .event_txs
2440 .push((path.to_owned(), self.tx.clone()));
2441 self.prefixes.lock().push(path.to_owned());
2442 Ok(())
2443 }
2444
2445 fn remove(&self, _: &Path) -> Result<()> {
2446 Ok(())
2447 }
2448}
2449
2450#[cfg(feature = "test-support")]
2451#[derive(Debug)]
2452struct FakeHandle {
2453 inode: u64,
2454}
2455
2456#[cfg(feature = "test-support")]
2457impl FileHandle for FakeHandle {
2458 fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf> {
2459 let fs = fs.as_fake();
2460 let mut state = fs.state.lock();
2461 let Some(target) = state.moves.get(&self.inode).cloned() else {
2462 anyhow::bail!("fake fd not moved")
2463 };
2464
2465 if state.try_entry(&target, false).is_some() {
2466 return Ok(target);
2467 }
2468 anyhow::bail!("fake fd target not found")
2469 }
2470}
2471
2472#[cfg(feature = "test-support")]
2473#[async_trait::async_trait]
2474impl Fs for FakeFs {
    /// Create `path` and any missing ancestor directories, emitting a
    /// `Created` event for each directory that did not already exist.
    async fn create_dir(&self, path: &Path) -> Result<()> {
        self.simulate_random_delay().await;

        let mut created_dirs = Vec::new();
        let mut cur_path = PathBuf::new();
        for component in path.components() {
            // Prefix (drive) and root components are not directories to
            // create, but they must still become part of the growing path.
            let should_skip = matches!(component, Component::Prefix(..) | Component::RootDir);
            cur_path.push(component);
            if should_skip {
                continue;
            }
            // The lock is re-acquired per component, so inode/mtime counters
            // are consumed in creation order.
            let mut state = self.state.lock();

            let inode = state.get_and_increment_inode();
            let mtime = state.get_and_increment_mtime();
            state.write_path(&cur_path, |entry| {
                entry.or_insert_with(|| {
                    created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
                    FakeFsEntry::Dir {
                        inode,
                        mtime,
                        len: 0,
                        entries: Default::default(),
                        git_repo_state: None,
                    }
                });
                Ok(())
            })?
        }

        // Notify watchers only after the whole chain of directories exists.
        self.state.lock().emit_event(created_dirs);
        Ok(())
    }
2508
    /// Create an empty file at `path`, honoring the `overwrite` /
    /// `ignore_if_exists` options, and emit the corresponding event.
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
        self.simulate_random_delay().await;
        let mut state = self.state.lock();
        let inode = state.get_and_increment_inode();
        let mtime = state.get_and_increment_mtime();
        let file = FakeFsEntry::File {
            inode,
            mtime,
            len: 0,
            content: Vec::new(),
            git_dir_path: None,
        };
        let mut kind = Some(PathEventKind::Created);
        state.write_path(path, |entry| {
            match entry {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        // Replacing an existing entry reports a change,
                        // not a creation.
                        kind = Some(PathEventKind::Changed);
                        *e.get_mut() = file;
                    } else if !options.ignore_if_exists {
                        anyhow::bail!("path already exists: {path:?}");
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(file);
                }
            }
            Ok(())
        })?;
        state.emit_event([(path, kind)]);
        Ok(())
    }
2541
2542 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
2543 let mut state = self.state.lock();
2544 let file = FakeFsEntry::Symlink { target };
2545 state
2546 .write_path(path.as_ref(), move |e| match e {
2547 btree_map::Entry::Vacant(e) => {
2548 e.insert(file);
2549 Ok(())
2550 }
2551 btree_map::Entry::Occupied(mut e) => {
2552 *e.get_mut() = file;
2553 Ok(())
2554 }
2555 })
2556 .unwrap();
2557 state.emit_event([(path, Some(PathEventKind::Created))]);
2558
2559 Ok(())
2560 }
2561
2562 async fn create_file_with(
2563 &self,
2564 path: &Path,
2565 mut content: Pin<&mut (dyn AsyncRead + Send)>,
2566 ) -> Result<()> {
2567 let mut bytes = Vec::new();
2568 content.read_to_end(&mut bytes).await?;
2569 self.write_file_internal(path, bytes, true)?;
2570 Ok(())
2571 }
2572
    /// Unpack the file entries of a tar archive beneath `path`.
    ///
    /// Non-file entries (directories, symlinks, etc.) are skipped; parent
    /// directories are created implicitly for each extracted file.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()> {
        let mut entries = content.entries()?;
        while let Some(entry) = entries.next().await {
            let mut entry = entry?;
            if entry.header().entry_type().is_file() {
                // Entry paths are archive-relative; join them onto the target.
                let path = path.join(entry.path()?.as_ref());
                let mut bytes = Vec::new();
                entry.read_to_end(&mut bytes).await?;
                self.create_dir(path.parent().unwrap()).await?;
                self.write_file_internal(&path, bytes, true)?;
            }
        }
        Ok(())
    }
2591
    /// Move `old_path` to `new_path`, recording the move so open
    /// `FakeHandle`s can later discover the entry's new location.
    async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let old_path = normalize_path(old_path);
        let new_path = normalize_path(new_path);

        if options.create_parents {
            if let Some(parent) = new_path.parent() {
                self.create_dir(parent).await?;
            }
        }

        let mut state = self.state.lock();
        // Clone the source entry first, so the failure paths below leave
        // the tree untouched.
        let moved_entry = state.write_path(&old_path, |e| {
            if let btree_map::Entry::Occupied(e) = e {
                Ok(e.get().clone())
            } else {
                anyhow::bail!("path does not exist: {old_path:?}")
            }
        })?;

        // Symlinks carry no inode in this fake; their moves go under 0.
        let inode = match moved_entry {
            FakeFsEntry::File { inode, .. } => inode,
            FakeFsEntry::Dir { inode, .. } => inode,
            _ => 0,
        };

        // Record the move so `FakeHandle::current_path` can resolve it.
        state.moves.insert(inode, new_path.clone());

        state.write_path(&new_path, |e| {
            match e {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        *e.get_mut() = moved_entry;
                    } else if !options.ignore_if_exists {
                        anyhow::bail!("path already exists: {new_path:?}");
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(moved_entry);
                }
            }
            Ok(())
        })?;

        // Remove the original entry; it must still exist because the lock
        // has been held since it was read.
        state
            .write_path(&old_path, |e| {
                if let btree_map::Entry::Occupied(e) = e {
                    Ok(e.remove())
                } else {
                    unreachable!()
                }
            })
            .unwrap();

        state.emit_event([
            (old_path, Some(PathEventKind::Removed)),
            (new_path, Some(PathEventKind::Created)),
        ]);
        Ok(())
    }
2653
2654 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
2655 self.simulate_random_delay().await;
2656
2657 let source = normalize_path(source);
2658 let target = normalize_path(target);
2659 let mut state = self.state.lock();
2660 let mtime = state.get_and_increment_mtime();
2661 let inode = state.get_and_increment_inode();
2662 let source_entry = state.entry(&source)?;
2663 let content = source_entry.file_content(&source)?.clone();
2664 let mut kind = Some(PathEventKind::Created);
2665 state.write_path(&target, |e| match e {
2666 btree_map::Entry::Occupied(e) => {
2667 if options.overwrite {
2668 kind = Some(PathEventKind::Changed);
2669 Ok(Some(e.get().clone()))
2670 } else if !options.ignore_if_exists {
2671 anyhow::bail!("{target:?} already exists");
2672 } else {
2673 Ok(None)
2674 }
2675 }
2676 btree_map::Entry::Vacant(e) => Ok(Some(
2677 e.insert(FakeFsEntry::File {
2678 inode,
2679 mtime,
2680 len: content.len() as u64,
2681 content,
2682 git_dir_path: None,
2683 })
2684 .clone(),
2685 )),
2686 })?;
2687 state.emit_event([(target, kind)]);
2688 Ok(())
2689 }
2690
    /// Remove the directory at `path`.
    ///
    /// With `options.recursive` the whole subtree is removed; otherwise a
    /// non-empty directory is an error. `options.ignore_if_not_exists`
    /// suppresses the missing-path error. A `Removed` event is emitted on
    /// every successful return, even when nothing existed.
    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let path = normalize_path(path);
        let parent_path = path.parent().context("cannot remove the root")?;
        let base_name = path.file_name().context("cannot remove the root")?;

        let mut state = self.state.lock();
        let parent_entry = state.entry(parent_path)?;
        let entry = parent_entry
            .dir_entries(parent_path)?
            .entry(base_name.to_str().unwrap().into());

        match entry {
            btree_map::Entry::Vacant(_) => {
                if !options.ignore_if_not_exists {
                    anyhow::bail!("{path:?} does not exist");
                }
            }
            btree_map::Entry::Occupied(mut entry) => {
                {
                    // Validate the entry is a directory (and check
                    // emptiness) before removing it.
                    let children = entry.get_mut().dir_entries(&path)?;
                    if !options.recursive && !children.is_empty() {
                        anyhow::bail!("{path:?} is not empty");
                    }
                }
                entry.remove();
            }
        }
        state.emit_event([(path, Some(PathEventKind::Removed))]);
        Ok(())
    }
2723
2724 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2725 self.simulate_random_delay().await;
2726
2727 let path = normalize_path(path);
2728 let parent_path = path.parent().context("cannot remove the root")?;
2729 let base_name = path.file_name().unwrap();
2730 let mut state = self.state.lock();
2731 let parent_entry = state.entry(parent_path)?;
2732 let entry = parent_entry
2733 .dir_entries(parent_path)?
2734 .entry(base_name.to_str().unwrap().into());
2735 match entry {
2736 btree_map::Entry::Vacant(_) => {
2737 if !options.ignore_if_not_exists {
2738 anyhow::bail!("{path:?} does not exist");
2739 }
2740 }
2741 btree_map::Entry::Occupied(mut entry) => {
2742 entry.get_mut().file_content(&path)?;
2743 entry.remove();
2744 }
2745 }
2746 state.emit_event([(path, Some(PathEventKind::Removed))]);
2747 Ok(())
2748 }
2749
2750 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
2751 let bytes = self.load_internal(path).await?;
2752 Ok(Box::new(io::Cursor::new(bytes)))
2753 }
2754
2755 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
2756 self.simulate_random_delay().await;
2757 let mut state = self.state.lock();
2758 let inode = match state.entry(path)? {
2759 FakeFsEntry::File { inode, .. } => *inode,
2760 FakeFsEntry::Dir { inode, .. } => *inode,
2761 _ => unreachable!(),
2762 };
2763 Ok(Arc::new(FakeHandle { inode }))
2764 }
2765
2766 async fn load(&self, path: &Path) -> Result<String> {
2767 let content = self.load_internal(path).await?;
2768 Ok(String::from_utf8(content)?)
2769 }
2770
    /// Read the raw bytes of the file at `path`.
    async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
        self.load_internal(path).await
    }
2774
2775 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
2776 self.simulate_random_delay().await;
2777 let path = normalize_path(path.as_path());
2778 if let Some(path) = path.parent() {
2779 self.create_dir(path).await?;
2780 }
2781 self.write_file_internal(path, data.into_bytes(), true)?;
2782 Ok(())
2783 }
2784
2785 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
2786 self.simulate_random_delay().await;
2787 let path = normalize_path(path);
2788 let content = text::chunks_with_line_ending(text, line_ending).collect::<String>();
2789 if let Some(path) = path.parent() {
2790 self.create_dir(path).await?;
2791 }
2792 self.write_file_internal(path, content.into_bytes(), false)?;
2793 Ok(())
2794 }
2795
2796 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
2797 self.simulate_random_delay().await;
2798 let path = normalize_path(path);
2799 if let Some(path) = path.parent() {
2800 self.create_dir(path).await?;
2801 }
2802 self.write_file_internal(path, content.to_vec(), false)?;
2803 Ok(())
2804 }
2805
2806 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
2807 let path = normalize_path(path);
2808 self.simulate_random_delay().await;
2809 let state = self.state.lock();
2810 let canonical_path = state
2811 .canonicalize(&path, true)
2812 .with_context(|| format!("path does not exist: {path:?}"))?;
2813 Ok(canonical_path)
2814 }
2815
2816 async fn is_file(&self, path: &Path) -> bool {
2817 let path = normalize_path(path);
2818 self.simulate_random_delay().await;
2819 let mut state = self.state.lock();
2820 if let Some((entry, _)) = state.try_entry(&path, true) {
2821 entry.is_file()
2822 } else {
2823 false
2824 }
2825 }
2826
2827 async fn is_dir(&self, path: &Path) -> bool {
2828 self.metadata(path)
2829 .await
2830 .is_ok_and(|metadata| metadata.is_some_and(|metadata| metadata.is_dir))
2831 }
2832
    /// Look up metadata for `path`, following a final symlink.
    ///
    /// Returns `Ok(None)` when the path (or a dangling symlink target)
    /// does not exist. The fake never reports executables or FIFOs.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let mut state = self.state.lock();
        state.metadata_call_count += 1;
        if let Some((mut entry, _)) = state.try_entry(&path, false) {
            let is_symlink = entry.is_symlink();
            if is_symlink {
                // Re-resolve with symlink following; a dangling link
                // reports as nonexistent, matching real `stat` behavior.
                if let Some(e) = state.try_entry(&path, true).map(|e| e.0) {
                    entry = e;
                } else {
                    return Ok(None);
                }
            }

            Ok(Some(match &*entry {
                FakeFsEntry::File {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: false,
                    is_symlink,
                    is_fifo: false,
                    is_executable: false,
                },
                FakeFsEntry::Dir {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: true,
                    is_symlink,
                    is_fifo: false,
                    is_executable: false,
                },
                // Symlinks were resolved above, so this arm cannot be hit.
                FakeFsEntry::Symlink { .. } => unreachable!(),
            }))
        } else {
            Ok(None)
        }
    }
2877
2878 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
2879 self.simulate_random_delay().await;
2880 let path = normalize_path(path);
2881 let mut state = self.state.lock();
2882 let (entry, _) = state
2883 .try_entry(&path, false)
2884 .with_context(|| format!("path does not exist: {path:?}"))?;
2885 if let FakeFsEntry::Symlink { target } = entry {
2886 Ok(target.clone())
2887 } else {
2888 anyhow::bail!("not a symlink: {path:?}")
2889 }
2890 }
2891
2892 async fn read_dir(
2893 &self,
2894 path: &Path,
2895 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
2896 self.simulate_random_delay().await;
2897 let path = normalize_path(path);
2898 let mut state = self.state.lock();
2899 state.read_dir_call_count += 1;
2900 let entry = state.entry(&path)?;
2901 let children = entry.dir_entries(&path)?;
2902 let paths = children
2903 .keys()
2904 .map(|file_name| Ok(path.join(file_name)))
2905 .collect::<Vec<_>>();
2906 Ok(Box::pin(futures::stream::iter(paths)))
2907 }
2908
    /// Watch `path` for changes, returning an event stream plus a watcher
    /// that can later be extended with additional prefixes.
    ///
    /// Event batches are filtered so only batches touching a watched
    /// prefix are delivered; a random delay is injected per batch.
    async fn watch(
        &self,
        path: &Path,
        _: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    ) {
        self.simulate_random_delay().await;
        let (tx, rx) = smol::channel::unbounded();
        let path = path.to_path_buf();
        self.state.lock().event_txs.push((path.clone(), tx.clone()));
        let executor = self.executor.clone();
        let watcher = Arc::new(FakeWatcher {
            tx,
            original_path: path.to_owned(),
            fs_state: self.state.clone(),
            prefixes: Mutex::new(vec![path]),
        });
        (
            Box::pin(futures::StreamExt::filter(rx, {
                let watcher = watcher.clone();
                move |events| {
                    // Keep the batch if any event lies under a watched prefix.
                    let result = events.iter().any(|evt_path| {
                        watcher
                            .prefixes
                            .lock()
                            .iter()
                            .any(|prefix| evt_path.path.starts_with(prefix))
                    });
                    let executor = executor.clone();
                    async move {
                        executor.simulate_random_delay().await;
                        result
                    }
                }
            })),
            watcher,
        )
    }
2949
    /// Open a fake git repository handle for the `.git` directory at
    /// `abs_dot_git`; the system git binary is ignored by the fake.
    fn open_repo(
        &self,
        abs_dot_git: &Path,
        _system_git_binary: Option<&Path>,
    ) -> Result<Arc<dyn GitRepository>> {
        self.with_git_state_and_paths(
            abs_dot_git,
            false,
            |_, repository_dir_path, common_dir_path| {
                Arc::new(fake_git_repo::FakeGitRepository {
                    fs: self.this.upgrade().unwrap(),
                    executor: self.executor.clone(),
                    dot_git_path: abs_dot_git.to_path_buf(),
                    repository_dir_path: repository_dir_path.to_owned(),
                    common_dir_path: common_dir_path.to_owned(),
                    checkpoints: Arc::default(),
                    is_trusted: Arc::default(),
                }) as _
            },
        )
    }
2971
2972 async fn git_init(
2973 &self,
2974 abs_work_directory_path: &Path,
2975 _fallback_branch_name: String,
2976 ) -> Result<()> {
2977 self.create_dir(&abs_work_directory_path.join(".git")).await
2978 }
2979
    /// Cloning is unsupported in the fake filesystem; always errors.
    async fn git_clone(&self, _repo_url: &str, _abs_work_directory: &Path) -> Result<()> {
        anyhow::bail!("Git clone is not supported in fake Fs")
    }
2983
    /// Always true: this is the fake implementation.
    fn is_fake(&self) -> bool {
        true
    }
2987
    /// The fake filesystem always behaves case-sensitively.
    async fn is_case_sensitive(&self) -> bool {
        true
    }
2991
2992 fn subscribe_to_jobs(&self) -> JobEventReceiver {
2993 let (sender, receiver) = futures::channel::mpsc::unbounded();
2994 self.state.lock().job_event_subscribers.lock().push(sender);
2995 receiver
2996 }
2997
2998 #[cfg(feature = "test-support")]
2999 fn as_fake(&self) -> Arc<FakeFs> {
3000 self.this.upgrade().unwrap()
3001 }
3002}
3003
/// Recursively copy `source` into `target` using the given `Fs`.
///
/// Directories are recreated fresh: once past the overwrite check, an
/// existing target directory is removed (best-effort) before being
/// created again, so stale children do not survive the copy. Files are
/// copied with `options`.
pub async fn copy_recursive<'a>(
    fs: &'a dyn Fs,
    source: &'a Path,
    target: &'a Path,
    options: CopyOptions,
) -> Result<()> {
    for (item, is_dir) in read_dir_items(fs, source).await? {
        let Ok(item_relative_path) = item.strip_prefix(source) else {
            continue;
        };
        // The first listed item is `source` itself, which maps to `target`.
        let target_item = if item_relative_path == Path::new("") {
            target.to_path_buf()
        } else {
            target.join(item_relative_path)
        };
        if is_dir {
            if !options.overwrite && fs.metadata(&target_item).await.is_ok_and(|m| m.is_some()) {
                if options.ignore_if_exists {
                    continue;
                } else {
                    anyhow::bail!("{target_item:?} already exists");
                }
            }
            // Best-effort removal so the directory starts out empty.
            let _ = fs
                .remove_dir(
                    &target_item,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await;
            fs.create_dir(&target_item).await?;
        } else {
            fs.copy_file(&item, &target_item, options).await?;
        }
    }
    Ok(())
}
3043
/// Recursively reads all of the paths in the given directory.
///
/// Returns a vector of tuples of (path, is_dir). The listing includes
/// `source` itself as the first item.
pub async fn read_dir_items<'a>(fs: &'a dyn Fs, source: &'a Path) -> Result<Vec<(PathBuf, bool)>> {
    let mut items = Vec::new();
    read_recursive(fs, source, &mut items).await?;
    Ok(items)
}
3052
/// Depth-first helper for `read_dir_items`: appends `source` and, if it is
/// a directory, all of its descendants to `output` as `(path, is_dir)`.
///
/// Returns a boxed future because async recursion requires heap allocation.
fn read_recursive<'a>(
    fs: &'a dyn Fs,
    source: &'a Path,
    output: &'a mut Vec<(PathBuf, bool)>,
) -> BoxFuture<'a, Result<()>> {
    use futures::future::FutureExt;

    async move {
        let metadata = fs
            .metadata(source)
            .await?
            .with_context(|| format!("path does not exist: {source:?}"))?;

        if metadata.is_dir {
            output.push((source.to_path_buf(), true));
            let mut children = fs.read_dir(source).await?;
            while let Some(child_path) = children.next().await {
                // NOTE(review): children that fail to read are silently
                // skipped — presumably intentional best-effort behavior.
                if let Ok(child_path) = child_path {
                    read_recursive(fs, &child_path, output).await?;
                }
            }
        } else {
            output.push((source.to_path_buf(), false));
        }
        Ok(())
    }
    .boxed()
}
3081
3082// todo(windows)
3083// can we get file id not open the file twice?
3084// https://github.com/rust-lang/rust/issues/63010
3085#[cfg(target_os = "windows")]
3086async fn file_id(path: impl AsRef<Path>) -> Result<u64> {
3087 use std::os::windows::io::AsRawHandle;
3088
3089 use smol::fs::windows::OpenOptionsExt;
3090 use windows::Win32::{
3091 Foundation::HANDLE,
3092 Storage::FileSystem::{
3093 BY_HANDLE_FILE_INFORMATION, FILE_FLAG_BACKUP_SEMANTICS, GetFileInformationByHandle,
3094 },
3095 };
3096
3097 let file = smol::fs::OpenOptions::new()
3098 .read(true)
3099 .custom_flags(FILE_FLAG_BACKUP_SEMANTICS.0)
3100 .open(path)
3101 .await?;
3102
3103 let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() };
3104 // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle
3105 // This function supports Windows XP+
3106 smol::unblock(move || {
3107 unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? };
3108
3109 Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64))
3110 })
3111 .await
3112}
3113
3114#[cfg(target_os = "windows")]
3115fn atomic_replace<P: AsRef<Path>>(
3116 replaced_file: P,
3117 replacement_file: P,
3118) -> windows::core::Result<()> {
3119 use windows::{
3120 Win32::Storage::FileSystem::{REPLACE_FILE_FLAGS, ReplaceFileW},
3121 core::HSTRING,
3122 };
3123
3124 // If the file does not exist, create it.
3125 let _ = std::fs::File::create_new(replaced_file.as_ref());
3126
3127 unsafe {
3128 ReplaceFileW(
3129 &HSTRING::from(replaced_file.as_ref().to_string_lossy().into_owned()),
3130 &HSTRING::from(replacement_file.as_ref().to_string_lossy().into_owned()),
3131 None,
3132 REPLACE_FILE_FLAGS::default(),
3133 None,
3134 None,
3135 )
3136 }
3137}