1pub mod fs_watcher;
2
3use parking_lot::Mutex;
4use std::ffi::OsString;
5use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};
6use std::time::Instant;
7use util::maybe;
8
9use anyhow::{Context as _, Result, anyhow};
10use futures::stream::iter;
11use gpui::App;
12use gpui::BackgroundExecutor;
13use gpui::Global;
14use gpui::ReadGlobal as _;
15use gpui::SharedString;
16use std::borrow::Cow;
17#[cfg(unix)]
18use std::ffi::CString;
19use util::command::new_command;
20
21#[cfg(unix)]
22use std::os::fd::{AsFd, AsRawFd};
23#[cfg(unix)]
24use std::os::unix::ffi::OsStrExt;
25
26#[cfg(unix)]
27use std::os::unix::fs::{FileTypeExt, MetadataExt};
28
29#[cfg(any(target_os = "macos", target_os = "freebsd"))]
30use std::mem::MaybeUninit;
31
32use async_tar::Archive;
33use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture};
34use git::repository::{GitRepository, RealGitRepository};
35use is_executable::IsExecutable;
36use rope::Rope;
37use serde::{Deserialize, Serialize};
38use smol::io::AsyncWriteExt;
39#[cfg(feature = "test-support")]
40use std::path::Component;
41use std::{
42 io::{self, Write},
43 path::{Path, PathBuf},
44 pin::Pin,
45 sync::Arc,
46 time::{Duration, SystemTime, UNIX_EPOCH},
47};
48use tempfile::TempDir;
49use text::LineEnding;
50
51#[cfg(feature = "test-support")]
52mod fake_git_repo;
53#[cfg(feature = "test-support")]
54use collections::{BTreeMap, btree_map};
55#[cfg(feature = "test-support")]
56use fake_git_repo::FakeGitRepositoryState;
57#[cfg(feature = "test-support")]
58use git::{
59 repository::{InitialGraphCommitData, RepoPath, Worktree, repo_path},
60 status::{FileStatus, StatusCode, TrackedStatus, UnmergedStatus},
61};
62#[cfg(feature = "test-support")]
63use util::normalize_path;
64
65#[cfg(feature = "test-support")]
66use smol::io::AsyncReadExt;
67#[cfg(feature = "test-support")]
68use std::ffi::OsStr;
69
/// Registers and unregisters paths with an underlying filesystem-event
/// watcher. Returned alongside the event stream from [`Fs::watch`].
pub trait Watcher: Send + Sync {
    /// Start watching `path` for changes.
    fn add(&self, path: &Path) -> Result<()>;
    /// Stop watching `path`.
    fn remove(&self, path: &Path) -> Result<()>;
}

/// The kind of change a filesystem event describes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum PathEventKind {
    Removed,
    Created,
    Changed,
    /// The backend requested a rescan of the path.
    Rescan,
}

/// A filesystem change notification for a single path.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct PathEvent {
    pub path: PathBuf,
    /// `None` when the backend did not report what kind of change occurred.
    pub kind: Option<PathEventKind>,
}
88
89impl From<PathEvent> for PathBuf {
90 fn from(event: PathEvent) -> Self {
91 event.path
92 }
93}
94
/// Async filesystem abstraction used throughout the app; implemented for the
/// real OS filesystem ([`RealFs`]) and, under `test-support`, by a fake
/// in-memory filesystem.
#[async_trait::async_trait]
pub trait Fs: Send + Sync {
    /// Creates a directory at `path`.
    async fn create_dir(&self, path: &Path) -> Result<()>;
    /// Creates a symlink at `path` pointing at `target`.
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()>;
    /// Creates a file at `path`, honoring `options.overwrite` /
    /// `options.ignore_if_exists`.
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
    /// Creates a file at `path` and streams `content` into it.
    async fn create_file_with(
        &self,
        path: &Path,
        content: Pin<&mut (dyn AsyncRead + Send)>,
    ) -> Result<()>;
    /// Unpacks a tar archive into `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()>;
    /// Copies a file from `source` to `target`.
    async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
    /// Renames `source` to `target`, honoring the given options.
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;

    /// Removes a directory from the filesystem.
    /// There is no expectation that the directory will be preserved in the
    /// system trash.
    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;

    /// Moves a file or directory to the system trash.
    /// Returns a [`TrashedEntry`] that can be used to keep track of the
    /// location of the trashed item in the system's trash.
    async fn trash(&self, path: &Path, options: RemoveOptions) -> Result<TrashedEntry>;

    /// Removes a file from the filesystem.
    /// There is no expectation that the file will be preserved in the system
    /// trash.
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;

    /// Opens an OS-level handle to the file from which its current path can
    /// later be re-derived (see [`FileHandle`]).
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>>;
    /// Opens the file for synchronous reading.
    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>>;
    /// Loads the file's contents as UTF-8 text.
    async fn load(&self, path: &Path) -> Result<String> {
        Ok(String::from_utf8(self.load_bytes(path).await?)?)
    }
    /// Loads the file's raw bytes.
    async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>>;
    /// Writes `text` to `path` atomically (write to a temp file, then rename).
    async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
    /// Writes a rope to `path` using the given line ending.
    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()>;
    /// Writes raw bytes to `path`.
    async fn write(&self, path: &Path, content: &[u8]) -> Result<()>;
    /// Canonicalizes `path`, resolving symlinks.
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
    /// Whether `path` exists and refers to a regular file.
    async fn is_file(&self, path: &Path) -> bool;
    /// Whether `path` exists and refers to a directory.
    async fn is_dir(&self, path: &Path) -> bool;
    /// Returns metadata for `path`, or `Ok(None)` if the path does not exist.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
    /// Returns the target of the symlink at `path`.
    async fn read_link(&self, path: &Path) -> Result<PathBuf>;
    /// Streams the entries of the directory at `path`.
    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;

    /// Watches `path` for changes, batching events with the given `latency`.
    /// Returns the event stream together with a [`Watcher`] that can register
    /// additional paths.
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    );

    /// Opens the git repository whose `.git` directory is `abs_dot_git`.
    fn open_repo(
        &self,
        abs_dot_git: &Path,
        system_git_binary_path: Option<&Path>,
    ) -> Result<Arc<dyn GitRepository>>;
    /// Initializes a git repository in `abs_work_directory`.
    async fn git_init(&self, abs_work_directory: &Path, fallback_branch_name: String)
    -> Result<()>;
    /// Clones `repo_url` into `abs_work_directory`.
    async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()>;
    /// Whether this is the fake, test-only implementation.
    fn is_fake(&self) -> bool;
    /// Whether the underlying filesystem is case-sensitive.
    async fn is_case_sensitive(&self) -> bool;
    /// Subscribes to [`JobEvent`]s for long-running filesystem jobs.
    fn subscribe_to_jobs(&self) -> JobEventReceiver;

    /// Restores a given `TrashedEntry`, moving it from the system's trash back
    /// to the original path.
    async fn restore(
        &self,
        trashed_entry: TrashedEntry,
    ) -> std::result::Result<PathBuf, TrashRestoreError>;

    #[cfg(feature = "test-support")]
    fn as_fake(&self) -> Arc<FakeFs> {
        panic!("called as_fake on a real fs");
    }
}
180
// We use our own type rather than `trash::TrashItem` directly to avoid carrying
// over fields we don't need (e.g. `time_deleted`) and to insulate callers and
// tests from changes to that crate's API surface.
/// Represents a file or directory that has been moved to the system trash,
/// retaining enough information to restore it to its original location.
///
/// Produced by [`Fs::trash`]; consumed by [`Fs::restore`].
#[derive(Clone, PartialEq, Debug)]
pub struct TrashedEntry {
    /// Platform-specific identifier for the file/directory in the trash.
    ///
    /// * Freedesktop – Path to the `.trashinfo` file.
    /// * macOS & Windows – Full path to the file/directory in the system's
    ///   trash.
    pub id: OsString,
    /// Name of the file/directory at the time of trashing, including extension.
    pub name: OsString,
    /// Absolute path to the parent directory at the time of trashing.
    pub original_parent: PathBuf,
}
199
200impl From<trash::TrashItem> for TrashedEntry {
201 fn from(item: trash::TrashItem) -> Self {
202 Self {
203 id: item.id,
204 name: item.name,
205 original_parent: item.original_parent,
206 }
207 }
208}
209
210impl TrashedEntry {
211 fn into_trash_item(self) -> trash::TrashItem {
212 trash::TrashItem {
213 id: self.id,
214 name: self.name,
215 original_parent: self.original_parent,
216 // `TrashedEntry` doesn't preserve `time_deleted` as we don't
217 // currently need it for restore, so we default it to 0 here.
218 time_deleted: 0,
219 }
220 }
221}
222
/// Errors that can occur when restoring a [`TrashedEntry`] from the system's
/// trash via [`Fs::restore`].
#[derive(Debug, thiserror::Error)]
pub enum TrashRestoreError {
    #[error("The specified `path` ({}) was not found in the system's trash.", path.display())]
    NotFound { path: PathBuf },
    #[error("File or directory ({}) already exists at the restore destination.", path.display())]
    Collision { path: PathBuf },
    #[error("Unknown error ({description})")]
    Unknown { description: String },
}

impl From<trash::Error> for TrashRestoreError {
    fn from(err: trash::Error) -> Self {
        match err {
            trash::Error::RestoreCollision { path, .. } => Self::Collision { path },
            // Preserve the raw description rather than its `Display` form.
            trash::Error::Unknown { description } => Self::Unknown { description },
            // Any other `trash` error is flattened into its display text.
            other => Self::Unknown {
                description: other.to_string(),
            },
        }
    }
}
244
/// Newtype storing the app-wide [`Fs`] in GPUI's global state.
struct GlobalFs(Arc<dyn Fs>);

impl Global for GlobalFs {}

impl dyn Fs {
    /// Returns the global [`Fs`].
    pub fn global(cx: &App) -> Arc<Self> {
        GlobalFs::global(cx).0.clone()
    }

    /// Sets the global [`Fs`].
    pub fn set_global(fs: Arc<Self>, cx: &mut App) {
        cx.set_global(GlobalFs(fs));
    }
}
260
/// Options for [`Fs::create_file`].
#[derive(Copy, Clone, Default)]
pub struct CreateOptions {
    /// Truncate the file if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed silently if the file already exists.
    pub ignore_if_exists: bool,
}

/// Options for [`Fs::copy_file`].
#[derive(Copy, Clone, Default)]
pub struct CopyOptions {
    /// Replace the target if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed silently if the target already exists.
    pub ignore_if_exists: bool,
}

/// Options for [`Fs::rename`].
#[derive(Copy, Clone, Default)]
pub struct RenameOptions {
    /// Replace the target if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed silently if the target already exists.
    pub ignore_if_exists: bool,
    /// Whether to create parent directories if they do not exist.
    pub create_parents: bool,
}

/// Options for [`Fs::remove_dir`], [`Fs::remove_file`], and [`Fs::trash`].
#[derive(Copy, Clone, Default)]
pub struct RemoveOptions {
    /// Remove a directory's contents recursively.
    pub recursive: bool,
    /// Succeed silently if the path does not exist.
    pub ignore_if_not_exists: bool,
}

/// Filesystem metadata as reported by [`Fs::metadata`].
#[derive(Copy, Clone, Debug)]
pub struct Metadata {
    pub inode: u64,
    pub mtime: MTime,
    /// Whether the path itself is a symlink. When it is, the other fields
    /// describe the symlink's target if it exists.
    pub is_symlink: bool,
    pub is_dir: bool,
    /// Length in bytes.
    pub len: u64,
    /// Whether this is a FIFO (always `false` on Windows).
    pub is_fifo: bool,
    pub is_executable: bool,
}
297
/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
///
/// Values are produced by [`Fs::metadata`] (see [`Metadata::mtime`]).
///
/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(transparent)]
pub struct MTime(SystemTime);
307
/// Identifier for a long-running filesystem job.
pub type JobId = usize;

/// Describes a running filesystem job, announced via [`JobEvent::Started`].
#[derive(Clone, Debug)]
pub struct JobInfo {
    /// When the job started.
    pub start: Instant,
    /// Human-readable description of the job.
    pub message: SharedString,
    pub id: JobId,
}

/// Lifecycle events for filesystem jobs, delivered to subscribers of
/// [`Fs::subscribe_to_jobs`].
#[derive(Debug, Clone)]
pub enum JobEvent {
    Started { info: JobInfo },
    Completed { id: JobId },
}

/// Sending half of a job-event channel.
pub type JobEventSender = futures::channel::mpsc::UnboundedSender<JobEvent>;
/// Receiving half of a job-event channel, returned by [`Fs::subscribe_to_jobs`].
pub type JobEventReceiver = futures::channel::mpsc::UnboundedReceiver<JobEvent>;
325
326struct JobTracker {
327 id: JobId,
328 subscribers: Arc<Mutex<Vec<JobEventSender>>>,
329}
330
331impl JobTracker {
332 fn new(info: JobInfo, subscribers: Arc<Mutex<Vec<JobEventSender>>>) -> Self {
333 let id = info.id;
334 {
335 let mut subs = subscribers.lock();
336 subs.retain(|sender| {
337 sender
338 .unbounded_send(JobEvent::Started { info: info.clone() })
339 .is_ok()
340 });
341 }
342 Self { id, subscribers }
343 }
344}
345
346impl Drop for JobTracker {
347 fn drop(&mut self) {
348 let mut subs = self.subscribers.lock();
349 subs.retain(|sender| {
350 sender
351 .unbounded_send(JobEvent::Completed { id: self.id })
352 .is_ok()
353 });
354 }
355}
356
357impl MTime {
358 /// Conversion intended for persistence and testing.
359 pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
360 MTime(UNIX_EPOCH + Duration::new(secs, nanos))
361 }
362
363 /// Conversion intended for persistence.
364 pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
365 self.0
366 .duration_since(UNIX_EPOCH)
367 .ok()
368 .map(|duration| (duration.as_secs(), duration.subsec_nanos()))
369 }
370
371 /// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
372 /// "_for_user" is to discourage misuse - this method should not be used when making decisions
373 /// about file dirtiness.
374 pub fn timestamp_for_user(self) -> SystemTime {
375 self.0
376 }
377
378 /// Temporary method to split out the behavior changes from introduction of this newtype.
379 pub fn bad_is_greater_than(self, other: MTime) -> bool {
380 self.0 > other.0
381 }
382}
383
/// Conversion from the wire-format timestamp used by the collaboration
/// protocol.
impl From<proto::Timestamp> for MTime {
    fn from(timestamp: proto::Timestamp) -> Self {
        MTime(timestamp.into())
    }
}

/// Conversion to the wire-format timestamp used by the collaboration protocol.
impl From<MTime> for proto::Timestamp {
    fn from(mtime: MTime) -> Self {
        mtime.0.into()
    }
}
395
/// [`Fs`] implementation backed by the real operating-system filesystem.
pub struct RealFs {
    /// Path to a bundled `git` binary, if one was supplied at construction.
    bundled_git_binary_path: Option<PathBuf>,
    /// Executor used to off-load blocking filesystem calls.
    executor: BackgroundExecutor,
    /// Source of unique [`JobId`]s.
    next_job_id: Arc<AtomicUsize>,
    /// Channels registered via [`Fs::subscribe_to_jobs`].
    job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
    // NOTE(review): presumably a cached tri-state (unset/true/false) backing
    // `is_case_sensitive()` — confirm against its usage outside this view.
    is_case_sensitive: AtomicU8,
}

/// An open OS file handle from which the file's *current* path can be
/// re-derived (e.g. after a rename), using platform-specific queries.
pub trait FileHandle: Send + Sync + std::fmt::Debug {
    /// Returns the path currently associated with this handle.
    fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf>;
}
407
408impl FileHandle for std::fs::File {
409 #[cfg(target_os = "macos")]
410 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
411 use std::{
412 ffi::{CStr, OsStr},
413 os::unix::ffi::OsStrExt,
414 };
415
416 let fd = self.as_fd();
417 let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit();
418
419 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) };
420 anyhow::ensure!(result != -1, "fcntl returned -1");
421
422 // SAFETY: `fcntl` will initialize the path buffer.
423 let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) };
424 anyhow::ensure!(!c_str.is_empty(), "Could find a path for the file handle");
425 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
426 Ok(path)
427 }
428
429 #[cfg(target_os = "linux")]
430 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
431 let fd = self.as_fd();
432 let fd_path = format!("/proc/self/fd/{}", fd.as_raw_fd());
433 let new_path = std::fs::read_link(fd_path)?;
434 if new_path
435 .file_name()
436 .is_some_and(|f| f.to_string_lossy().ends_with(" (deleted)"))
437 {
438 anyhow::bail!("file was deleted")
439 };
440
441 Ok(new_path)
442 }
443
444 #[cfg(target_os = "freebsd")]
445 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
446 use std::{
447 ffi::{CStr, OsStr},
448 os::unix::ffi::OsStrExt,
449 };
450
451 let fd = self.as_fd();
452 let mut kif = MaybeUninit::<libc::kinfo_file>::uninit();
453 kif.kf_structsize = libc::KINFO_FILE_SIZE;
454
455 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) };
456 anyhow::ensure!(result != -1, "fcntl returned -1");
457
458 // SAFETY: `fcntl` will initialize the kif.
459 let c_str = unsafe { CStr::from_ptr(kif.assume_init().kf_path.as_ptr()) };
460 anyhow::ensure!(!c_str.is_empty(), "Could find a path for the file handle");
461 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
462 Ok(path)
463 }
464
465 #[cfg(target_os = "windows")]
466 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
467 use std::ffi::OsString;
468 use std::os::windows::ffi::OsStringExt;
469 use std::os::windows::io::AsRawHandle;
470
471 use windows::Win32::Foundation::HANDLE;
472 use windows::Win32::Storage::FileSystem::{
473 FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW,
474 };
475
476 let handle = HANDLE(self.as_raw_handle() as _);
477
478 // Query required buffer size (in wide chars)
479 let required_len =
480 unsafe { GetFinalPathNameByHandleW(handle, &mut [], FILE_NAME_NORMALIZED) };
481 anyhow::ensure!(
482 required_len != 0,
483 "GetFinalPathNameByHandleW returned 0 length"
484 );
485
486 // Allocate buffer and retrieve the path
487 let mut buf: Vec<u16> = vec![0u16; required_len as usize + 1];
488 let written = unsafe { GetFinalPathNameByHandleW(handle, &mut buf, FILE_NAME_NORMALIZED) };
489 anyhow::ensure!(
490 written != 0,
491 "GetFinalPathNameByHandleW failed to write path"
492 );
493
494 let os_str: OsString = OsString::from_wide(&buf[..written as usize]);
495 anyhow::ensure!(!os_str.is_empty(), "Could find a path for the file handle");
496 Ok(PathBuf::from(os_str))
497 }
498}
499
/// Watcher type associated with the real filesystem.
// NOTE(review): currently empty; `RealFs::watch` constructs
// `fs_watcher::FsWatcher` instead — confirm whether this type is still used
// elsewhere in the crate.
pub struct RealWatcher {}
501
impl RealFs {
    /// Creates a real filesystem. `git_binary_path`, if provided, points at a
    /// bundled `git` binary; blocking filesystem calls run on `executor`.
    pub fn new(git_binary_path: Option<PathBuf>, executor: BackgroundExecutor) -> Self {
        Self {
            bundled_git_binary_path: git_binary_path,
            executor,
            next_job_id: Arc::new(AtomicUsize::new(0)),
            job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
            is_case_sensitive: Default::default(),
        }
    }

    /// Windows-specific canonicalization that keeps the caller's drive letter
    /// or mount point instead of resolving to a UNC path.
    #[cfg(target_os = "windows")]
    fn canonicalize(path: &Path) -> Result<PathBuf> {
        use std::ffi::OsString;
        use std::os::windows::ffi::OsStringExt;
        use windows::Win32::Storage::FileSystem::GetVolumePathNameW;
        use windows::core::HSTRING;

        // std::fs::canonicalize resolves mapped network paths to UNC paths, which can
        // confuse some software. To mitigate this, we canonicalize the input, then rebase
        // the result onto the input's original volume root if both paths are on the same
        // volume. This keeps the same drive letter or mount point the caller used.

        let abs_path = if path.is_relative() {
            std::env::current_dir()?.join(path)
        } else {
            path.to_path_buf()
        };

        // Find the volume root (e.g. `C:\` or a mount point) the input lives on.
        let path_hstring = HSTRING::from(abs_path.as_os_str());
        let mut vol_buf = vec![0u16; abs_path.as_os_str().len() + 2];
        unsafe { GetVolumePathNameW(&path_hstring, &mut vol_buf)? };
        let volume_root = {
            // The buffer is NUL-terminated; trim at the first NUL.
            let len = vol_buf
                .iter()
                .position(|&c| c == 0)
                .unwrap_or(vol_buf.len());
            PathBuf::from(OsString::from_wide(&vol_buf[..len]))
        };

        let resolved_path = dunce::canonicalize(&abs_path)?;
        let resolved_root = dunce::canonicalize(&volume_root)?;

        // Rebase onto the original volume root when both resolve to the same
        // volume; otherwise fall back to the fully resolved path.
        if let Ok(relative) = resolved_path.strip_prefix(&resolved_root) {
            let mut result = volume_root;
            result.push(relative);
            Ok(result)
        } else {
            Ok(resolved_path)
        }
    }
}
554
/// Renames `source` to `target`, failing (instead of replacing) if `target`
/// already exists — an atomic "no-replace" rename.
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn rename_without_replace(source: &Path, target: &Path) -> io::Result<()> {
    let source = path_to_c_string(source)?;
    let target = path_to_c_string(target)?;

    // macOS: `renamex_np` with RENAME_EXCL fails if the target exists.
    #[cfg(target_os = "macos")]
    let result = unsafe { libc::renamex_np(source.as_ptr(), target.as_ptr(), libc::RENAME_EXCL) };

    // Linux: raw `renameat2` syscall with RENAME_NOREPLACE (invoked via
    // `syscall` rather than a libc wrapper).
    #[cfg(target_os = "linux")]
    let result = unsafe {
        libc::syscall(
            libc::SYS_renameat2,
            libc::AT_FDCWD,
            source.as_ptr(),
            libc::AT_FDCWD,
            target.as_ptr(),
            libc::RENAME_NOREPLACE,
        )
    };

    if result == 0 {
        Ok(())
    } else {
        Err(io::Error::last_os_error())
    }
}
581
582#[cfg(target_os = "windows")]
583fn rename_without_replace(source: &Path, target: &Path) -> io::Result<()> {
584 use std::os::windows::ffi::OsStrExt;
585
586 use windows::Win32::Storage::FileSystem::{MOVE_FILE_FLAGS, MoveFileExW};
587 use windows::core::PCWSTR;
588
589 let source: Vec<u16> = source.as_os_str().encode_wide().chain(Some(0)).collect();
590 let target: Vec<u16> = target.as_os_str().encode_wide().chain(Some(0)).collect();
591
592 unsafe {
593 MoveFileExW(
594 PCWSTR(source.as_ptr()),
595 PCWSTR(target.as_ptr()),
596 MOVE_FILE_FLAGS::default(),
597 )
598 }
599 .map_err(|_| io::Error::last_os_error())
600}
601
/// Converts a path into a NUL-terminated `CString` for passing to libc,
/// rejecting paths that contain an interior NUL byte.
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn path_to_c_string(path: &Path) -> io::Result<CString> {
    match CString::new(path.as_os_str().as_bytes()) {
        Ok(c_string) => Ok(c_string),
        Err(_) => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("path contains interior NUL: {}", path.display()),
        )),
    }
}
611
612#[async_trait::async_trait]
613impl Fs for RealFs {
614 async fn create_dir(&self, path: &Path) -> Result<()> {
615 Ok(smol::fs::create_dir_all(path).await?)
616 }
617
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
        #[cfg(unix)]
        smol::fs::unix::symlink(target, path).await?;

        // On Windows, directories are linked with a junction (`mklink /J`)
        // rather than a directory symlink — presumably to avoid requiring
        // symlink privileges; TODO confirm. Files use a regular file symlink.
        #[cfg(windows)]
        if smol::fs::metadata(&target).await?.is_dir() {
            let status = new_command("cmd")
                .args(["/C", "mklink", "/J"])
                .args([path, target.as_path()])
                .status()
                .await?;

            if !status.success() {
                return Err(anyhow::anyhow!(
                    "Failed to create junction from {:?} to {:?}",
                    path,
                    target
                ));
            }
        } else {
            smol::fs::windows::symlink_file(target, path).await?
        }

        Ok(())
    }
643
644 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
645 let mut open_options = smol::fs::OpenOptions::new();
646 open_options.write(true).create(true);
647 if options.overwrite {
648 open_options.truncate(true);
649 } else if !options.ignore_if_exists {
650 open_options.create_new(true);
651 }
652 open_options
653 .open(path)
654 .await
655 .with_context(|| format!("Failed to create file at {:?}", path))?;
656 Ok(())
657 }
658
    /// Creates (or truncates) the file at `path` and streams `content` into it.
    async fn create_file_with(
        &self,
        path: &Path,
        content: Pin<&mut (dyn AsyncRead + Send)>,
    ) -> Result<()> {
        let mut file = smol::fs::File::create(&path)
            .await
            .with_context(|| format!("Failed to create file at {:?}", path))?;
        futures::io::copy(content, &mut file).await?;
        Ok(())
    }
670
    /// Unpacks the tar archive `content` into the directory at `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()> {
        content.unpack(path).await?;
        Ok(())
    }
679
680 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
681 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
682 if options.ignore_if_exists {
683 return Ok(());
684 } else {
685 anyhow::bail!("{target:?} already exists");
686 }
687 }
688
689 smol::fs::copy(source, target).await?;
690 Ok(())
691 }
692
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
        // Optionally ensure the target's parent directory exists first.
        if options.create_parents {
            if let Some(parent) = target.parent() {
                self.create_dir(parent).await?;
            }
        }

        // With `overwrite`, a plain rename (which replaces the target) suffices.
        if options.overwrite {
            smol::fs::rename(source, target).await?;
            return Ok(());
        }

        // Try an atomic no-replace rename first; fall back to a non-atomic
        // metadata check + rename when the platform/filesystem lacks support.
        let use_metadata_fallback = {
            #[cfg(any(target_os = "macos", target_os = "linux", target_os = "windows"))]
            {
                let source = source.to_path_buf();
                let target = target.to_path_buf();
                match self
                    .executor
                    .spawn(async move { rename_without_replace(&source, &target) })
                    .await
                {
                    Ok(()) => return Ok(()),
                    Err(error) if error.kind() == io::ErrorKind::AlreadyExists => {
                        if options.ignore_if_exists {
                            return Ok(());
                        }
                        return Err(error.into());
                    }
                    Err(error)
                        if error.raw_os_error().is_some_and(|code| {
                            code == libc::ENOSYS
                                || code == libc::ENOTSUP
                                || code == libc::EOPNOTSUPP
                                || code == libc::EINVAL
                        }) =>
                    {
                        // For case when filesystem or kernel does not support atomic no-overwrite rename.
                        // EINVAL is returned by FUSE-based filesystems (e.g. NTFS via ntfs-3g)
                        // that don't support RENAME_NOREPLACE.
                        true
                    }
                    Err(error) => return Err(error.into()),
                }
            }

            #[cfg(not(any(target_os = "macos", target_os = "linux", target_os = "windows")))]
            {
                // For platforms which do not have an atomic no-overwrite rename yet.
                true
            }
        };

        // Non-atomic fallback: racy between the existence check and the
        // rename, but the best available without RENAME_NOREPLACE support.
        if use_metadata_fallback && smol::fs::metadata(target).await.is_ok() {
            if options.ignore_if_exists {
                return Ok(());
            } else {
                anyhow::bail!("{target:?} already exists");
            }
        }

        smol::fs::rename(source, target).await?;
        Ok(())
    }
757
758 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
759 let result = if options.recursive {
760 smol::fs::remove_dir_all(path).await
761 } else {
762 smol::fs::remove_dir(path).await
763 };
764 match result {
765 Ok(()) => Ok(()),
766 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
767 Ok(())
768 }
769 Err(err) => Err(err)?,
770 }
771 }
772
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        // On Windows, a symlink that points at a directory is removed via the
        // directory API rather than `remove_file`.
        #[cfg(windows)]
        if let Ok(Some(metadata)) = self.metadata(path).await
            && metadata.is_symlink
            && metadata.is_dir
        {
            self.remove_dir(
                path,
                RemoveOptions {
                    recursive: false,
                    ignore_if_not_exists: true,
                },
            )
            .await?;
            return Ok(());
        }

        match smol::fs::remove_file(path).await {
            Ok(()) => Ok(()),
            // A missing file is fine when `ignore_if_not_exists` is set.
            Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
                Ok(())
            }
            Err(err) => Err(err)?,
        }
    }
798
799 async fn trash(&self, path: &Path, _options: RemoveOptions) -> Result<TrashedEntry> {
800 // We must make the path absolute or trash will make a weird abomination
801 // of the zed working directory (not usually the worktree) and whatever
802 // the path variable holds.
803 let path = self
804 .canonicalize(path)
805 .await
806 .context("Could not canonicalize the path of the file")?;
807
808 let (tx, rx) = futures::channel::oneshot::channel();
809 std::thread::Builder::new()
810 .name("trash file or dir".to_string())
811 .spawn(|| tx.send(trash::delete_with_info(path)))
812 .expect("The os can spawn threads");
813
814 Ok(rx
815 .await
816 .context("Tx dropped or fs.restore panicked")?
817 .context("Could not trash file or dir")?
818 .into())
819 }
820
821 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
822 Ok(Box::new(std::fs::File::open(path)?))
823 }
824
    /// Opens a read-only OS handle; see [`FileHandle`] for path re-derivation.
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
        let mut options = std::fs::OpenOptions::new();
        options.read(true);
        #[cfg(windows)]
        {
            use std::os::windows::fs::OpenOptionsExt;
            // FILE_FLAG_BACKUP_SEMANTICS permits opening handles to
            // directories on Windows, which is otherwise refused.
            options.custom_flags(windows::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS.0);
        }
        Ok(Arc::new(options.open(path)?))
    }
835
    /// Reads the whole file as UTF-8 text on the background executor.
    async fn load(&self, path: &Path) -> Result<String> {
        let path = path.to_path_buf();
        self.executor
            .spawn(async move {
                std::fs::read_to_string(&path)
                    .with_context(|| format!("Failed to read file {}", path.display()))
            })
            .await
    }
845
846 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
847 let path = path.to_path_buf();
848 let bytes = self
849 .executor
850 .spawn(async move { std::fs::read(path) })
851 .await?;
852 Ok(bytes)
853 }
854
    /// Atomically replaces `path`'s contents: write to a temp file in the
    /// same directory, then rename over the destination.
    #[cfg(not(target_os = "windows"))]
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        smol::unblock(move || {
            // Use the directory of the destination as temp dir to avoid
            // invalid cross-device link error, and XDG_CACHE_DIR for fallback.
            // See https://github.com/zed-industries/zed/pull/8437 for more details.
            let mut tmp_file =
                tempfile::NamedTempFile::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
            tmp_file.write_all(data.as_bytes())?;
            tmp_file.persist(path)?;
            anyhow::Ok(())
        })
        .await?;

        Ok(())
    }
871
872 #[cfg(target_os = "windows")]
873 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
874 smol::unblock(move || {
875 // If temp dir is set to a different drive than the destination,
876 // we receive error:
877 //
878 // failed to persist temporary file:
879 // The system cannot move the file to a different disk drive. (os error 17)
880 //
881 // This is because `ReplaceFileW` does not support cross volume moves.
882 // See the remark section: "The backup file, replaced file, and replacement file must all reside on the same volume."
883 // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-replacefilew#remarks
884 //
885 // So we use the directory of the destination as a temp dir to avoid it.
886 // https://github.com/zed-industries/zed/issues/16571
887 let temp_dir = TempDir::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
888 let temp_file = {
889 let temp_file_path = temp_dir.path().join("temp_file");
890 let mut file = std::fs::File::create_new(&temp_file_path)?;
891 file.write_all(data.as_bytes())?;
892 temp_file_path
893 };
894 atomic_replace(path.as_path(), temp_file.as_path())?;
895 anyhow::Ok(())
896 })
897 .await?;
898 Ok(())
899 }
900
    /// Writes a rope to `path` with the requested line ending, creating any
    /// missing parent directories first.
    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
        // Buffer writes, capping the buffer at 10 KiB for small files.
        let buffer_size = text.summary().len.min(10 * 1024);
        if let Some(path) = path.parent() {
            self.create_dir(path)
                .await
                .with_context(|| format!("Failed to create directory at {:?}", path))?;
        }
        let file = smol::fs::File::create(path)
            .await
            .with_context(|| format!("Failed to create file at {:?}", path))?;
        let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
        for chunk in text::chunks_with_line_ending(text, line_ending) {
            writer.write_all(chunk.as_bytes()).await?;
        }
        writer.flush().await?;
        Ok(())
    }
918
    /// Writes raw bytes to `path`, creating any missing parent directories
    /// first; the actual write runs on the background executor.
    async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
        if let Some(path) = path.parent() {
            self.create_dir(path)
                .await
                .with_context(|| format!("Failed to create directory at {:?}", path))?;
        }
        let path = path.to_owned();
        let contents = content.to_owned();
        self.executor
            .spawn(async move {
                std::fs::write(path, contents)?;
                Ok(())
            })
            .await
    }
934
    /// Canonicalizes `path` on the background executor. Windows uses the
    /// drive-letter-preserving variant (`RealFs::canonicalize`).
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
        let path = path.to_owned();
        self.executor
            .spawn(async move {
                #[cfg(target_os = "windows")]
                let result = Self::canonicalize(&path);

                #[cfg(not(target_os = "windows"))]
                let result = std::fs::canonicalize(&path);

                result.with_context(|| format!("canonicalizing {path:?}"))
            })
            .await
    }
949
950 async fn is_file(&self, path: &Path) -> bool {
951 let path = path.to_owned();
952 self.executor
953 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) })
954 .await
955 }
956
957 async fn is_dir(&self, path: &Path) -> bool {
958 let path = path.to_owned();
959 self.executor
960 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) })
961 .await
962 }
963
    /// Returns metadata for `path`, following symlinks to their target when
    /// possible; `Ok(None)` if the path does not exist.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        // Stat the path without following symlinks first, so we can both
        // detect symlinks and distinguish "missing" from other errors.
        let path_buf = path.to_owned();
        let symlink_metadata = match self
            .executor
            .spawn(async move { std::fs::symlink_metadata(&path_buf) })
            .await
        {
            Ok(metadata) => metadata,
            Err(err) => {
                // A missing path (or a non-directory used as one) is reported
                // as `Ok(None)` rather than an error.
                return match err.kind() {
                    io::ErrorKind::NotFound | io::ErrorKind::NotADirectory => Ok(None),
                    _ => Err(anyhow::Error::new(err)),
                };
            }
        };

        let is_symlink = symlink_metadata.file_type().is_symlink();
        let metadata = if is_symlink {
            let path_buf = path.to_path_buf();
            // Read target metadata, if the target exists
            match self
                .executor
                .spawn(async move { std::fs::metadata(path_buf) })
                .await
            {
                Ok(target_metadata) => target_metadata,
                Err(err) => {
                    if err.kind() != io::ErrorKind::NotFound {
                        // TODO: Also FilesystemLoop when that's stable
                        log::warn!(
                            "Failed to read symlink target metadata for path {path:?}: {err}"
                        );
                    }
                    // For a broken or recursive symlink, return the symlink metadata. (Or
                    // as edge cases, a symlink into a directory we can't read, which is hard
                    // to distinguish from just being broken.)
                    symlink_metadata
                }
            }
        } else {
            symlink_metadata
        };

        #[cfg(unix)]
        let inode = metadata.ino();

        #[cfg(windows)]
        let inode = file_id(path).await?;

        #[cfg(windows)]
        let is_fifo = false;

        #[cfg(unix)]
        let is_fifo = metadata.file_type().is_fifo();

        // The executable check may touch the filesystem, so it also runs on
        // the background executor.
        let path_buf = path.to_path_buf();
        let is_executable = self
            .executor
            .spawn(async move { path_buf.is_executable() })
            .await;

        Ok(Some(Metadata {
            inode,
            mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)),
            len: metadata.len(),
            is_symlink,
            is_dir: metadata.file_type().is_dir(),
            is_fifo,
            is_executable,
        }))
    }
1035
1036 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
1037 let path = path.to_owned();
1038 let path = self
1039 .executor
1040 .spawn(async move { std::fs::read_link(&path) })
1041 .await?;
1042 Ok(path)
1043 }
1044
    /// Opens the directory listing on the background executor and exposes it
    /// as a stream of full entry paths.
    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
        let path = path.to_owned();
        let result = iter(
            self.executor
                .spawn(async move { std::fs::read_dir(path) })
                .await?,
        )
        .map(|entry| match entry {
            Ok(entry) => Ok(entry.path()),
            Err(error) => Err(anyhow!("failed to read dir entry {error:?}")),
        });
        Ok(Box::pin(result))
    }
1061
1062 async fn watch(
1063 &self,
1064 path: &Path,
1065 latency: Duration,
1066 ) -> (
1067 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
1068 Arc<dyn Watcher>,
1069 ) {
1070 use util::{ResultExt as _, paths::SanitizedPath};
1071 let executor = self.executor.clone();
1072
1073 let (tx, rx) = smol::channel::unbounded();
1074 let pending_paths: Arc<Mutex<Vec<PathEvent>>> = Default::default();
1075 let watcher = Arc::new(fs_watcher::FsWatcher::new(tx, pending_paths.clone()));
1076
1077 // If the path doesn't exist yet (e.g. settings.json), watch the parent dir to learn when it's created.
1078 if let Err(e) = watcher.add(path)
1079 && let Some(parent) = path.parent()
1080 && let Err(parent_e) = watcher.add(parent)
1081 {
1082 log::warn!(
1083 "Failed to watch {} and its parent directory {}:\n{e}\n{parent_e}",
1084 path.display(),
1085 parent.display()
1086 );
1087 }
1088
1089 // Check if path is a symlink and follow the target parent
1090 if let Some(mut target) = self.read_link(path).await.ok() {
1091 log::trace!("watch symlink {path:?} -> {target:?}");
1092 // Check if symlink target is relative path, if so make it absolute
1093 if target.is_relative()
1094 && let Some(parent) = path.parent()
1095 {
1096 target = parent.join(target);
1097 if let Ok(canonical) = self.canonicalize(&target).await {
1098 target = SanitizedPath::new(&canonical).as_path().to_path_buf();
1099 }
1100 }
1101 watcher.add(&target).ok();
1102 if let Some(parent) = target.parent() {
1103 watcher.add(parent).log_err();
1104 }
1105 }
1106
1107 (
1108 Box::pin(rx.filter_map({
1109 let watcher = watcher.clone();
1110 let executor = executor.clone();
1111 move |_| {
1112 let _ = watcher.clone();
1113 let pending_paths = pending_paths.clone();
1114 let executor = executor.clone();
1115 async move {
1116 executor.timer(latency).await;
1117 let paths = std::mem::take(&mut *pending_paths.lock());
1118 (!paths.is_empty()).then_some(paths)
1119 }
1120 }
1121 })),
1122 watcher,
1123 )
1124 }
1125
1126 fn open_repo(
1127 &self,
1128 dotgit_path: &Path,
1129 system_git_binary_path: Option<&Path>,
1130 ) -> Result<Arc<dyn GitRepository>> {
1131 Ok(Arc::new(RealGitRepository::new(
1132 dotgit_path,
1133 self.bundled_git_binary_path.clone(),
1134 system_git_binary_path.map(|path| path.to_path_buf()),
1135 self.executor.clone(),
1136 )?))
1137 }
1138
1139 async fn git_init(
1140 &self,
1141 abs_work_directory_path: &Path,
1142 fallback_branch_name: String,
1143 ) -> Result<()> {
1144 let config = new_command("git")
1145 .current_dir(abs_work_directory_path)
1146 .args(&["config", "--global", "--get", "init.defaultBranch"])
1147 .output()
1148 .await?;
1149
1150 let branch_name;
1151
1152 if config.status.success() && !config.stdout.is_empty() {
1153 branch_name = String::from_utf8_lossy(&config.stdout);
1154 } else {
1155 branch_name = Cow::Borrowed(fallback_branch_name.as_str());
1156 }
1157
1158 new_command("git")
1159 .current_dir(abs_work_directory_path)
1160 .args(&["init", "-b"])
1161 .arg(branch_name.trim())
1162 .output()
1163 .await?;
1164
1165 Ok(())
1166 }
1167
1168 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()> {
1169 let job_id = self.next_job_id.fetch_add(1, Ordering::SeqCst);
1170 let job_info = JobInfo {
1171 id: job_id,
1172 start: Instant::now(),
1173 message: SharedString::from(format!("Cloning {}", repo_url)),
1174 };
1175
1176 let _job_tracker = JobTracker::new(job_info, self.job_event_subscribers.clone());
1177
1178 let output = new_command("git")
1179 .current_dir(abs_work_directory)
1180 .args(&["clone", repo_url])
1181 .output()
1182 .await?;
1183
1184 if !output.status.success() {
1185 anyhow::bail!(
1186 "git clone failed: {}",
1187 String::from_utf8_lossy(&output.stderr)
1188 );
1189 }
1190
1191 Ok(())
1192 }
1193
    /// Always false: this is the real filesystem implementation, not the
    /// test-only `FakeFs`.
    fn is_fake(&self) -> bool {
        false
    }
1197
1198 fn subscribe_to_jobs(&self) -> JobEventReceiver {
1199 let (sender, receiver) = futures::channel::mpsc::unbounded();
1200 self.job_event_subscribers.lock().push(sender);
1201 receiver
1202 }
1203
    /// Checks whether the file system is case sensitive by attempting to create two files
    /// that have the same name except for the casing.
    ///
    /// It creates both files in a temporary directory it removes at the end.
    /// The outcome is cached in `self.is_case_sensitive`, so the probe runs at
    /// most once per process (modulo a benign race).
    async fn is_case_sensitive(&self) -> bool {
        // States of the cached `AtomicU8` result.
        const UNINITIALIZED: u8 = 0;
        const CASE_SENSITIVE: u8 = 1;
        const NOT_CASE_SENSITIVE: u8 = 2;

        // Note we could CAS here, but really, if we race we do this work twice at worst which isn't a big deal.
        let load = self.is_case_sensitive.load(Ordering::Acquire);
        if load != UNINITIALIZED {
            return load == CASE_SENSITIVE;
        }
        let temp_dir = self.executor.spawn(async { TempDir::new() });
        let res = maybe!(async {
            let temp_dir = temp_dir.await?;
            let test_file_1 = temp_dir.path().join("case_sensitivity_test.tmp");
            let test_file_2 = temp_dir.path().join("CASE_SENSITIVITY_TEST.TMP");

            // The second create must *fail* on a name collision rather than
            // silently overwrite, for the probe to be meaningful.
            let create_opts = CreateOptions {
                overwrite: false,
                ignore_if_exists: false,
            };

            // Create file1
            self.create_file(&test_file_1, create_opts).await?;

            // Now check whether it's possible to create file2
            let case_sensitive = match self.create_file(&test_file_2, create_opts).await {
                Ok(_) => Ok(true),
                Err(e) => {
                    // `AlreadyExists` means both spellings mapped to the same
                    // file, i.e. the filesystem is case-insensitive. Any other
                    // error is propagated.
                    if let Some(io_error) = e.downcast_ref::<io::Error>() {
                        if io_error.kind() == io::ErrorKind::AlreadyExists {
                            Ok(false)
                        } else {
                            Err(e)
                        }
                    } else {
                        Err(e)
                    }
                }
            };

            temp_dir.close()?;
            case_sensitive
        }).await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });
        // Publish the result for subsequent callers.
        self.is_case_sensitive.store(
            if res {
                CASE_SENSITIVE
            } else {
                NOT_CASE_SENSITIVE
            },
            Ordering::Release,
        );
        res
    }
1266
1267 async fn restore(
1268 &self,
1269 trashed_entry: TrashedEntry,
1270 ) -> std::result::Result<PathBuf, TrashRestoreError> {
1271 let restored_item_path = trashed_entry.original_parent.join(&trashed_entry.name);
1272
1273 let (tx, rx) = futures::channel::oneshot::channel();
1274 std::thread::Builder::new()
1275 .name("restore trashed item".to_string())
1276 .spawn(move || {
1277 let res = trash::restore_all([trashed_entry.into_trash_item()]);
1278 tx.send(res)
1279 })
1280 .expect("The OS can spawn a threads");
1281 rx.await.expect("Restore all never panics")?;
1282 Ok(restored_item_path)
1283 }
1284}
1285
#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
impl Watcher for RealWatcher {
    // NOTE(review): on these platforms (macOS/Windows) the underlying watcher
    // presumably observes paths without per-path registration, so `add` and
    // `remove` are no-ops — confirm against the platform watcher implementation.
    fn add(&self, _: &Path) -> Result<()> {
        Ok(())
    }

    fn remove(&self, _: &Path) -> Result<()> {
        Ok(())
    }
}
1296
/// An entirely in-memory filesystem implementation used by tests.
#[cfg(feature = "test-support")]
pub struct FakeFs {
    // Weak handle back to the containing `Arc`, set up via `Arc::new_cyclic`.
    this: std::sync::Weak<Self>,
    // Use an unfair lock to ensure tests are deterministic.
    state: Arc<Mutex<FakeFsState>>,
    executor: gpui::BackgroundExecutor,
}
1304
/// The mutable interior of a [`FakeFs`], kept behind a single mutex.
#[cfg(feature = "test-support")]
struct FakeFsState {
    // Root directory of the in-memory tree.
    root: FakeFsEntry,
    // Monotonic counters used to assign inodes and mtimes to new entries.
    next_inode: u64,
    next_mtime: SystemTime,
    // Receives paths whose fake git state changed; forwarded as fs events by
    // the background task spawned in `FakeFs::new`.
    git_event_tx: smol::channel::Sender<PathBuf>,
    // One sender per subscriber, paired with the path it watches.
    event_txs: Vec<(PathBuf, smol::channel::Sender<Vec<PathEvent>>)>,
    // While true, events accumulate in `buffered_events` instead of being sent.
    events_paused: bool,
    buffered_events: Vec<PathEvent>,
    // Call counters — presumably read by test assertions; confirm at call sites.
    metadata_call_count: usize,
    read_dir_call_count: usize,
    // Number of writes performed per path (see `write_file_internal`).
    path_write_counts: std::collections::HashMap<PathBuf, usize>,
    // NOTE(review): appears to map an inode to a destination path of a move —
    // confirm against the rename implementation (not visible here).
    moves: std::collections::HashMap<u64, PathBuf>,
    job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
    // Trashed entries together with their content, consumed by restore logic.
    trash: Vec<(TrashedEntry, FakeFsEntry)>,
}
1321
/// A single node in the fake filesystem tree.
#[cfg(feature = "test-support")]
#[derive(Clone, Debug)]
enum FakeFsEntry {
    File {
        inode: u64,
        mtime: MTime,
        len: u64,
        content: Vec<u8>,
        // The path to the repository state directory, if this is a gitfile.
        git_dir_path: Option<PathBuf>,
    },
    Dir {
        inode: u64,
        mtime: MTime,
        len: u64,
        // Child entries keyed by file name.
        entries: BTreeMap<String, FakeFsEntry>,
        // Lazily-created git state, present when this directory acts as a
        // `.git` directory (see `with_git_state_and_paths`).
        git_repo_state: Option<Arc<Mutex<FakeGitRepositoryState>>>,
    },
    Symlink {
        target: PathBuf,
    },
}
1344
#[cfg(feature = "test-support")]
impl PartialEq for FakeFsEntry {
    /// Structural equality; for directories the attached git repository state
    /// is compared by pointer identity rather than by contents.
    fn eq(&self, other: &Self) -> bool {
        use FakeFsEntry::*;
        match (self, other) {
            (
                File {
                    inode: a_inode,
                    mtime: a_mtime,
                    len: a_len,
                    content: a_content,
                    git_dir_path: a_git_dir,
                },
                File {
                    inode: b_inode,
                    mtime: b_mtime,
                    len: b_len,
                    content: b_content,
                    git_dir_path: b_git_dir,
                },
            ) => {
                (a_inode, a_mtime, a_len, a_content, a_git_dir)
                    == (b_inode, b_mtime, b_len, b_content, b_git_dir)
            }
            (
                Dir {
                    inode: a_inode,
                    mtime: a_mtime,
                    len: a_len,
                    entries: a_entries,
                    git_repo_state: a_repo,
                },
                Dir {
                    inode: b_inode,
                    mtime: b_mtime,
                    len: b_len,
                    entries: b_entries,
                    git_repo_state: b_repo,
                },
            ) => {
                let repos_match = match (a_repo, b_repo) {
                    (Some(a), Some(b)) => Arc::ptr_eq(a, b),
                    (None, None) => true,
                    _ => false,
                };
                repos_match
                    && (a_inode, a_mtime, a_len) == (b_inode, b_mtime, b_len)
                    && a_entries == b_entries
            }
            (Symlink { target: a }, Symlink { target: b }) => a == b,
            _ => false,
        }
    }
}
1405
#[cfg(feature = "test-support")]
impl FakeFsState {
    /// Returns the current synthetic mtime and advances the fake clock.
    fn get_and_increment_mtime(&mut self) -> MTime {
        let mtime = self.next_mtime;
        self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
        MTime(mtime)
    }

    /// Allocates the next unused inode number.
    fn get_and_increment_inode(&mut self) -> u64 {
        let inode = self.next_inode;
        self.next_inode += 1;
        inode
    }

    /// Resolves `target` against the in-memory tree, expanding `.`/`..` and
    /// any symlinks encountered along the way; the final component's symlink
    /// is only followed when `follow_symlink` is true. Returns `None` when the
    /// path does not exist.
    fn canonicalize(&self, target: &Path, follow_symlink: bool) -> Option<PathBuf> {
        let mut canonical_path = PathBuf::new();
        let mut path = target.to_path_buf();
        let mut entry_stack = Vec::new();
        // Restarted from the top whenever a symlink substitutes a new path.
        'outer: loop {
            let mut path_components = path.components().peekable();
            let mut prefix = None;
            while let Some(component) = path_components.next() {
                match component {
                    Component::Prefix(prefix_component) => prefix = Some(prefix_component),
                    Component::RootDir => {
                        entry_stack.clear();
                        entry_stack.push(&self.root);
                        canonical_path.clear();
                        match prefix {
                            Some(prefix_component) => {
                                canonical_path = PathBuf::from(prefix_component.as_os_str());
                                // Prefixes like `C:\\` are represented without their trailing slash, so we have to re-add it.
                                canonical_path.push(std::path::MAIN_SEPARATOR_STR);
                            }
                            None => canonical_path = PathBuf::from(std::path::MAIN_SEPARATOR_STR),
                        }
                    }
                    Component::CurDir => {}
                    Component::ParentDir => {
                        entry_stack.pop()?;
                        canonical_path.pop();
                    }
                    Component::Normal(name) => {
                        let current_entry = *entry_stack.last()?;
                        if let FakeFsEntry::Dir { entries, .. } = current_entry {
                            let entry = entries.get(name.to_str().unwrap())?;
                            // Intermediate symlinks are always expanded; the
                            // final one only when `follow_symlink` is set.
                            if (path_components.peek().is_some() || follow_symlink)
                                && let FakeFsEntry::Symlink { target, .. } = entry
                            {
                                let mut target = target.clone();
                                target.extend(path_components);
                                path = target;
                                continue 'outer;
                            }
                            entry_stack.push(entry);
                            canonical_path = canonical_path.join(name);
                        } else {
                            return None;
                        }
                    }
                }
            }
            break;
        }

        if entry_stack.is_empty() {
            None
        } else {
            Some(canonical_path)
        }
    }

    /// Looks up the entry at `target`, returning it together with its
    /// canonical path, or `None` if it does not exist.
    fn try_entry(
        &mut self,
        target: &Path,
        follow_symlink: bool,
    ) -> Option<(&mut FakeFsEntry, PathBuf)> {
        let canonical_path = self.canonicalize(target, follow_symlink)?;

        let mut components = canonical_path
            .components()
            .skip_while(|component| matches!(component, Component::Prefix(_)));
        let Some(Component::RootDir) = components.next() else {
            panic!(
                "the path {:?} was not canonicalized properly {:?}",
                target, canonical_path
            )
        };

        // The canonical path is symlink-free, so a straight walk suffices.
        let mut entry = &mut self.root;
        for component in components {
            match component {
                Component::Normal(name) => {
                    if let FakeFsEntry::Dir { entries, .. } = entry {
                        entry = entries.get_mut(name.to_str().unwrap())?;
                    } else {
                        return None;
                    }
                }
                _ => {
                    panic!(
                        "the path {:?} was not canonicalized properly {:?}",
                        target, canonical_path
                    )
                }
            }
        }

        Some((entry, canonical_path))
    }

    /// Like [`Self::try_entry`] (following symlinks), but converts a missing
    /// path into a `NotFound` error.
    fn entry(&mut self, target: &Path) -> Result<&mut FakeFsEntry> {
        Ok(self
            .try_entry(target, true)
            .ok_or_else(|| {
                anyhow!(io::Error::new(
                    io::ErrorKind::NotFound,
                    format!("not found: {target:?}")
                ))
            })?
            .0)
    }

    /// Runs `callback` with the map slot for `path`'s file name within its
    /// parent directory, letting callers insert, replace, or inspect it.
    fn write_path<Fn, T>(&mut self, path: &Path, callback: Fn) -> Result<T>
    where
        Fn: FnOnce(btree_map::Entry<String, FakeFsEntry>) -> Result<T>,
    {
        let path = normalize_path(path);
        let filename = path.file_name().context("cannot overwrite the root")?;
        let parent_path = path.parent().unwrap();

        let parent = self.entry(parent_path)?;
        let new_entry = parent
            .dir_entries(parent_path)?
            .entry(filename.to_str().unwrap().into());
        callback(new_entry)
    }

    /// Queues events for the given paths, delivering them immediately unless
    /// events are currently paused.
    fn emit_event<I, T>(&mut self, paths: I)
    where
        I: IntoIterator<Item = (T, Option<PathEventKind>)>,
        T: Into<PathBuf>,
    {
        self.buffered_events
            .extend(paths.into_iter().map(|(path, kind)| PathEvent {
                path: path.into(),
                kind,
            }));

        if !self.events_paused {
            self.flush_events(self.buffered_events.len());
        }
    }

    /// Sends up to `count` buffered events to every subscriber, pruning
    /// subscribers whose receiving end has been dropped.
    fn flush_events(&mut self, mut count: usize) {
        count = count.min(self.buffered_events.len());
        let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
        self.event_txs.retain(|(_, tx)| {
            let _ = tx.try_send(events.clone());
            !tx.is_closed()
        });
    }
}
1569
/// The well-known name of a git state directory (`.git`) as a static [`OsStr`].
#[cfg(feature = "test-support")]
pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
    std::sync::LazyLock::new(|| OsStr::new(".git"));
1573
1574#[cfg(feature = "test-support")]
1575impl FakeFs {
    /// Granularity of the synthetic mtime clock (see `get_and_increment_mtime`).
    /// We need to use something large enough for Windows and Unix to consider this a new file.
    /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
    const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);
1579
    /// Creates an empty fake filesystem bound to `executor`.
    ///
    /// Also spawns a background task that forwards fake-git-state change
    /// notifications into ordinary path-changed events.
    pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
        let (tx, rx) = smol::channel::bounded::<PathBuf>(10);

        // `new_cyclic` lets the instance hold a weak handle to itself.
        let this = Arc::new_cyclic(|this| Self {
            this: this.clone(),
            executor: executor.clone(),
            state: Arc::new(Mutex::new(FakeFsState {
                root: FakeFsEntry::Dir {
                    inode: 0,
                    mtime: MTime(UNIX_EPOCH),
                    len: 0,
                    entries: Default::default(),
                    git_repo_state: None,
                },
                git_event_tx: tx,
                next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
                next_inode: 1,
                event_txs: Default::default(),
                buffered_events: Vec::new(),
                events_paused: false,
                read_dir_call_count: 0,
                metadata_call_count: 0,
                path_write_counts: Default::default(),
                moves: Default::default(),
                job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
                trash: Vec::new(),
            })),
        });

        executor.spawn({
            let this = this.clone();
            async move {
                while let Ok(git_event) = rx.recv().await {
                    // `try_lock` instead of `lock`: blocking here while a test
                    // holds the state lock would hang the executor, so fail
                    // loudly instead.
                    if let Some(mut state) = this.state.try_lock() {
                        state.emit_event([(git_event, Some(PathEventKind::Changed))]);
                    } else {
                        panic!("Failed to lock file system state, this execution would have caused a test hang");
                    }
                }
            }
        }).detach();

        this
    }
1624
1625 pub fn set_next_mtime(&self, next_mtime: SystemTime) {
1626 let mut state = self.state.lock();
1627 state.next_mtime = next_mtime;
1628 }
1629
1630 pub fn get_and_increment_mtime(&self) -> MTime {
1631 let mut state = self.state.lock();
1632 state.get_and_increment_mtime()
1633 }
1634
1635 pub async fn touch_path(&self, path: impl AsRef<Path>) {
1636 let mut state = self.state.lock();
1637 let path = path.as_ref();
1638 let new_mtime = state.get_and_increment_mtime();
1639 let new_inode = state.get_and_increment_inode();
1640 state
1641 .write_path(path, move |entry| {
1642 match entry {
1643 btree_map::Entry::Vacant(e) => {
1644 e.insert(FakeFsEntry::File {
1645 inode: new_inode,
1646 mtime: new_mtime,
1647 content: Vec::new(),
1648 len: 0,
1649 git_dir_path: None,
1650 });
1651 }
1652 btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut() {
1653 FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
1654 FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
1655 FakeFsEntry::Symlink { .. } => {}
1656 },
1657 }
1658 Ok(())
1659 })
1660 .unwrap();
1661 state.emit_event([(path.to_path_buf(), Some(PathEventKind::Changed))]);
1662 }
1663
    /// Creates or overwrites the file at `path` with `content`, assigning a
    /// fresh inode; panics if the write fails (e.g. missing parent directory).
    pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
        self.write_file_internal(path, content, true).unwrap()
    }
1667
1668 pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
1669 let mut state = self.state.lock();
1670 let path = path.as_ref();
1671 let file = FakeFsEntry::Symlink { target };
1672 state
1673 .write_path(path.as_ref(), move |e| match e {
1674 btree_map::Entry::Vacant(e) => {
1675 e.insert(file);
1676 Ok(())
1677 }
1678 btree_map::Entry::Occupied(mut e) => {
1679 *e.get_mut() = file;
1680 Ok(())
1681 }
1682 })
1683 .unwrap();
1684 state.emit_event([(path, Some(PathEventKind::Created))]);
1685 }
1686
    /// Creates or updates the file at `path` with `new_content`.
    ///
    /// Records the write in `path_write_counts`, refreshes the mtime/len, and,
    /// when `recreate_inode` is true, assigns a fresh inode so the write looks
    /// like a replace rather than an in-place edit. Emits a created or changed
    /// event accordingly. Fails if the parent directory is missing or the
    /// existing entry is not a file.
    fn write_file_internal(
        &self,
        path: impl AsRef<Path>,
        new_content: Vec<u8>,
        recreate_inode: bool,
    ) -> Result<()> {
        // Non-generic inner function keeps monomorphization cost down.
        fn inner(
            this: &FakeFs,
            path: &Path,
            new_content: Vec<u8>,
            recreate_inode: bool,
        ) -> Result<()> {
            let mut state = this.state.lock();
            let path_buf = path.to_path_buf();
            *state.path_write_counts.entry(path_buf).or_insert(0) += 1;
            let new_inode = state.get_and_increment_inode();
            let new_mtime = state.get_and_increment_mtime();
            let new_len = new_content.len() as u64;
            let mut kind = None;
            state.write_path(path, |entry| {
                match entry {
                    btree_map::Entry::Vacant(e) => {
                        kind = Some(PathEventKind::Created);
                        e.insert(FakeFsEntry::File {
                            inode: new_inode,
                            mtime: new_mtime,
                            len: new_len,
                            content: new_content,
                            git_dir_path: None,
                        });
                    }
                    btree_map::Entry::Occupied(mut e) => {
                        kind = Some(PathEventKind::Changed);
                        if let FakeFsEntry::File {
                            inode,
                            mtime,
                            len,
                            content,
                            ..
                        } = e.get_mut()
                        {
                            *mtime = new_mtime;
                            *content = new_content;
                            *len = new_len;
                            // A fresh inode makes the write observable as a
                            // file replacement rather than an edit.
                            if recreate_inode {
                                *inode = new_inode;
                            }
                        } else {
                            anyhow::bail!("not a file")
                        }
                    }
                }
                Ok(())
            })?;
            state.emit_event([(path, kind)]);
            Ok(())
        }
        inner(self, path.as_ref(), new_content, recreate_inode)
    }
1746
1747 pub fn read_file_sync(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1748 let path = path.as_ref();
1749 let path = normalize_path(path);
1750 let mut state = self.state.lock();
1751 let entry = state.entry(&path)?;
1752 entry.file_content(&path).cloned()
1753 }
1754
1755 async fn load_internal(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1756 let path = path.as_ref();
1757 let path = normalize_path(path);
1758 self.simulate_random_delay().await;
1759 let mut state = self.state.lock();
1760 let entry = state.entry(&path)?;
1761 entry.file_content(&path).cloned()
1762 }
1763
    /// Stops delivering events; subsequent events accumulate in a buffer.
    pub fn pause_events(&self) {
        self.state.lock().events_paused = true;
    }

    /// Resumes event delivery and flushes everything buffered while paused.
    pub fn unpause_events_and_flush(&self) {
        self.state.lock().events_paused = false;
        self.flush_events(usize::MAX);
    }

    /// The number of events currently held in the buffer.
    pub fn buffered_event_count(&self) -> usize {
        self.state.lock().buffered_events.len()
    }

    /// Discards all buffered events without delivering them.
    pub fn clear_buffered_events(&self) {
        self.state.lock().buffered_events.clear();
    }

    /// Delivers up to `count` buffered events to all subscribers.
    pub fn flush_events(&self, count: usize) {
        self.state.lock().flush_events(count);
    }
1784
1785 pub(crate) fn entry(&self, target: &Path) -> Result<FakeFsEntry> {
1786 self.state.lock().entry(target).cloned()
1787 }
1788
1789 pub(crate) fn insert_entry(&self, target: &Path, new_entry: FakeFsEntry) -> Result<()> {
1790 let mut state = self.state.lock();
1791 state.write_path(target, |entry| {
1792 match entry {
1793 btree_map::Entry::Vacant(vacant_entry) => {
1794 vacant_entry.insert(new_entry);
1795 }
1796 btree_map::Entry::Occupied(mut occupied_entry) => {
1797 occupied_entry.insert(new_entry);
1798 }
1799 }
1800 Ok(())
1801 })
1802 }
1803
    /// Builds a directory tree at `path` from a JSON description: objects
    /// become directories, strings become file contents, and `null` becomes
    /// an empty directory. Panics on any other JSON value.
    #[must_use]
    pub fn insert_tree<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        tree: serde_json::Value,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;
        use serde_json::Value::*;

        // Non-generic inner function; boxing the future also enables the
        // recursion below.
        fn inner<'a>(
            this: &'a FakeFs,
            path: Arc<Path>,
            tree: serde_json::Value,
        ) -> futures::future::BoxFuture<'a, ()> {
            async move {
                match tree {
                    Object(map) => {
                        this.create_dir(&path).await.unwrap();
                        for (name, contents) in map {
                            let mut path = PathBuf::from(path.as_ref());
                            path.push(name);
                            this.insert_tree(&path, contents).await;
                        }
                    }
                    Null => {
                        this.create_dir(&path).await.unwrap();
                    }
                    String(contents) => {
                        this.insert_file(&path, contents.into_bytes()).await;
                    }
                    _ => {
                        panic!("JSON object must contain only objects, strings, or null");
                    }
                }
            }
            .boxed()
        }
        inner(self, Arc::from(path.as_ref()), tree)
    }
1843
    /// Recursively copies the file or directory at `src_path` on the *real*
    /// filesystem into this fake filesystem at `path`. Panics on I/O errors.
    pub fn insert_tree_from_real_fs<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        src_path: impl 'a + AsRef<Path> + Send,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;

        async move {
            let path = path.as_ref();
            if std::fs::metadata(&src_path).unwrap().is_file() {
                let contents = std::fs::read(src_path).unwrap();
                self.insert_file(path, contents).await;
            } else {
                self.create_dir(path).await.unwrap();
                for entry in std::fs::read_dir(&src_path).unwrap() {
                    let entry = entry.unwrap();
                    self.insert_tree_from_real_fs(path.join(entry.file_name()), entry.path())
                        .await;
                }
            }
        }
        .boxed()
    }
1867
    /// Resolves `dot_git` to its fake repository state and invokes `f` with
    /// that state, the repository state dir, and the common dir.
    ///
    /// `dot_git` may be a real `.git` directory, or a gitfile containing a
    /// `gitdir:` pointer (as linked worktrees use); in the latter case the
    /// pointer is resolved (and memoized on the entry) and an optional
    /// `commondir` file inside the pointed-to directory is honored. The
    /// [`FakeGitRepositoryState`] is created lazily on first access. When
    /// `emit_git_event` is true, a change event for the resolved directory is
    /// emitted after `f` runs.
    pub fn with_git_state_and_paths<T, F>(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        f: F,
    ) -> Result<T>
    where
        F: FnOnce(&mut FakeGitRepositoryState, &Path, &Path) -> T,
    {
        let mut state = self.state.lock();
        let git_event_tx = state.git_event_tx.clone();
        let entry = state.entry(dot_git).context("open .git")?;

        if let FakeFsEntry::Dir { git_repo_state, .. } = entry {
            let repo_state = git_repo_state.get_or_insert_with(|| {
                log::debug!("insert git state for {dot_git:?}");
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            // For a plain `.git` dir, state dir and common dir coincide.
            let result = f(&mut repo_state, dot_git, dot_git);

            drop(repo_state);
            if emit_git_event {
                state.emit_event([(dot_git, Some(PathEventKind::Changed))]);
            }

            Ok(result)
        } else if let FakeFsEntry::File {
            content,
            git_dir_path,
            ..
        } = &mut *entry
        {
            // Resolve (and cache) the `gitdir:` pointer stored in the gitfile.
            let path = match git_dir_path {
                Some(path) => path,
                None => {
                    let path = std::str::from_utf8(content)
                        .ok()
                        .and_then(|content| content.strip_prefix("gitdir:"))
                        .context("not a valid gitfile")?
                        .trim();
                    git_dir_path.insert(normalize_path(&dot_git.parent().unwrap().join(path)))
                }
            }
            .clone();
            let Some((git_dir_entry, canonical_path)) = state.try_entry(&path, true) else {
                anyhow::bail!("pointed-to git dir {path:?} not found")
            };
            let FakeFsEntry::Dir {
                git_repo_state,
                entries,
                ..
            } = git_dir_entry
            else {
                anyhow::bail!("gitfile points to a non-directory")
            };
            // A `commondir` file (relative or absolute path) identifies the
            // shared state directory of the primary repository.
            let common_dir = if let Some(child) = entries.get("commondir") {
                let raw = std::str::from_utf8(child.file_content("commondir".as_ref())?)
                    .context("commondir content")?
                    .trim();
                let raw_path = Path::new(raw);
                if raw_path.is_relative() {
                    normalize_path(&canonical_path.join(raw_path))
                } else {
                    raw_path.to_owned()
                }
            } else {
                canonical_path.clone()
            };
            let repo_state = git_repo_state.get_or_insert_with(|| {
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            let result = f(&mut repo_state, &canonical_path, &common_dir);

            if emit_git_event {
                drop(repo_state);
                state.emit_event([(canonical_path, Some(PathEventKind::Changed))]);
            }

            Ok(result)
        } else {
            anyhow::bail!("not a valid git repository");
        }
    }
1955
1956 pub fn with_git_state<T, F>(&self, dot_git: &Path, emit_git_event: bool, f: F) -> Result<T>
1957 where
1958 F: FnOnce(&mut FakeGitRepositoryState) -> T,
1959 {
1960 self.with_git_state_and_paths(dot_git, emit_git_event, |state, _, _| f(state))
1961 }
1962
1963 pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
1964 self.with_git_state(dot_git, true, |state| {
1965 let branch = branch.map(Into::into);
1966 state.branches.extend(branch.clone());
1967 state.current_branch_name = branch
1968 })
1969 .unwrap();
1970 }
1971
1972 pub fn set_remote_for_repo(
1973 &self,
1974 dot_git: &Path,
1975 name: impl Into<String>,
1976 url: impl Into<String>,
1977 ) {
1978 self.with_git_state(dot_git, true, |state| {
1979 state.remotes.insert(name.into(), url.into());
1980 })
1981 .unwrap();
1982 }
1983
1984 pub fn insert_branches(&self, dot_git: &Path, branches: &[&str]) {
1985 self.with_git_state(dot_git, true, |state| {
1986 if let Some(first) = branches.first()
1987 && state.current_branch_name.is_none()
1988 {
1989 state.current_branch_name = Some(first.to_string())
1990 }
1991 state
1992 .branches
1993 .extend(branches.iter().map(ToString::to_string));
1994 })
1995 .unwrap();
1996 }
1997
    /// Simulates `git worktree add`: records the worktree's ref in the git
    /// state, creates the `.git/worktrees/<name>/` bookkeeping files (`HEAD`,
    /// `commondir`, `gitdir`), and creates the checkout directory containing a
    /// `.git` gitfile that points back at the bookkeeping directory.
    pub async fn add_linked_worktree_for_repo(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        worktree: Worktree,
    ) {
        let ref_name = worktree
            .ref_name
            .as_ref()
            .expect("linked worktree must have a ref_name");
        let branch_name = ref_name
            .strip_prefix("refs/heads/")
            .unwrap_or(ref_name.as_ref());

        // Create ref in git state.
        self.with_git_state(dot_git, false, |state| {
            state
                .refs
                .insert(ref_name.to_string(), worktree.sha.to_string());
        })
        .unwrap();

        // Create .git/worktrees/<name>/ directory with HEAD, commondir, and gitdir.
        let worktrees_entry_dir = dot_git.join("worktrees").join(branch_name);
        self.create_dir(&worktrees_entry_dir).await.unwrap();

        self.write_file_internal(
            worktrees_entry_dir.join("HEAD"),
            format!("ref: {ref_name}").into_bytes(),
            false,
        )
        .unwrap();

        self.write_file_internal(
            worktrees_entry_dir.join("commondir"),
            dot_git.to_string_lossy().into_owned().into_bytes(),
            false,
        )
        .unwrap();

        let worktree_dot_git = worktree.path.join(".git");
        self.write_file_internal(
            worktrees_entry_dir.join("gitdir"),
            worktree_dot_git.to_string_lossy().into_owned().into_bytes(),
            false,
        )
        .unwrap();

        // Create the worktree checkout directory with a .git file pointing back.
        self.create_dir(&worktree.path).await.unwrap();

        self.write_file_internal(
            &worktree_dot_git,
            format!("gitdir: {}", worktrees_entry_dir.display()).into_bytes(),
            false,
        )
        .unwrap();

        if emit_git_event {
            // Touch the git state purely to emit a single change event.
            self.with_git_state(dot_git, true, |_| {}).unwrap();
        }
    }
2060
    /// Simulates `git worktree remove`: deletes both the worktree checkout
    /// directory (located via the `gitdir` bookkeeping file) and the
    /// `.git/worktrees/<name>/` directory itself.
    pub async fn remove_worktree_for_repo(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        ref_name: &str,
    ) {
        let branch_name = ref_name.strip_prefix("refs/heads/").unwrap_or(ref_name);
        let worktrees_entry_dir = dot_git.join("worktrees").join(branch_name);

        // Read gitdir to find the worktree checkout path.
        let gitdir_content = self
            .load_internal(worktrees_entry_dir.join("gitdir"))
            .await
            .unwrap();
        let gitdir_str = String::from_utf8(gitdir_content).unwrap();
        // `gitdir` stores the path of the checkout's `.git` file; its parent
        // is the checkout directory itself.
        let worktree_path = PathBuf::from(gitdir_str.trim())
            .parent()
            .map(PathBuf::from)
            .unwrap_or_default();

        // Remove the worktree checkout directory.
        self.remove_dir(
            &worktree_path,
            RemoveOptions {
                recursive: true,
                ignore_if_not_exists: true,
            },
        )
        .await
        .unwrap();

        // Remove the .git/worktrees/<name>/ directory.
        self.remove_dir(
            &worktrees_entry_dir,
            RemoveOptions {
                recursive: true,
                ignore_if_not_exists: false,
            },
        )
        .await
        .unwrap();

        if emit_git_event {
            // Touch the git state purely to emit a single change event.
            self.with_git_state(dot_git, true, |_| {}).unwrap();
        }
    }
2107
2108 pub fn set_unmerged_paths_for_repo(
2109 &self,
2110 dot_git: &Path,
2111 unmerged_state: &[(RepoPath, UnmergedStatus)],
2112 ) {
2113 self.with_git_state(dot_git, true, |state| {
2114 state.unmerged_paths.clear();
2115 state.unmerged_paths.extend(
2116 unmerged_state
2117 .iter()
2118 .map(|(path, content)| (path.clone(), *content)),
2119 );
2120 })
2121 .unwrap();
2122 }
2123
2124 pub fn set_index_for_repo(&self, dot_git: &Path, index_state: &[(&str, String)]) {
2125 self.with_git_state(dot_git, true, |state| {
2126 state.index_contents.clear();
2127 state.index_contents.extend(
2128 index_state
2129 .iter()
2130 .map(|(path, content)| (repo_path(path), content.clone())),
2131 );
2132 })
2133 .unwrap();
2134 }
2135
2136 pub fn set_head_for_repo(
2137 &self,
2138 dot_git: &Path,
2139 head_state: &[(&str, String)],
2140 sha: impl Into<String>,
2141 ) {
2142 self.with_git_state(dot_git, true, |state| {
2143 state.head_contents.clear();
2144 state.head_contents.extend(
2145 head_state
2146 .iter()
2147 .map(|(path, content)| (repo_path(path), content.clone())),
2148 );
2149 state.refs.insert("HEAD".into(), sha.into());
2150 })
2151 .unwrap();
2152 }
2153
2154 pub fn set_head_and_index_for_repo(&self, dot_git: &Path, contents_by_path: &[(&str, String)]) {
2155 self.with_git_state(dot_git, true, |state| {
2156 state.head_contents.clear();
2157 state.head_contents.extend(
2158 contents_by_path
2159 .iter()
2160 .map(|(path, contents)| (repo_path(path), contents.clone())),
2161 );
2162 state.index_contents = state.head_contents.clone();
2163 })
2164 .unwrap();
2165 }
2166
    /// Sets the merge-base contents, assigning each path a synthetic OID.
    ///
    /// NOTE(review): the synthetic OIDs are built from `n.to_string().repeat(20)`,
    /// which is exactly 20 bytes only for n in 1..=9 — with ten or more paths
    /// the `Oid::from_bytes(...).unwrap()` presumably panics; confirm the
    /// expected byte length of `Oid::from_bytes`.
    pub fn set_merge_base_content_for_repo(
        &self,
        dot_git: &Path,
        contents_by_path: &[(&str, String)],
    ) {
        self.with_git_state(dot_git, true, |state| {
            use git::Oid;

            state.merge_base_contents.clear();
            // Deterministic fake OIDs: "111…", "222…", etc.
            let oids = (1..)
                .map(|n| n.to_string())
                .map(|n| Oid::from_bytes(n.repeat(20).as_bytes()).unwrap());
            for ((path, content), oid) in contents_by_path.iter().zip(oids) {
                state.merge_base_contents.insert(repo_path(path), oid);
                state.oids.insert(oid, content.clone());
            }
        })
        .unwrap();
    }
2186
2187 pub fn set_blame_for_repo(&self, dot_git: &Path, blames: Vec<(RepoPath, git::blame::Blame)>) {
2188 self.with_git_state(dot_git, true, |state| {
2189 state.blames.clear();
2190 state.blames.extend(blames);
2191 })
2192 .unwrap();
2193 }
2194
2195 pub fn set_graph_commits(&self, dot_git: &Path, commits: Vec<Arc<InitialGraphCommitData>>) {
2196 self.with_git_state(dot_git, true, |state| {
2197 state.graph_commits = commits;
2198 })
2199 .unwrap();
2200 }
2201
2202 pub fn set_graph_error(&self, dot_git: &Path, error: Option<String>) {
2203 self.with_git_state(dot_git, true, |state| {
2204 state.simulated_graph_error = error;
2205 })
2206 .unwrap();
2207 }
2208
    /// Put the given git repository into a state with the given status,
    /// by mutating the head, index, and unmerged state.
    ///
    /// Every file currently under the repository's working directory is
    /// inspected; files not mentioned in `statuses` are stored identically in
    /// head and index (i.e. unmodified). Status entries are matched against
    /// repo-relative unix-style paths.
    pub fn set_status_for_repo(&self, dot_git: &Path, statuses: &[(&str, FileStatus)]) {
        let workdir_path = dot_git.parent().unwrap();
        let workdir_contents = self.files_with_contents(workdir_path);
        self.with_git_state(dot_git, true, |state| {
            // Rebuild head/index/unmerged state from scratch.
            state.index_contents.clear();
            state.head_contents.clear();
            state.unmerged_paths.clear();
            for (path, content) in workdir_contents {
                use util::{paths::PathStyle, rel_path::RelPath};

                let repo_path = RelPath::new(path.strip_prefix(&workdir_path).unwrap(), PathStyle::local()).unwrap();
                let repo_path = RepoPath::from_rel_path(&repo_path);
                let status = statuses
                    .iter()
                    .find_map(|(p, status)| (*p == repo_path.as_unix_str()).then_some(status));
                let mut content = String::from_utf8_lossy(&content).to_string();

                // Contents to record for this path in index and head;
                // `None` means the path is absent from that snapshot.
                let mut index_content = None;
                let mut head_content = None;
                match status {
                    // No requested status: the file is clean, so head and
                    // index both match the working copy.
                    None => {
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    // Untracked/ignored files exist only in the working copy.
                    Some(FileStatus::Untracked | FileStatus::Ignored) => {}
                    Some(FileStatus::Unmerged(unmerged_status)) => {
                        state
                            .unmerged_paths
                            .insert(repo_path.clone(), *unmerged_status);
                        // Make the stored snapshots differ from the working
                        // copy so the file reads as conflicted.
                        content.push_str(" (unmerged)");
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    Some(FileStatus::Tracked(TrackedStatus {
                        index_status,
                        worktree_status,
                    })) => {
                        // First derive the index contents from the worktree
                        // status...
                        match worktree_status {
                            StatusCode::Modified => {
                                let mut content = content.clone();
                                content.push_str(" (modified in working copy)");
                                index_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                index_content = Some(content.clone());
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted | StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                        // ...then derive the head contents from the index
                        // status.
                        match index_status {
                            StatusCode::Modified => {
                                let mut content = index_content.clone().expect(
                                    "file cannot be both modified in index and created in working copy",
                                );
                                content.push_str(" (modified in index)");
                                head_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                head_content = Some(index_content.clone().expect("file cannot be both unmodified in index and created in working copy"));
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted => {
                                head_content = Some("".into());
                            }
                            StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                    }
                };

                if let Some(content) = index_content {
                    state.index_contents.insert(repo_path.clone(), content);
                }
                if let Some(content) = head_content {
                    state.head_contents.insert(repo_path.clone(), content);
                }
            }
        }).unwrap();
    }
2293
2294 pub fn set_error_message_for_index_write(&self, dot_git: &Path, message: Option<String>) {
2295 self.with_git_state(dot_git, true, |state| {
2296 state.simulated_index_write_error_message = message;
2297 })
2298 .unwrap();
2299 }
2300
2301 pub fn set_create_worktree_error(&self, dot_git: &Path, message: Option<String>) {
2302 self.with_git_state(dot_git, true, |state| {
2303 state.simulated_create_worktree_error = message;
2304 })
2305 .unwrap();
2306 }
2307
2308 pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
2309 let mut result = Vec::new();
2310 let mut queue = collections::VecDeque::new();
2311 let state = &*self.state.lock();
2312 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2313 while let Some((path, entry)) = queue.pop_front() {
2314 if let FakeFsEntry::Dir { entries, .. } = entry {
2315 for (name, entry) in entries {
2316 queue.push_back((path.join(name), entry));
2317 }
2318 }
2319 if include_dot_git
2320 || !path
2321 .components()
2322 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2323 {
2324 result.push(path);
2325 }
2326 }
2327 result
2328 }
2329
2330 pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
2331 let mut result = Vec::new();
2332 let mut queue = collections::VecDeque::new();
2333 let state = &*self.state.lock();
2334 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2335 while let Some((path, entry)) = queue.pop_front() {
2336 if let FakeFsEntry::Dir { entries, .. } = entry {
2337 for (name, entry) in entries {
2338 queue.push_back((path.join(name), entry));
2339 }
2340 if include_dot_git
2341 || !path
2342 .components()
2343 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2344 {
2345 result.push(path);
2346 }
2347 }
2348 }
2349 result
2350 }
2351
2352 pub fn files(&self) -> Vec<PathBuf> {
2353 let mut result = Vec::new();
2354 let mut queue = collections::VecDeque::new();
2355 let state = &*self.state.lock();
2356 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2357 while let Some((path, entry)) = queue.pop_front() {
2358 match entry {
2359 FakeFsEntry::File { .. } => result.push(path),
2360 FakeFsEntry::Dir { entries, .. } => {
2361 for (name, entry) in entries {
2362 queue.push_back((path.join(name), entry));
2363 }
2364 }
2365 FakeFsEntry::Symlink { .. } => {}
2366 }
2367 }
2368 result
2369 }
2370
2371 pub fn files_with_contents(&self, prefix: &Path) -> Vec<(PathBuf, Vec<u8>)> {
2372 let mut result = Vec::new();
2373 let mut queue = collections::VecDeque::new();
2374 let state = &*self.state.lock();
2375 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2376 while let Some((path, entry)) = queue.pop_front() {
2377 match entry {
2378 FakeFsEntry::File { content, .. } => {
2379 if path.starts_with(prefix) {
2380 result.push((path, content.clone()));
2381 }
2382 }
2383 FakeFsEntry::Dir { entries, .. } => {
2384 for (name, entry) in entries {
2385 queue.push_back((path.join(name), entry));
2386 }
2387 }
2388 FakeFsEntry::Symlink { .. } => {}
2389 }
2390 }
2391 result
2392 }
2393
2394 /// How many `read_dir` calls have been issued.
2395 pub fn read_dir_call_count(&self) -> usize {
2396 self.state.lock().read_dir_call_count
2397 }
2398
2399 pub fn watched_paths(&self) -> Vec<PathBuf> {
2400 let state = self.state.lock();
2401 state
2402 .event_txs
2403 .iter()
2404 .filter_map(|(path, tx)| Some(path.clone()).filter(|_| !tx.is_closed()))
2405 .collect()
2406 }
2407
2408 /// How many `metadata` calls have been issued.
2409 pub fn metadata_call_count(&self) -> usize {
2410 self.state.lock().metadata_call_count
2411 }
2412
2413 /// How many write operations have been issued for a specific path.
2414 pub fn write_count_for_path(&self, path: impl AsRef<Path>) -> usize {
2415 let path = path.as_ref().to_path_buf();
2416 self.state
2417 .lock()
2418 .path_write_counts
2419 .get(&path)
2420 .copied()
2421 .unwrap_or(0)
2422 }
2423
2424 pub fn emit_fs_event(&self, path: impl Into<PathBuf>, event: Option<PathEventKind>) {
2425 self.state.lock().emit_event(std::iter::once((path, event)));
2426 }
2427
    /// Yields for a random, executor-controlled amount of simulated time, to
    /// shake out ordering assumptions in tests.
    fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
        self.executor.simulate_random_delay()
    }
2431
2432 /// Returns list of all tracked trash entries.
2433 pub fn trash_entries(&self) -> Vec<TrashedEntry> {
2434 self.state
2435 .lock()
2436 .trash
2437 .iter()
2438 .map(|(entry, _)| entry.clone())
2439 .collect()
2440 }
2441
    /// Removes the directory at `path` from the fake filesystem, returning
    /// the removed entry, or `None` when it did not exist and
    /// `options.ignore_if_not_exists` is set.
    async fn remove_dir_inner(
        &self,
        path: &Path,
        options: RemoveOptions,
    ) -> Result<Option<FakeFsEntry>> {
        self.simulate_random_delay().await;

        let path = normalize_path(path);
        let parent_path = path.parent().context("cannot remove the root")?;
        let base_name = path.file_name().context("cannot remove the root")?;

        let mut state = self.state.lock();
        let parent_entry = state.entry(parent_path)?;
        let entry = parent_entry
            .dir_entries(parent_path)?
            .entry(base_name.to_str().unwrap().into());

        let removed = match entry {
            btree_map::Entry::Vacant(_) => {
                if !options.ignore_if_not_exists {
                    anyhow::bail!("{path:?} does not exist");
                }

                None
            }
            btree_map::Entry::Occupied(mut entry) => {
                {
                    // Verify the target is a directory and, unless removing
                    // recursively, that it is empty.
                    let children = entry.get_mut().dir_entries(&path)?;
                    if !options.recursive && !children.is_empty() {
                        anyhow::bail!("{path:?} is not empty");
                    }
                }

                Some(entry.remove())
            }
        };

        // NOTE: a `Removed` event is emitted even when nothing was removed.
        state.emit_event([(path, Some(PathEventKind::Removed))]);
        Ok(removed)
    }
2482
    /// Removes the file at `path` from the fake filesystem, returning the
    /// removed entry, or `None` when it did not exist and
    /// `options.ignore_if_not_exists` is set. Fails if the entry is not a
    /// regular file.
    async fn remove_file_inner(
        &self,
        path: &Path,
        options: RemoveOptions,
    ) -> Result<Option<FakeFsEntry>> {
        self.simulate_random_delay().await;

        let path = normalize_path(path);
        let parent_path = path.parent().context("cannot remove the root")?;
        let base_name = path.file_name().unwrap();
        let mut state = self.state.lock();
        let parent_entry = state.entry(parent_path)?;
        let entry = parent_entry
            .dir_entries(parent_path)?
            .entry(base_name.to_str().unwrap().into());
        let removed = match entry {
            btree_map::Entry::Vacant(_) => {
                if !options.ignore_if_not_exists {
                    anyhow::bail!("{path:?} does not exist");
                }

                None
            }
            btree_map::Entry::Occupied(mut entry) => {
                // Errors (naming the path) if the entry is not a regular file.
                entry.get_mut().file_content(&path)?;
                Some(entry.remove())
            }
        };

        // NOTE: a `Removed` event is emitted even when nothing was removed.
        state.emit_event([(path, Some(PathEventKind::Removed))]);
        Ok(removed)
    }
2515}
2516
#[cfg(feature = "test-support")]
impl FakeFsEntry {
    /// Whether this entry is a regular file.
    fn is_file(&self) -> bool {
        match self {
            Self::File { .. } => true,
            _ => false,
        }
    }

    /// Whether this entry is a symlink.
    fn is_symlink(&self) -> bool {
        match self {
            Self::Symlink { .. } => true,
            _ => false,
        }
    }

    /// Returns the file's contents, or an error naming `path` if this entry
    /// is not a regular file.
    fn file_content(&self, path: &Path) -> Result<&Vec<u8>> {
        match self {
            Self::File { content, .. } => Ok(content),
            _ => anyhow::bail!("not a file: {path:?}"),
        }
    }

    /// Returns the directory's mutable child map, or an error naming `path`
    /// if this entry is not a directory.
    fn dir_entries(&mut self, path: &Path) -> Result<&mut BTreeMap<String, FakeFsEntry>> {
        match self {
            Self::Dir { entries, .. } => Ok(entries),
            _ => anyhow::bail!("not a directory: {path:?}"),
        }
    }
}
2543
#[cfg(feature = "test-support")]
struct FakeWatcher {
    // Channel on which matching event batches are delivered to the subscriber.
    tx: smol::channel::Sender<Vec<PathEvent>>,
    // The path this watcher was created for; paths under it are already
    // covered and need not be re-registered by `add`.
    original_path: PathBuf,
    // Shared fake-filesystem state, used to register additional watched paths.
    fs_state: Arc<Mutex<FakeFsState>>,
    // All prefixes this watcher reports events for: the original path plus
    // anything added later via `Watcher::add`.
    prefixes: Mutex<Vec<PathBuf>>,
}
2551
#[cfg(feature = "test-support")]
impl Watcher for FakeWatcher {
    /// Starts reporting events for `path` too, unless it is already covered
    /// by the watcher's original path.
    fn add(&self, path: &Path) -> Result<()> {
        if path.starts_with(&self.original_path) {
            return Ok(());
        }
        let registration = (path.to_owned(), self.tx.clone());
        self.fs_state
            .try_lock()
            .unwrap()
            .event_txs
            .push(registration);
        self.prefixes.lock().push(path.to_owned());
        Ok(())
    }

    /// Removal is a no-op for the fake watcher.
    fn remove(&self, _: &Path) -> Result<()> {
        Ok(())
    }
}
2571
#[cfg(feature = "test-support")]
#[derive(Debug)]
struct FakeHandle {
    // Inode of the entry this handle was opened on; used to track the entry
    // across renames via `FakeFsState::moves`.
    inode: u64,
}
2577
#[cfg(feature = "test-support")]
impl FileHandle for FakeHandle {
    /// Resolves the handle's current path after a rename, failing if the
    /// entry was never moved or its destination no longer exists.
    fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf> {
        let fake = fs.as_fake();
        let mut state = fake.state.lock();
        let target = state
            .moves
            .get(&self.inode)
            .cloned()
            .context("fake fd not moved")?;

        anyhow::ensure!(
            state.try_entry(&target, false).is_some(),
            "fake fd target not found"
        );
        Ok(target)
    }
}
2593
2594#[cfg(feature = "test-support")]
2595#[async_trait::async_trait]
2596impl Fs for FakeFs {
    /// Creates `path` and any missing ancestor directories, then emits one
    /// batch of `Created` events covering the directories actually created.
    async fn create_dir(&self, path: &Path) -> Result<()> {
        self.simulate_random_delay().await;

        let mut created_dirs = Vec::new();
        let mut cur_path = PathBuf::new();
        for component in path.components() {
            // Root and drive-prefix components always exist; skip them.
            let should_skip = matches!(component, Component::Prefix(..) | Component::RootDir);
            cur_path.push(component);
            if should_skip {
                continue;
            }
            // The state lock is re-acquired for each path component.
            let mut state = self.state.lock();

            // Note: an inode/mtime pair is consumed even when this component
            // already exists.
            let inode = state.get_and_increment_inode();
            let mtime = state.get_and_increment_mtime();
            state.write_path(&cur_path, |entry| {
                entry.or_insert_with(|| {
                    created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
                    FakeFsEntry::Dir {
                        inode,
                        mtime,
                        len: 0,
                        entries: Default::default(),
                        git_repo_state: None,
                    }
                });
                Ok(())
            })?
        }

        self.state.lock().emit_event(created_dirs);
        Ok(())
    }
2630
    /// Creates an empty file at `path`. An existing entry is replaced when
    /// `options.overwrite` is set (emitting `Changed` instead of `Created`);
    /// otherwise an existing path is an error unless
    /// `options.ignore_if_exists`.
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
        self.simulate_random_delay().await;
        let mut state = self.state.lock();
        let inode = state.get_and_increment_inode();
        let mtime = state.get_and_increment_mtime();
        let file = FakeFsEntry::File {
            inode,
            mtime,
            len: 0,
            content: Vec::new(),
            git_dir_path: None,
        };
        let mut kind = Some(PathEventKind::Created);
        state.write_path(path, |entry| {
            match entry {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        kind = Some(PathEventKind::Changed);
                        *e.get_mut() = file;
                    } else if !options.ignore_if_exists {
                        anyhow::bail!("path already exists: {path:?}");
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(file);
                }
            }
            Ok(())
        })?;
        state.emit_event([(path, kind)]);
        Ok(())
    }
2663
2664 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
2665 let mut state = self.state.lock();
2666 let file = FakeFsEntry::Symlink { target };
2667 state
2668 .write_path(path.as_ref(), move |e| match e {
2669 btree_map::Entry::Vacant(e) => {
2670 e.insert(file);
2671 Ok(())
2672 }
2673 btree_map::Entry::Occupied(mut e) => {
2674 *e.get_mut() = file;
2675 Ok(())
2676 }
2677 })
2678 .unwrap();
2679 state.emit_event([(path, Some(PathEventKind::Created))]);
2680
2681 Ok(())
2682 }
2683
2684 async fn create_file_with(
2685 &self,
2686 path: &Path,
2687 mut content: Pin<&mut (dyn AsyncRead + Send)>,
2688 ) -> Result<()> {
2689 let mut bytes = Vec::new();
2690 content.read_to_end(&mut bytes).await?;
2691 self.write_file_internal(path, bytes, true)?;
2692 Ok(())
2693 }
2694
    /// Unpacks every regular-file entry of the tar archive beneath `path`,
    /// creating parent directories as needed. Non-file entries (directories,
    /// links, etc.) are skipped.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()> {
        let mut entries = content.entries()?;
        while let Some(entry) = entries.next().await {
            let mut entry = entry?;
            if entry.header().entry_type().is_file() {
                // Entry paths are archive-relative; join them under `path`.
                let path = path.join(entry.path()?.as_ref());
                let mut bytes = Vec::new();
                entry.read_to_end(&mut bytes).await?;
                self.create_dir(path.parent().unwrap()).await?;
                self.write_file_internal(&path, bytes, true)?;
            }
        }
        Ok(())
    }
2713
    /// Moves the entry at `old_path` to `new_path`, recording the move by
    /// inode so open handles can be re-resolved, and emits a `Removed` event
    /// for the old path plus a `Created` event for the new one.
    async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let old_path = normalize_path(old_path);
        let new_path = normalize_path(new_path);

        if options.create_parents {
            if let Some(parent) = new_path.parent() {
                self.create_dir(parent).await?;
            }
        }

        let mut state = self.state.lock();
        // Read (clone) the entry being moved, failing if it doesn't exist.
        let moved_entry = state.write_path(&old_path, |e| {
            if let btree_map::Entry::Occupied(e) = e {
                Ok(e.get().clone())
            } else {
                anyhow::bail!("path does not exist: {old_path:?}")
            }
        })?;

        // Symlinks carry no inode here; they are recorded under inode 0.
        let inode = match moved_entry {
            FakeFsEntry::File { inode, .. } => inode,
            FakeFsEntry::Dir { inode, .. } => inode,
            _ => 0,
        };

        state.moves.insert(inode, new_path.clone());

        state.write_path(&new_path, |e| {
            match e {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        *e.get_mut() = moved_entry;
                    } else if !options.ignore_if_exists {
                        anyhow::bail!("path already exists: {new_path:?}");
                    }
                    // NOTE: when the destination exists and
                    // `ignore_if_exists` is set, the source is still removed
                    // below without being written anywhere.
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(moved_entry);
                }
            }
            Ok(())
        })?;

        // The source was verified to exist above, so removal cannot fail.
        state
            .write_path(&old_path, |e| {
                if let btree_map::Entry::Occupied(e) = e {
                    Ok(e.remove())
                } else {
                    unreachable!()
                }
            })
            .unwrap();

        state.emit_event([
            (old_path, Some(PathEventKind::Removed)),
            (new_path, Some(PathEventKind::Created)),
        ]);
        Ok(())
    }
2775
2776 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
2777 self.simulate_random_delay().await;
2778
2779 let source = normalize_path(source);
2780 let target = normalize_path(target);
2781 let mut state = self.state.lock();
2782 let mtime = state.get_and_increment_mtime();
2783 let inode = state.get_and_increment_inode();
2784 let source_entry = state.entry(&source)?;
2785 let content = source_entry.file_content(&source)?.clone();
2786 let mut kind = Some(PathEventKind::Created);
2787 state.write_path(&target, |e| match e {
2788 btree_map::Entry::Occupied(e) => {
2789 if options.overwrite {
2790 kind = Some(PathEventKind::Changed);
2791 Ok(Some(e.get().clone()))
2792 } else if !options.ignore_if_exists {
2793 anyhow::bail!("{target:?} already exists");
2794 } else {
2795 Ok(None)
2796 }
2797 }
2798 btree_map::Entry::Vacant(e) => Ok(Some(
2799 e.insert(FakeFsEntry::File {
2800 inode,
2801 mtime,
2802 len: content.len() as u64,
2803 content,
2804 git_dir_path: None,
2805 })
2806 .clone(),
2807 )),
2808 })?;
2809 state.emit_event([(target, kind)]);
2810 Ok(())
2811 }
2812
2813 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2814 self.remove_dir_inner(path, options).await.map(|_| ())
2815 }
2816
    /// Moves the entry at `path` into the fake trash, returning a
    /// `TrashedEntry` that can later be passed to `restore`. The entry id and
    /// name are both simply the file name.
    async fn trash(&self, path: &Path, options: RemoveOptions) -> Result<TrashedEntry> {
        let normalized_path = normalize_path(path);
        let parent_path = normalized_path.parent().context("cannot remove the root")?;
        let base_name = normalized_path.file_name().unwrap();
        let result = if self.is_dir(path).await {
            self.remove_dir_inner(path, options).await?
        } else {
            self.remove_file_inner(path, options).await?
        };

        match result {
            Some(fake_entry) => {
                let trashed_entry = TrashedEntry {
                    id: base_name.to_str().unwrap().into(),
                    name: base_name.to_str().unwrap().into(),
                    original_parent: parent_path.to_path_buf(),
                };

                let mut state = self.state.lock();
                state.trash.push((trashed_entry.clone(), fake_entry));
                Ok(trashed_entry)
            }
            // `ignore_if_not_exists` removals yield `None`; trashing still
            // reports an error since there is nothing to restore.
            None => anyhow::bail!("{normalized_path:?} does not exist"),
        }
    }
2842
2843 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2844 self.remove_file_inner(path, options).await.map(|_| ())
2845 }
2846
2847 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
2848 let bytes = self.load_internal(path).await?;
2849 Ok(Box::new(io::Cursor::new(bytes)))
2850 }
2851
2852 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
2853 self.simulate_random_delay().await;
2854 let mut state = self.state.lock();
2855 let inode = match state.entry(path)? {
2856 FakeFsEntry::File { inode, .. } => *inode,
2857 FakeFsEntry::Dir { inode, .. } => *inode,
2858 _ => unreachable!(),
2859 };
2860 Ok(Arc::new(FakeHandle { inode }))
2861 }
2862
2863 async fn load(&self, path: &Path) -> Result<String> {
2864 let content = self.load_internal(path).await?;
2865 Ok(String::from_utf8(content)?)
2866 }
2867
2868 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
2869 self.load_internal(path).await
2870 }
2871
2872 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
2873 self.simulate_random_delay().await;
2874 let path = normalize_path(path.as_path());
2875 if let Some(path) = path.parent() {
2876 self.create_dir(path).await?;
2877 }
2878 self.write_file_internal(path, data.into_bytes(), true)?;
2879 Ok(())
2880 }
2881
2882 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
2883 self.simulate_random_delay().await;
2884 let path = normalize_path(path);
2885 let content = text::chunks_with_line_ending(text, line_ending).collect::<String>();
2886 if let Some(path) = path.parent() {
2887 self.create_dir(path).await?;
2888 }
2889 self.write_file_internal(path, content.into_bytes(), false)?;
2890 Ok(())
2891 }
2892
2893 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
2894 self.simulate_random_delay().await;
2895 let path = normalize_path(path);
2896 if let Some(path) = path.parent() {
2897 self.create_dir(path).await?;
2898 }
2899 self.write_file_internal(path, content.to_vec(), false)?;
2900 Ok(())
2901 }
2902
2903 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
2904 let path = normalize_path(path);
2905 self.simulate_random_delay().await;
2906 let state = self.state.lock();
2907 let canonical_path = state
2908 .canonicalize(&path, true)
2909 .with_context(|| format!("path does not exist: {path:?}"))?;
2910 Ok(canonical_path)
2911 }
2912
2913 async fn is_file(&self, path: &Path) -> bool {
2914 let path = normalize_path(path);
2915 self.simulate_random_delay().await;
2916 let mut state = self.state.lock();
2917 if let Some((entry, _)) = state.try_entry(&path, true) {
2918 entry.is_file()
2919 } else {
2920 false
2921 }
2922 }
2923
2924 async fn is_dir(&self, path: &Path) -> bool {
2925 self.metadata(path)
2926 .await
2927 .is_ok_and(|metadata| metadata.is_some_and(|metadata| metadata.is_dir))
2928 }
2929
    /// Returns metadata for `path`, or `Ok(None)` if it does not exist.
    ///
    /// Symlinks are reported with the metadata of their target (with
    /// `is_symlink` set); a dangling symlink yields `Ok(None)`. The fake
    /// filesystem never reports executables or FIFOs.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let mut state = self.state.lock();
        state.metadata_call_count += 1;
        if let Some((mut entry, _)) = state.try_entry(&path, false) {
            let is_symlink = entry.is_symlink();
            if is_symlink {
                // Follow the link; a missing target reads as absent.
                if let Some(e) = state.try_entry(&path, true).map(|e| e.0) {
                    entry = e;
                } else {
                    return Ok(None);
                }
            }

            Ok(Some(match &*entry {
                FakeFsEntry::File {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: false,
                    is_symlink,
                    is_fifo: false,
                    is_executable: false,
                },
                FakeFsEntry::Dir {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: true,
                    is_symlink,
                    is_fifo: false,
                    is_executable: false,
                },
                // Symlinks were resolved above.
                FakeFsEntry::Symlink { .. } => unreachable!(),
            }))
        } else {
            Ok(None)
        }
    }
2974
2975 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
2976 self.simulate_random_delay().await;
2977 let path = normalize_path(path);
2978 let mut state = self.state.lock();
2979 let (entry, _) = state
2980 .try_entry(&path, false)
2981 .with_context(|| format!("path does not exist: {path:?}"))?;
2982 if let FakeFsEntry::Symlink { target } = entry {
2983 Ok(target.clone())
2984 } else {
2985 anyhow::bail!("not a symlink: {path:?}")
2986 }
2987 }
2988
2989 async fn read_dir(
2990 &self,
2991 path: &Path,
2992 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
2993 self.simulate_random_delay().await;
2994 let path = normalize_path(path);
2995 let mut state = self.state.lock();
2996 state.read_dir_call_count += 1;
2997 let entry = state.entry(&path)?;
2998 let children = entry.dir_entries(&path)?;
2999 let paths = children
3000 .keys()
3001 .map(|file_name| Ok(path.join(file_name)))
3002 .collect::<Vec<_>>();
3003 Ok(Box::pin(futures::stream::iter(paths)))
3004 }
3005
    /// Registers a watcher for `path` and returns a stream of event batches.
    ///
    /// A batch is delivered if at least one of its events falls under one of
    /// the watcher's prefixes (the original path plus anything added later
    /// via `Watcher::add`); note the whole batch passes through unfiltered
    /// once any event matches. The debounce duration is ignored.
    async fn watch(
        &self,
        path: &Path,
        _: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    ) {
        self.simulate_random_delay().await;
        let (tx, rx) = smol::channel::unbounded();
        let path = path.to_path_buf();
        self.state.lock().event_txs.push((path.clone(), tx.clone()));
        let executor = self.executor.clone();
        let watcher = Arc::new(FakeWatcher {
            tx,
            original_path: path.to_owned(),
            fs_state: self.state.clone(),
            prefixes: Mutex::new(vec![path]),
        });
        (
            Box::pin(futures::StreamExt::filter(rx, {
                let watcher = watcher.clone();
                move |events| {
                    // Decide synchronously whether any event matches a prefix,
                    // then yield through a random delay before reporting it.
                    let result = events.iter().any(|evt_path| {
                        watcher
                            .prefixes
                            .lock()
                            .iter()
                            .any(|prefix| evt_path.path.starts_with(prefix))
                    });
                    let executor = executor.clone();
                    async move {
                        executor.simulate_random_delay().await;
                        result
                    }
                }
            })),
            watcher,
        )
    }
3046
    /// Opens the fake git repository stored at `abs_dot_git`, constructing a
    /// `FakeGitRepository` backed by this filesystem. The system git binary
    /// argument is ignored.
    fn open_repo(
        &self,
        abs_dot_git: &Path,
        _system_git_binary: Option<&Path>,
    ) -> Result<Arc<dyn GitRepository>> {
        self.with_git_state_and_paths(
            abs_dot_git,
            false,
            |_, repository_dir_path, common_dir_path| {
                Arc::new(fake_git_repo::FakeGitRepository {
                    fs: self.this.upgrade().unwrap(),
                    executor: self.executor.clone(),
                    dot_git_path: abs_dot_git.to_path_buf(),
                    repository_dir_path: repository_dir_path.to_owned(),
                    common_dir_path: common_dir_path.to_owned(),
                    checkpoints: Arc::default(),
                    is_trusted: Arc::default(),
                }) as _
            },
        )
    }
3068
3069 async fn git_init(
3070 &self,
3071 abs_work_directory_path: &Path,
3072 _fallback_branch_name: String,
3073 ) -> Result<()> {
3074 self.create_dir(&abs_work_directory_path.join(".git")).await
3075 }
3076
3077 async fn git_clone(&self, _repo_url: &str, _abs_work_directory: &Path) -> Result<()> {
3078 anyhow::bail!("Git clone is not supported in fake Fs")
3079 }
3080
    /// Always true for `FakeFs`.
    fn is_fake(&self) -> bool {
        true
    }
3084
    /// The fake filesystem always behaves case-sensitively.
    async fn is_case_sensitive(&self) -> bool {
        true
    }
3088
3089 fn subscribe_to_jobs(&self) -> JobEventReceiver {
3090 let (sender, receiver) = futures::channel::mpsc::unbounded();
3091 self.state.lock().job_event_subscribers.lock().push(sender);
3092 receiver
3093 }
3094
    /// Restores a previously trashed entry to its original location.
    ///
    /// Fails with `NotFound` when the entry is not in the trash, or with
    /// `Collision` when something already exists at the original path.
    async fn restore(&self, trashed_entry: TrashedEntry) -> Result<PathBuf, TrashRestoreError> {
        let mut state = self.state.lock();

        let Some((trashed_entry, fake_entry)) = state
            .trash
            .iter()
            .find(|(entry, _)| *entry == trashed_entry)
            .cloned()
        else {
            return Err(TrashRestoreError::NotFound {
                path: PathBuf::from(trashed_entry.id),
            });
        };

        let path = trashed_entry
            .original_parent
            .join(trashed_entry.name.clone());

        // Reinsert only if the slot is still vacant.
        let result = state.write_path(&path, |entry| match entry {
            btree_map::Entry::Vacant(entry) => {
                entry.insert(fake_entry);
                Ok(())
            }
            btree_map::Entry::Occupied(_) => {
                anyhow::bail!("Failed to restore {:?}", path);
            }
        });

        match result {
            Ok(_) => {
                state.trash.retain(|(entry, _)| *entry != trashed_entry);
                state.emit_event([(path.clone(), Some(PathEventKind::Created))]);
                Ok(path)
            }
            Err(_) => {
                // For now we'll just assume that this failed because it was a
                // collision error, which I think that, for the time being, is
                // the only case where this could fail?
                Err(TrashRestoreError::Collision { path })
            }
        }
    }
3137
    /// Downcasts this `Fs` to the concrete `FakeFs`.
    #[cfg(feature = "test-support")]
    fn as_fake(&self) -> Arc<FakeFs> {
        self.this.upgrade().unwrap()
    }
3142}
3143
/// Recursively copies `source` (a file or a directory tree) to `target`
/// using the given `Fs` implementation.
///
/// Directories are created fresh at the destination: unless the existence
/// check below errors out first, any existing destination directory is
/// removed recursively before being recreated. Files are copied individually
/// with `options` applied.
pub async fn copy_recursive<'a>(
    fs: &'a dyn Fs,
    source: &'a Path,
    target: &'a Path,
    options: CopyOptions,
) -> Result<()> {
    for (item, is_dir) in read_dir_items(fs, source).await? {
        let Ok(item_relative_path) = item.strip_prefix(source) else {
            continue;
        };
        // The root item maps onto `target` itself; children map onto the
        // corresponding subpaths.
        let target_item = if item_relative_path == Path::new("") {
            target.to_path_buf()
        } else {
            target.join(item_relative_path)
        };
        if is_dir {
            if !options.overwrite && fs.metadata(&target_item).await.is_ok_and(|m| m.is_some()) {
                if options.ignore_if_exists {
                    continue;
                } else {
                    anyhow::bail!("{target_item:?} already exists");
                }
            }
            // Best-effort removal of any existing directory, then recreate it
            // empty.
            let _ = fs
                .remove_dir(
                    &target_item,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await;
            fs.create_dir(&target_item).await?;
        } else {
            fs.copy_file(&item, &target_item, options).await?;
        }
    }
    Ok(())
}
3183
3184/// Recursively reads all of the paths in the given directory.
3185///
3186/// Returns a vector of tuples of (path, is_dir).
3187pub async fn read_dir_items<'a>(fs: &'a dyn Fs, source: &'a Path) -> Result<Vec<(PathBuf, bool)>> {
3188 let mut items = Vec::new();
3189 read_recursive(fs, source, &mut items).await?;
3190 Ok(items)
3191}
3192
/// Depth-first walk used by `read_dir_items`: appends `(path, is_dir)` for
/// `source` and, if it is a directory, for everything beneath it.
///
/// Boxed because async recursion requires a heap-allocated future.
fn read_recursive<'a>(
    fs: &'a dyn Fs,
    source: &'a Path,
    output: &'a mut Vec<(PathBuf, bool)>,
) -> BoxFuture<'a, Result<()>> {
    use futures::future::FutureExt;

    async move {
        let metadata = fs
            .metadata(source)
            .await?
            .with_context(|| format!("path does not exist: {source:?}"))?;

        if metadata.is_dir {
            output.push((source.to_path_buf(), true));
            let mut children = fs.read_dir(source).await?;
            while let Some(child_path) = children.next().await {
                // Children whose paths could not be read are skipped silently.
                if let Ok(child_path) = child_path {
                    read_recursive(fs, &child_path, output).await?;
                }
            }
        } else {
            output.push((source.to_path_buf(), false));
        }
        Ok(())
    }
    .boxed()
}
3221
3222// todo(windows)
3223// can we get file id not open the file twice?
3224// https://github.com/rust-lang/rust/issues/63010
/// Returns a stable per-volume file identifier for `path` on Windows, built
/// from the high and low file-index words of the handle information.
#[cfg(target_os = "windows")]
async fn file_id(path: impl AsRef<Path>) -> Result<u64> {
    use std::os::windows::io::AsRawHandle;

    use smol::fs::windows::OpenOptionsExt;
    use windows::Win32::{
        Foundation::HANDLE,
        Storage::FileSystem::{
            BY_HANDLE_FILE_INFORMATION, FILE_FLAG_BACKUP_SEMANTICS, GetFileInformationByHandle,
        },
    };

    // FILE_FLAG_BACKUP_SEMANTICS — presumably so directories can be opened
    // too; confirm against the Win32 CreateFile documentation.
    let file = smol::fs::OpenOptions::new()
        .read(true)
        .custom_flags(FILE_FLAG_BACKUP_SEMANTICS.0)
        .open(path)
        .await?;

    let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() };
    // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle
    // This function supports Windows XP+
    smol::unblock(move || {
        // SAFETY: `file` is moved into this closure so the raw handle stays
        // valid for the call, and `info` is a live, writable out-parameter.
        unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? };

        Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64))
    })
    .await
}
3253
/// Atomically replaces `replaced_file` with `replacement_file` via the Win32
/// `ReplaceFileW` API. The destination is created first (best-effort) so
/// `ReplaceFileW` always has a file to replace.
#[cfg(target_os = "windows")]
fn atomic_replace<P: AsRef<Path>>(
    replaced_file: P,
    replacement_file: P,
) -> windows::core::Result<()> {
    use windows::{
        Win32::Storage::FileSystem::{REPLACE_FILE_FLAGS, ReplaceFileW},
        core::HSTRING,
    };

    // If the file does not exist, create it.
    let _ = std::fs::File::create_new(replaced_file.as_ref());

    // SAFETY: both HSTRING arguments are valid, NUL-terminated wide strings
    // that outlive the call; all optional parameters are passed as None.
    unsafe {
        ReplaceFileW(
            &HSTRING::from(replaced_file.as_ref().to_string_lossy().into_owned()),
            &HSTRING::from(replacement_file.as_ref().to_string_lossy().into_owned()),
            None,
            REPLACE_FILE_FLAGS::default(),
            None,
            None,
        )
    }
}