1pub mod fs_watcher;
2
3use parking_lot::Mutex;
4use slotmap::{KeyData, SlotMap};
5use std::ffi::OsString;
6use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};
7use std::time::Instant;
8use util::maybe;
9
10use anyhow::{Context as _, Result, anyhow};
11use futures::stream::iter;
12use gpui::App;
13use gpui::BackgroundExecutor;
14use gpui::Global;
15use gpui::ReadGlobal as _;
16use gpui::SharedString;
17use std::borrow::Cow;
18#[cfg(unix)]
19use std::ffi::CString;
20use util::command::new_command;
21
22#[cfg(unix)]
23use std::os::fd::{AsFd, AsRawFd};
24#[cfg(unix)]
25use std::os::unix::ffi::OsStrExt;
26
27#[cfg(unix)]
28use std::os::unix::fs::{FileTypeExt, MetadataExt};
29
30#[cfg(any(target_os = "macos", target_os = "freebsd"))]
31use std::mem::MaybeUninit;
32
33use async_tar::Archive;
34use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture};
35use git::repository::{GitRepository, RealGitRepository};
36use is_executable::IsExecutable;
37use rope::Rope;
38use serde::{Deserialize, Serialize};
39use smol::io::AsyncWriteExt;
40#[cfg(feature = "test-support")]
41use std::path::Component;
42use std::{
43 io::{self, Write},
44 path::{Path, PathBuf},
45 pin::Pin,
46 sync::Arc,
47 time::{Duration, SystemTime, UNIX_EPOCH},
48};
49use tempfile::TempDir;
50use text::LineEnding;
51
52#[cfg(feature = "test-support")]
53mod fake_git_repo;
54#[cfg(feature = "test-support")]
55use collections::{BTreeMap, btree_map};
56#[cfg(feature = "test-support")]
57use fake_git_repo::FakeGitRepositoryState;
58#[cfg(feature = "test-support")]
59use git::{
60 repository::{InitialGraphCommitData, RepoPath, Worktree, repo_path},
61 status::{FileStatus, StatusCode, TrackedStatus, UnmergedStatus},
62};
63#[cfg(feature = "test-support")]
64use util::normalize_path;
65
66#[cfg(feature = "test-support")]
67use smol::io::AsyncReadExt;
68#[cfg(feature = "test-support")]
69use std::ffi::OsStr;
70
/// Registers and unregisters paths with a filesystem-watching backend.
pub trait Watcher: Send + Sync {
    /// Start watching `path` for changes.
    fn add(&self, path: &Path) -> Result<()>;
    /// Stop watching `path`.
    fn remove(&self, path: &Path) -> Result<()>;
}
75
/// The kind of change a filesystem watcher observed for a path.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum PathEventKind {
    Removed,
    Created,
    Changed,
    // Emitted when precise events were lost and the path should be re-scanned.
    // NOTE(review): semantics inferred from the variant name — confirm with the
    // watcher backend.
    Rescan,
}
83
/// A single change reported by a filesystem watcher.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct PathEvent {
    /// The path the event refers to.
    pub path: PathBuf,
    /// The kind of change, if the backend could determine it.
    pub kind: Option<PathEventKind>,
}
89
90impl From<PathEvent> for PathBuf {
91 fn from(event: PathEvent) -> Self {
92 event.path
93 }
94}
95
/// Abstraction over the filesystem, implemented by [`RealFs`] for the real OS
/// filesystem and by `FakeFs` in tests.
#[async_trait::async_trait]
pub trait Fs: Send + Sync {
    /// Creates a directory at `path`, including any missing parents.
    async fn create_dir(&self, path: &Path) -> Result<()>;
    /// Creates a symlink at `path` pointing at `target`.
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()>;
    /// Creates a file at `path`; see [`CreateOptions`] for overwrite behavior.
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
    /// Creates a file at `path` and streams `content` into it.
    async fn create_file_with(
        &self,
        path: &Path,
        content: Pin<&mut (dyn AsyncRead + Send)>,
    ) -> Result<()>;
    /// Unpacks a tar archive into the directory at `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()>;
    /// Copies a single file; see [`CopyOptions`] for overwrite behavior.
    async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
    /// Renames `source` to `target`; see [`RenameOptions`].
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;

    /// Removes a directory from the filesystem.
    /// There is no expectation that the directory will be preserved in the
    /// system trash.
    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;

    /// Moves a file or directory to the system trash.
    /// Returns a [`TrashId`] that can be used to keep track of the location of
    /// the trashed item in the system's trash and later passed to
    /// [`Fs::restore`].
    async fn trash(&self, path: &Path, options: RemoveOptions) -> Result<TrashId>;

    /// Removes a file from the filesystem.
    /// There is no expectation that the file will be preserved in the system
    /// trash.
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;

    /// Opens a [`FileHandle`] that can later report the file's current path.
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>>;
    /// Opens the file for blocking, synchronous reads.
    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>>;
    /// Loads the file's contents as UTF-8 text.
    async fn load(&self, path: &Path) -> Result<String> {
        Ok(String::from_utf8(self.load_bytes(path).await?)?)
    }
    /// Loads the file's raw bytes.
    async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>>;
    /// Writes `text` to `path` atomically (temp file + rename in [`RealFs`]).
    async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
    /// Writes a [`Rope`] to `path` with the given line endings, creating
    /// parent directories as needed.
    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()>;
    /// Writes raw bytes to `path`, creating parent directories as needed.
    async fn write(&self, path: &Path, content: &[u8]) -> Result<()>;
    /// Returns the canonical form of `path`.
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
    /// Whether `path` exists and is a regular file (after following symlinks).
    async fn is_file(&self, path: &Path) -> bool;
    /// Whether `path` exists and is a directory (after following symlinks).
    async fn is_dir(&self, path: &Path) -> bool;
    /// Returns metadata for `path`, or `Ok(None)` when it does not exist.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
    /// Returns the target of the symlink at `path`.
    async fn read_link(&self, path: &Path) -> Result<PathBuf>;
    /// Streams the entries of the directory at `path`.
    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;

    /// Watches `path` for changes. `latency` controls event delivery timing
    /// (see `fs_watcher`). Returns the event stream and a [`Watcher`] handle
    /// for adding/removing watched paths.
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    );

    /// Opens the git repository whose `.git` directory is at `abs_dot_git`.
    fn open_repo(
        &self,
        abs_dot_git: &Path,
        system_git_binary_path: Option<&Path>,
    ) -> Result<Arc<dyn GitRepository>>;
    /// Initializes a git repository in `abs_work_directory`.
    async fn git_init(&self, abs_work_directory: &Path, fallback_branch_name: String)
        -> Result<()>;
    /// Clones `repo_url` into `abs_work_directory`.
    async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()>;
    /// Whether this is a fake, in-memory implementation (tests only).
    fn is_fake(&self) -> bool;
    /// Whether the underlying filesystem treats paths case-sensitively.
    async fn is_case_sensitive(&self) -> bool;
    /// Subscribes to [`JobEvent`]s for long-running filesystem jobs.
    fn subscribe_to_jobs(&self) -> JobEventReceiver;

    /// Restores a given trashed item, moving it from the system's trash back
    /// to the original path.
    async fn restore(&self, item: TrashId) -> std::result::Result<PathBuf, TrashRestoreError>;

    #[cfg(feature = "test-support")]
    fn as_fake(&self) -> Arc<FakeFs> {
        panic!("called as_fake on a real fs");
    }
}
178
179// We use our own type rather than `trash::TrashItem` directly to avoid carrying
180// over fields we don't need (e.g. `time_deleted`) and to insulate callers and
181// tests from changes to that crate's API surface.
182/// Represents a file or directory that has been moved to the system trash,
183/// retaining enough information to restore it to its original location.
/// Represents a file or directory that has been moved to the system trash,
/// retaining enough information to restore it to its original location.
#[derive(Clone, PartialEq, Debug)]
struct TrashedEntry {
    /// Platform-specific identifier for the file/directory in the trash.
    ///
    /// * Freedesktop – Path to the `.trashinfo` file.
    /// * macOS & Windows – Full path to the file/directory in the system's
    ///   trash.
    pub id: OsString,
    /// Name of the file/directory at the time of trashing, including extension.
    pub name: OsString,
    /// Absolute path to the parent directory at the time of trashing.
    pub original_parent: PathBuf,
}
197
198impl From<trash::TrashItem> for TrashedEntry {
199 fn from(item: trash::TrashItem) -> Self {
200 Self {
201 id: item.id,
202 name: item.name,
203 original_parent: item.original_parent,
204 }
205 }
206}
207
208impl TrashedEntry {
209 fn into_trash_item(self) -> trash::TrashItem {
210 trash::TrashItem {
211 id: self.id,
212 name: self.name,
213 original_parent: self.original_parent,
214 // `TrashedEntry` doesn't preserve `time_deleted` as we don't
215 // currently need it for restore, so we default it to 0 here.
216 time_deleted: 0,
217 }
218 }
219}
220
/// Errors that can occur when restoring an item from the system trash via
/// [`Fs::restore`].
#[derive(Debug, thiserror::Error)]
pub enum TrashRestoreError {
    #[error("The specified `path` ({}) was not found in the system's trash.", path.display())]
    NotFound { path: PathBuf },
    #[error("File or directory ({}) already exists at the restore destination.", path.display())]
    Collision { path: PathBuf },
    // This should never occur: the only way to get a `TrashId` is from the
    // trash operation, and the undo that restores it consumes the
    // `Change::Trashed`. However, with remoting a duplicated message could
    // trigger a second restore, and we don't want to crash the app in that
    // case — hence this variant.
    #[error("The item was already restored")]
    AlreadyRestored,
    #[error("Unknown error ({description})")]
    Unknown { description: String },
}
235
impl From<trash::Error> for TrashRestoreError {
    /// Maps the `trash` crate's errors onto our restore error type.
    fn from(err: trash::Error) -> Self {
        match err {
            trash::Error::RestoreCollision { path, .. } => Self::Collision { path },
            // Keep the crate's raw description rather than its `Display` text.
            trash::Error::Unknown { description } => Self::Unknown { description },
            // Everything else is flattened into `Unknown` via `Display`.
            other => Self::Unknown {
                description: other.to_string(),
            },
        }
    }
}
247
/// Newtype storing the app-wide [`Fs`] implementation as a GPUI global.
struct GlobalFs(Arc<dyn Fs>);

impl Global for GlobalFs {}
251
impl dyn Fs {
    /// Returns the global [`Fs`].
    ///
    /// Panics if [`Fs::set_global`] has not been called yet.
    pub fn global(cx: &App) -> Arc<Self> {
        GlobalFs::global(cx).0.clone()
    }

    /// Sets the global [`Fs`].
    pub fn set_global(fs: Arc<Self>, cx: &mut App) {
        cx.set_global(GlobalFs(fs));
    }
}
263
/// Options for [`Fs::create_file`].
#[derive(Copy, Clone, Default)]
pub struct CreateOptions {
    /// Truncate the file if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed silently (leaving the existing file
    /// untouched) instead of erroring if the file already exists.
    pub ignore_if_exists: bool,
}
269
/// Options for [`Fs::copy_file`].
#[derive(Copy, Clone, Default)]
pub struct CopyOptions {
    /// Replace the target if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed silently instead of erroring if the
    /// target already exists.
    pub ignore_if_exists: bool,
}
275
/// Options for [`Fs::rename`].
#[derive(Copy, Clone, Default)]
pub struct RenameOptions {
    /// Replace the target if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed silently instead of erroring if the
    /// target already exists.
    pub ignore_if_exists: bool,
    /// Whether to create parent directories if they do not exist.
    pub create_parents: bool,
}
283
/// Options for [`Fs::remove_dir`], [`Fs::remove_file`], and [`Fs::trash`].
#[derive(Copy, Clone, Default)]
pub struct RemoveOptions {
    /// Remove directory contents recursively.
    pub recursive: bool,
    /// Treat a missing path as success rather than an error.
    pub ignore_if_not_exists: bool,
}
289
/// Filesystem metadata for a single path, as returned by [`Fs::metadata`].
#[derive(Copy, Clone, Debug)]
pub struct Metadata {
    /// Inode number (on Windows, the file id queried from the handle).
    pub inode: u64,
    /// Last modification time.
    pub mtime: MTime,
    /// Whether the entry itself is a symlink. When it is, the remaining fields
    /// describe the symlink's target if it resolves (see `RealFs::metadata`).
    pub is_symlink: bool,
    pub is_dir: bool,
    /// Size in bytes.
    pub len: u64,
    /// Whether this is a FIFO (always `false` on Windows).
    pub is_fifo: bool,
    pub is_executable: bool,
}
300
/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
///
/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(transparent)]
pub struct MTime(SystemTime);
310
/// Identifier for a long-running filesystem job (allocated from
/// `RealFs::next_job_id`).
pub type JobId = usize;

/// Metadata describing an in-flight filesystem job, broadcast via
/// [`JobEvent::Started`].
#[derive(Clone, Debug)]
pub struct JobInfo {
    /// When the job started.
    pub start: Instant,
    /// Human-readable description of the job.
    pub message: SharedString,
    pub id: JobId,
}
319
/// Lifecycle notifications for filesystem jobs, delivered to subscribers
/// registered via [`Fs::subscribe_to_jobs`].
#[derive(Debug, Clone)]
pub enum JobEvent {
    /// A job began; carries its full [`JobInfo`].
    Started { info: JobInfo },
    /// The job with the given id finished (sent when its tracker is dropped).
    Completed { id: JobId },
}

/// Sending half used to broadcast [`JobEvent`]s to a subscriber.
pub type JobEventSender = futures::channel::mpsc::UnboundedSender<JobEvent>;
/// Receiving half returned by [`Fs::subscribe_to_jobs`].
pub type JobEventReceiver = futures::channel::mpsc::UnboundedReceiver<JobEvent>;
328
/// RAII guard for a filesystem job: broadcasts [`JobEvent::Started`] on
/// construction and [`JobEvent::Completed`] when dropped.
struct JobTracker {
    id: JobId,
    subscribers: Arc<Mutex<Vec<JobEventSender>>>,
}
333
334impl JobTracker {
335 fn new(info: JobInfo, subscribers: Arc<Mutex<Vec<JobEventSender>>>) -> Self {
336 let id = info.id;
337 {
338 let mut subs = subscribers.lock();
339 subs.retain(|sender| {
340 sender
341 .unbounded_send(JobEvent::Started { info: info.clone() })
342 .is_ok()
343 });
344 }
345 Self { id, subscribers }
346 }
347}
348
349impl Drop for JobTracker {
350 fn drop(&mut self) {
351 let mut subs = self.subscribers.lock();
352 subs.retain(|sender| {
353 sender
354 .unbounded_send(JobEvent::Completed { id: self.id })
355 .is_ok()
356 });
357 }
358}
359
360impl MTime {
361 /// Conversion intended for persistence and testing.
362 pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
363 MTime(UNIX_EPOCH + Duration::new(secs, nanos))
364 }
365
366 /// Conversion intended for persistence.
367 pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
368 self.0
369 .duration_since(UNIX_EPOCH)
370 .ok()
371 .map(|duration| (duration.as_secs(), duration.subsec_nanos()))
372 }
373
374 /// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
375 /// "_for_user" is to discourage misuse - this method should not be used when making decisions
376 /// about file dirtiness.
377 pub fn timestamp_for_user(self) -> SystemTime {
378 self.0
379 }
380
381 /// Temporary method to split out the behavior changes from introduction of this newtype.
382 pub fn bad_is_greater_than(self, other: MTime) -> bool {
383 self.0 > other.0
384 }
385}
386
impl From<proto::Timestamp> for MTime {
    /// Reconstructs an `MTime` received over the wire protocol.
    fn from(timestamp: proto::Timestamp) -> Self {
        MTime(timestamp.into())
    }
}
392
impl From<MTime> for proto::Timestamp {
    /// Serializes an `MTime` for the wire protocol.
    fn from(mtime: MTime) -> Self {
        mtime.0.into()
    }
}
398
// Opaque slotmap key identifying an entry in `RealFs`'s in-memory trash
// registry; returned by `Fs::trash` and consumed by `Fs::restore`.
slotmap::new_key_type! { pub struct TrashId; }
400
401// TODO!: Should we convert these to `from_proto` and `to_proto` for the sake of
402// consistency with other `Id` types like:
403//
404// * `WorktreeId`
405// * `ChannelId`
406// * `ProjectId`
impl TrashId {
    /// Reconstructs a `TrashId` from the raw value produced by
    /// [`TrashId::to_u64`] (e.g. after a round-trip through serialization).
    pub fn from_u64(value: u64) -> Self {
        KeyData::from_ffi(value).into()
    }

    /// Returns the raw slotmap key data as a `u64`, suitable for
    /// serialization.
    pub fn to_u64(&self) -> u64 {
        self.0.as_ffi()
    }
}
416
/// [`Fs`] implementation backed by the real OS filesystem.
pub struct RealFs {
    /// Optional path to a bundled `git` binary (passed in at construction).
    bundled_git_binary_path: Option<PathBuf>,
    /// Executor used to run blocking filesystem calls off the caller's thread.
    executor: BackgroundExecutor,
    /// Source of fresh [`JobId`]s.
    next_job_id: Arc<AtomicUsize>,
    /// Channels of everyone subscribed via [`Fs::subscribe_to_jobs`].
    job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
    /// Entries moved to the system trash, keyed by the [`TrashId`] returned
    /// from [`Fs::trash`].
    trash: Arc<Mutex<SlotMap<TrashId, TrashedEntry>>>,
    // Presumably a cached tri-state result for `is_case_sensitive` — the
    // consuming code is outside this chunk; confirm.
    is_case_sensitive: AtomicU8,
}
425
/// An open file handle that can report the file's *current* path, even if the
/// file has been renamed since it was opened.
pub trait FileHandle: Send + Sync + std::fmt::Debug {
    /// Returns the path the underlying file currently has.
    fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf>;
}
429
430impl FileHandle for std::fs::File {
431 #[cfg(target_os = "macos")]
432 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
433 use std::{
434 ffi::{CStr, OsStr},
435 os::unix::ffi::OsStrExt,
436 };
437
438 let fd = self.as_fd();
439 let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit();
440
441 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) };
442 anyhow::ensure!(result != -1, "fcntl returned -1");
443
444 // SAFETY: `fcntl` will initialize the path buffer.
445 let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) };
446 anyhow::ensure!(!c_str.is_empty(), "Could find a path for the file handle");
447 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
448 Ok(path)
449 }
450
451 #[cfg(target_os = "linux")]
452 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
453 let fd = self.as_fd();
454 let fd_path = format!("/proc/self/fd/{}", fd.as_raw_fd());
455 let new_path = std::fs::read_link(fd_path)?;
456 if new_path
457 .file_name()
458 .is_some_and(|f| f.to_string_lossy().ends_with(" (deleted)"))
459 {
460 anyhow::bail!("file was deleted")
461 };
462
463 Ok(new_path)
464 }
465
466 #[cfg(target_os = "freebsd")]
467 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
468 use std::{
469 ffi::{CStr, OsStr},
470 os::unix::ffi::OsStrExt,
471 };
472
473 let fd = self.as_fd();
474 let mut kif = MaybeUninit::<libc::kinfo_file>::uninit();
475 kif.kf_structsize = libc::KINFO_FILE_SIZE;
476
477 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) };
478 anyhow::ensure!(result != -1, "fcntl returned -1");
479
480 // SAFETY: `fcntl` will initialize the kif.
481 let c_str = unsafe { CStr::from_ptr(kif.assume_init().kf_path.as_ptr()) };
482 anyhow::ensure!(!c_str.is_empty(), "Could find a path for the file handle");
483 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
484 Ok(path)
485 }
486
487 #[cfg(target_os = "windows")]
488 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
489 use std::ffi::OsString;
490 use std::os::windows::ffi::OsStringExt;
491 use std::os::windows::io::AsRawHandle;
492
493 use windows::Win32::Foundation::HANDLE;
494 use windows::Win32::Storage::FileSystem::{
495 FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW,
496 };
497
498 let handle = HANDLE(self.as_raw_handle() as _);
499
500 // Query required buffer size (in wide chars)
501 let required_len =
502 unsafe { GetFinalPathNameByHandleW(handle, &mut [], FILE_NAME_NORMALIZED) };
503 anyhow::ensure!(
504 required_len != 0,
505 "GetFinalPathNameByHandleW returned 0 length"
506 );
507
508 // Allocate buffer and retrieve the path
509 let mut buf: Vec<u16> = vec![0u16; required_len as usize + 1];
510 let written = unsafe { GetFinalPathNameByHandleW(handle, &mut buf, FILE_NAME_NORMALIZED) };
511 anyhow::ensure!(
512 written != 0,
513 "GetFinalPathNameByHandleW failed to write path"
514 );
515
516 let os_str: OsString = OsString::from_wide(&buf[..written as usize]);
517 anyhow::ensure!(!os_str.is_empty(), "Could find a path for the file handle");
518 Ok(PathBuf::from(os_str))
519 }
520}
521
// NOTE(review): not referenced elsewhere in this chunk — presumably a
// platform-specific watcher handle used by a `watch` implementation outside
// this view; confirm before documenting further.
pub struct RealWatcher {}
523
impl RealFs {
    /// Creates a real-filesystem [`Fs`].
    ///
    /// `git_binary_path` optionally points at a bundled `git` executable;
    /// `executor` runs blocking filesystem calls off the caller's thread.
    pub fn new(git_binary_path: Option<PathBuf>, executor: BackgroundExecutor) -> Self {
        Self {
            bundled_git_binary_path: git_binary_path,
            executor,
            next_job_id: Arc::new(AtomicUsize::new(0)),
            job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
            trash: Arc::new(Mutex::new(SlotMap::with_key())),
            is_case_sensitive: Default::default(),
        }
    }

    /// Canonicalizes `path` while preserving the drive letter / mount point
    /// the caller used, instead of the UNC path `std::fs::canonicalize`
    /// would produce for mapped network drives.
    #[cfg(target_os = "windows")]
    fn canonicalize(path: &Path) -> Result<PathBuf> {
        use std::ffi::OsString;
        use std::os::windows::ffi::OsStringExt;
        use windows::Win32::Storage::FileSystem::GetVolumePathNameW;
        use windows::core::HSTRING;

        // std::fs::canonicalize resolves mapped network paths to UNC paths, which can
        // confuse some software. To mitigate this, we canonicalize the input, then rebase
        // the result onto the input's original volume root if both paths are on the same
        // volume. This keeps the same drive letter or mount point the caller used.

        let abs_path = if path.is_relative() {
            std::env::current_dir()?.join(path)
        } else {
            path.to_path_buf()
        };

        let path_hstring = HSTRING::from(abs_path.as_os_str());
        let mut vol_buf = vec![0u16; abs_path.as_os_str().len() + 2];
        unsafe { GetVolumePathNameW(&path_hstring, &mut vol_buf)? };
        let volume_root = {
            // The buffer is NUL-terminated; trim at the first NUL.
            let len = vol_buf
                .iter()
                .position(|&c| c == 0)
                .unwrap_or(vol_buf.len());
            PathBuf::from(OsString::from_wide(&vol_buf[..len]))
        };

        let resolved_path = dunce::canonicalize(&abs_path)?;
        let resolved_root = dunce::canonicalize(&volume_root)?;

        // Rebase the canonical path onto the caller's volume root when both
        // resolve to the same volume; otherwise fall back to the canonical path.
        if let Ok(relative) = resolved_path.strip_prefix(&resolved_root) {
            let mut result = volume_root;
            result.push(relative);
            Ok(result)
        } else {
            Ok(resolved_path)
        }
    }
}
577
/// Atomically renames `source` to `target`, failing with `AlreadyExists` if
/// `target` exists (macOS: `renamex_np` + `RENAME_EXCL`; Linux: `renameat2` +
/// `RENAME_NOREPLACE`).
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn rename_without_replace(source: &Path, target: &Path) -> io::Result<()> {
    let source = path_to_c_string(source)?;
    let target = path_to_c_string(target)?;

    #[cfg(target_os = "macos")]
    let result = unsafe { libc::renamex_np(source.as_ptr(), target.as_ptr(), libc::RENAME_EXCL) };

    // The libc crate exposes no renameat2 wrapper, so go through syscall().
    #[cfg(target_os = "linux")]
    let result = unsafe {
        libc::syscall(
            libc::SYS_renameat2,
            libc::AT_FDCWD,
            source.as_ptr(),
            libc::AT_FDCWD,
            target.as_ptr(),
            libc::RENAME_NOREPLACE,
        )
    };

    if result == 0 {
        Ok(())
    } else {
        Err(io::Error::last_os_error())
    }
}
604
/// Atomically renames `source` to `target` without replacing an existing
/// target: `MOVE_FILE_FLAGS::default()` (0) omits `MOVEFILE_REPLACE_EXISTING`,
/// so `MoveFileExW` fails when `target` already exists.
#[cfg(target_os = "windows")]
fn rename_without_replace(source: &Path, target: &Path) -> io::Result<()> {
    use std::os::windows::ffi::OsStrExt;

    use windows::Win32::Storage::FileSystem::{MOVE_FILE_FLAGS, MoveFileExW};
    use windows::core::PCWSTR;

    // NUL-terminated UTF-16 strings for the Win32 API.
    let source: Vec<u16> = source.as_os_str().encode_wide().chain(Some(0)).collect();
    let target: Vec<u16> = target.as_os_str().encode_wide().chain(Some(0)).collect();

    unsafe {
        MoveFileExW(
            PCWSTR(source.as_ptr()),
            PCWSTR(target.as_ptr()),
            MOVE_FILE_FLAGS::default(),
        )
    }
    // Called immediately after the failing API, so last_os_error still holds
    // the relevant code.
    .map_err(|_| io::Error::last_os_error())
}
624
/// Converts a path into a NUL-terminated `CString` for libc calls, failing
/// with `InvalidInput` when the path contains an interior NUL byte.
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn path_to_c_string(path: &Path) -> io::Result<CString> {
    match CString::new(path.as_os_str().as_bytes()) {
        Ok(c_string) => Ok(c_string),
        Err(_) => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("path contains interior NUL: {}", path.display()),
        )),
    }
}
634
635#[async_trait::async_trait]
636impl Fs for RealFs {
637 async fn create_dir(&self, path: &Path) -> Result<()> {
638 Ok(smol::fs::create_dir_all(path).await?)
639 }
640
    /// Creates a symlink at `path` pointing at `target`. On Windows, directory
    /// targets get a junction (via `cmd /C mklink /J`) and file targets get a
    /// regular file symlink.
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
        #[cfg(unix)]
        smol::fs::unix::symlink(target, path).await?;

        #[cfg(windows)]
        if smol::fs::metadata(&target).await?.is_dir() {
            // `mklink` takes the link name first, then the target.
            let status = new_command("cmd")
                .args(["/C", "mklink", "/J"])
                .args([path, target.as_path()])
                .status()
                .await?;

            if !status.success() {
                return Err(anyhow::anyhow!(
                    "Failed to create junction from {:?} to {:?}",
                    path,
                    target
                ));
            }
        } else {
            smol::fs::windows::symlink_file(target, path).await?
        }

        Ok(())
    }
666
667 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
668 let mut open_options = smol::fs::OpenOptions::new();
669 open_options.write(true).create(true);
670 if options.overwrite {
671 open_options.truncate(true);
672 } else if !options.ignore_if_exists {
673 open_options.create_new(true);
674 }
675 open_options
676 .open(path)
677 .await
678 .with_context(|| format!("Failed to create file at {:?}", path))?;
679 Ok(())
680 }
681
682 async fn create_file_with(
683 &self,
684 path: &Path,
685 content: Pin<&mut (dyn AsyncRead + Send)>,
686 ) -> Result<()> {
687 let mut file = smol::fs::File::create(&path)
688 .await
689 .with_context(|| format!("Failed to create file at {:?}", path))?;
690 futures::io::copy(content, &mut file).await?;
691 Ok(())
692 }
693
    /// Unpacks the tar archive `content` into the directory at `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()> {
        content.unpack(path).await?;
        Ok(())
    }
702
703 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
704 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
705 if options.ignore_if_exists {
706 return Ok(());
707 } else {
708 anyhow::bail!("{target:?} already exists");
709 }
710 }
711
712 smol::fs::copy(source, target).await?;
713 Ok(())
714 }
715
    /// Renames `source` to `target` per [`RenameOptions`].
    ///
    /// When not overwriting, first tries the platform's atomic no-replace
    /// rename; on filesystems that don't support it, falls back to a (racy)
    /// metadata-existence check followed by a plain rename.
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
        if options.create_parents {
            if let Some(parent) = target.parent() {
                self.create_dir(parent).await?;
            }
        }

        if options.overwrite {
            smol::fs::rename(source, target).await?;
            return Ok(());
        }

        let use_metadata_fallback = {
            #[cfg(any(target_os = "macos", target_os = "linux", target_os = "windows"))]
            {
                let source = source.to_path_buf();
                let target = target.to_path_buf();
                // The rename is a blocking syscall; run it on the executor.
                match self
                    .executor
                    .spawn(async move { rename_without_replace(&source, &target) })
                    .await
                {
                    Ok(()) => return Ok(()),
                    Err(error) if error.kind() == io::ErrorKind::AlreadyExists => {
                        if options.ignore_if_exists {
                            return Ok(());
                        }
                        return Err(error.into());
                    }
                    Err(error)
                        if error.raw_os_error().is_some_and(|code| {
                            code == libc::ENOSYS
                                || code == libc::ENOTSUP
                                || code == libc::EOPNOTSUPP
                                || code == libc::EINVAL
                        }) =>
                    {
                        // For case when filesystem or kernel does not support atomic no-overwrite rename.
                        // EINVAL is returned by FUSE-based filesystems (e.g. NTFS via ntfs-3g)
                        // that don't support RENAME_NOREPLACE.
                        true
                    }
                    Err(error) => return Err(error.into()),
                }
            }

            #[cfg(not(any(target_os = "macos", target_os = "linux", target_os = "windows")))]
            {
                // For platforms which do not have an atomic no-overwrite rename yet.
                true
            }
        };

        // Racy fallback: check for an existing target, then rename.
        if use_metadata_fallback && smol::fs::metadata(target).await.is_ok() {
            if options.ignore_if_exists {
                return Ok(());
            } else {
                anyhow::bail!("{target:?} already exists");
            }
        }

        smol::fs::rename(source, target).await?;
        Ok(())
    }
780
781 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
782 let result = if options.recursive {
783 smol::fs::remove_dir_all(path).await
784 } else {
785 smol::fs::remove_dir(path).await
786 };
787 match result {
788 Ok(()) => Ok(()),
789 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
790 Ok(())
791 }
792 Err(err) => Err(err)?,
793 }
794 }
795
    /// Removes the file at `path`, optionally treating a missing path as
    /// success. On Windows, a directory symlink/junction must be removed via
    /// the directory API, so that case is special-cased first.
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        #[cfg(windows)]
        if let Ok(Some(metadata)) = self.metadata(path).await
            && metadata.is_symlink
            && metadata.is_dir
        {
            // Removes the link itself, not the directory it points to.
            self.remove_dir(
                path,
                RemoveOptions {
                    recursive: false,
                    ignore_if_not_exists: true,
                },
            )
            .await?;
            return Ok(());
        }

        match smol::fs::remove_file(path).await {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
                Ok(())
            }
            Err(err) => Err(err)?,
        }
    }
821
    /// Moves `path` to the system trash and records it in the in-memory trash
    /// registry, returning the [`TrashId`] needed for [`Fs::restore`].
    async fn trash(&self, path: &Path, _options: RemoveOptions) -> Result<TrashId> {
        // We must make the path absolute or trash will make a weird abomination
        // of the zed working directory (not usually the worktree) and whatever
        // the path variable holds.
        let path = self
            .canonicalize(path)
            .await
            .context("Could not canonicalize the path of the file")?;

        // The `trash` crate performs blocking platform calls, so run it on a
        // dedicated thread and await the result over a oneshot channel.
        let (tx, rx) = futures::channel::oneshot::channel();
        std::thread::Builder::new()
            .name("trash file or dir".to_string())
            .spawn(|| tx.send(trash::delete_with_info(path)))
            .expect("The os can spawn threads");

        let entry = rx
            .await
            .context("Tx dropped or fs.restore panicked")?
            .context("Could not trash file or dir")?
            .into();

        let id = self.trash.lock().insert(entry);
        Ok(id)
    }
846
847 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
848 Ok(Box::new(std::fs::File::open(path)?))
849 }
850
    /// Opens a read-only [`FileHandle`] that can later report the file's
    /// current path via `FileHandle::current_path`.
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
        let mut options = std::fs::OpenOptions::new();
        options.read(true);
        #[cfg(windows)]
        {
            use std::os::windows::fs::OpenOptionsExt;
            // FILE_FLAG_BACKUP_SEMANTICS allows opening directories as well as
            // regular files on Windows.
            options.custom_flags(windows::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS.0);
        }
        Ok(Arc::new(options.open(path)?))
    }
861
862 async fn load(&self, path: &Path) -> Result<String> {
863 let path = path.to_path_buf();
864 self.executor
865 .spawn(async move {
866 std::fs::read_to_string(&path)
867 .with_context(|| format!("Failed to read file {}", path.display()))
868 })
869 .await
870 }
871
872 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
873 let path = path.to_path_buf();
874 let bytes = self
875 .executor
876 .spawn(async move { std::fs::read(path) })
877 .await?;
878 Ok(bytes)
879 }
880
    /// Atomically replaces the file at `path` with `data` by writing to a
    /// temporary file and renaming it into place.
    #[cfg(not(target_os = "windows"))]
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        smol::unblock(move || {
            // Use the directory of the destination as temp dir to avoid
            // invalid cross-device link errors, falling back to the system
            // temp dir when the destination has no parent.
            // See https://github.com/zed-industries/zed/pull/8437 for more details.
            let mut tmp_file =
                tempfile::NamedTempFile::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
            tmp_file.write_all(data.as_bytes())?;
            // `persist` renames the temp file over the destination.
            tmp_file.persist(path)?;
            anyhow::Ok(())
        })
        .await?;

        Ok(())
    }
897
    /// Atomically replaces the file at `path` with `data` on Windows, using a
    /// temp file in the destination's directory plus `atomic_replace`.
    #[cfg(target_os = "windows")]
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        smol::unblock(move || {
            // If temp dir is set to a different drive than the destination,
            // we receive error:
            //
            // failed to persist temporary file:
            // The system cannot move the file to a different disk drive. (os error 17)
            //
            // This is because `ReplaceFileW` does not support cross volume moves.
            // See the remark section: "The backup file, replaced file, and replacement file must all reside on the same volume."
            // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-replacefilew#remarks
            //
            // So we use the directory of the destination as a temp dir to avoid it.
            // https://github.com/zed-industries/zed/issues/16571
            let temp_dir = TempDir::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
            let temp_file = {
                let temp_file_path = temp_dir.path().join("temp_file");
                let mut file = std::fs::File::create_new(&temp_file_path)?;
                file.write_all(data.as_bytes())?;
                temp_file_path
            };
            atomic_replace(path.as_path(), temp_file.as_path())?;
            anyhow::Ok(())
        })
        .await?;
        Ok(())
    }
926
927 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
928 let buffer_size = text.summary().len.min(10 * 1024);
929 if let Some(path) = path.parent() {
930 self.create_dir(path)
931 .await
932 .with_context(|| format!("Failed to create directory at {:?}", path))?;
933 }
934 let file = smol::fs::File::create(path)
935 .await
936 .with_context(|| format!("Failed to create file at {:?}", path))?;
937 let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
938 for chunk in text::chunks_with_line_ending(text, line_ending) {
939 writer.write_all(chunk.as_bytes()).await?;
940 }
941 writer.flush().await?;
942 Ok(())
943 }
944
945 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
946 if let Some(path) = path.parent() {
947 self.create_dir(path)
948 .await
949 .with_context(|| format!("Failed to create directory at {:?}", path))?;
950 }
951 let path = path.to_owned();
952 let contents = content.to_owned();
953 self.executor
954 .spawn(async move {
955 std::fs::write(path, contents)?;
956 Ok(())
957 })
958 .await
959 }
960
    /// Canonicalizes `path` on the background executor. On Windows this uses
    /// the drive-letter-preserving `Self::canonicalize`; elsewhere it uses
    /// `std::fs::canonicalize`.
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
        let path = path.to_owned();
        self.executor
            .spawn(async move {
                #[cfg(target_os = "windows")]
                let result = Self::canonicalize(&path);

                #[cfg(not(target_os = "windows"))]
                let result = std::fs::canonicalize(&path);

                result.with_context(|| format!("canonicalizing {path:?}"))
            })
            .await
    }
975
976 async fn is_file(&self, path: &Path) -> bool {
977 let path = path.to_owned();
978 self.executor
979 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) })
980 .await
981 }
982
983 async fn is_dir(&self, path: &Path) -> bool {
984 let path = path.to_owned();
985 self.executor
986 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) })
987 .await
988 }
989
    /// Returns [`Metadata`] for `path`, or `Ok(None)` when it does not exist.
    ///
    /// Symlinks are resolved: for a working symlink the target's metadata is
    /// returned (with `is_symlink: true`); for a broken or recursive symlink
    /// the symlink's own metadata is returned instead.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        let path_buf = path.to_owned();
        // Stat the entry itself without following symlinks.
        let symlink_metadata = match self
            .executor
            .spawn(async move { std::fs::symlink_metadata(&path_buf) })
            .await
        {
            Ok(metadata) => metadata,
            Err(err) => {
                return match err.kind() {
                    io::ErrorKind::NotFound | io::ErrorKind::NotADirectory => Ok(None),
                    _ => Err(anyhow::Error::new(err)),
                };
            }
        };

        let is_symlink = symlink_metadata.file_type().is_symlink();
        let metadata = if is_symlink {
            let path_buf = path.to_path_buf();
            // Read target metadata, if the target exists
            match self
                .executor
                .spawn(async move { std::fs::metadata(path_buf) })
                .await
            {
                Ok(target_metadata) => target_metadata,
                Err(err) => {
                    if err.kind() != io::ErrorKind::NotFound {
                        // TODO: Also FilesystemLoop when that's stable
                        log::warn!(
                            "Failed to read symlink target metadata for path {path:?}: {err}"
                        );
                    }
                    // For a broken or recursive symlink, return the symlink metadata. (Or
                    // as edge cases, a symlink into a directory we can't read, which is hard
                    // to distinguish from just being broken.)
                    symlink_metadata
                }
            }
        } else {
            symlink_metadata
        };

        #[cfg(unix)]
        let inode = metadata.ino();

        // Windows has no inodes; use the volume file id instead.
        #[cfg(windows)]
        let inode = file_id(path).await?;

        #[cfg(windows)]
        let is_fifo = false;

        #[cfg(unix)]
        let is_fifo = metadata.file_type().is_fifo();

        let path_buf = path.to_path_buf();
        let is_executable = self
            .executor
            .spawn(async move { path_buf.is_executable() })
            .await;

        Ok(Some(Metadata {
            inode,
            // Fall back to the epoch when the platform can't report an mtime.
            mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)),
            len: metadata.len(),
            is_symlink,
            is_dir: metadata.file_type().is_dir(),
            is_fifo,
            is_executable,
        }))
    }
1061
1062 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
1063 let path = path.to_owned();
1064 let path = self
1065 .executor
1066 .spawn(async move { std::fs::read_link(&path) })
1067 .await?;
1068 Ok(path)
1069 }
1070
1071 async fn read_dir(
1072 &self,
1073 path: &Path,
1074 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
1075 let path = path.to_owned();
1076 let result = iter(
1077 self.executor
1078 .spawn(async move { std::fs::read_dir(path) })
1079 .await?,
1080 )
1081 .map(|entry| match entry {
1082 Ok(entry) => Ok(entry.path()),
1083 Err(error) => Err(anyhow!("failed to read dir entry {error:?}")),
1084 });
1085 Ok(Box::pin(result))
1086 }
1087
    /// Watches `path` for changes, debounced by `latency`.
    ///
    /// Returns a stream of batched [`PathEvent`]s and the [`Watcher`] handle,
    /// which callers can use to register additional paths.
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    ) {
        use util::{ResultExt as _, paths::SanitizedPath};
        let executor = self.executor.clone();

        let (tx, rx) = smol::channel::unbounded();
        let pending_paths: Arc<Mutex<Vec<PathEvent>>> = Default::default();
        let watcher = Arc::new(fs_watcher::FsWatcher::new(tx, pending_paths.clone()));

        // If the path doesn't exist yet (e.g. settings.json), watch the parent dir to learn when it's created.
        if let Err(e) = watcher.add(path)
            && let Some(parent) = path.parent()
            && let Err(parent_e) = watcher.add(parent)
        {
            log::warn!(
                "Failed to watch {} and its parent directory {}:\n{e}\n{parent_e}",
                path.display(),
                parent.display()
            );
        }

        // Check if path is a symlink and follow the target parent
        if let Some(mut target) = self.read_link(path).await.ok() {
            log::trace!("watch symlink {path:?} -> {target:?}");
            // Check if symlink target is relative path, if so make it absolute
            if target.is_relative()
                && let Some(parent) = path.parent()
            {
                target = parent.join(target);
                if let Ok(canonical) = self.canonicalize(&target).await {
                    target = SanitizedPath::new(&canonical).as_path().to_path_buf();
                }
            }
            // Best effort: a failure to watch the symlink target is ignored,
            // and a failure to watch its parent is only logged.
            watcher.add(&target).ok();
            if let Some(parent) = target.parent() {
                watcher.add(parent).log_err();
            }
        }

        (
            Box::pin(rx.filter_map({
                let watcher = watcher.clone();
                let executor = executor.clone();
                move |_| {
                    // Capture the watcher so it stays alive for as long as the
                    // returned stream is held.
                    let _ = watcher.clone();
                    let pending_paths = pending_paths.clone();
                    let executor = executor.clone();
                    async move {
                        // Debounce: wait `latency`, then drain everything that
                        // accumulated in the meantime; empty batches are dropped.
                        executor.timer(latency).await;
                        let paths = std::mem::take(&mut *pending_paths.lock());
                        (!paths.is_empty()).then_some(paths)
                    }
                }
            })),
            watcher,
        )
    }
1151
1152 fn open_repo(
1153 &self,
1154 dotgit_path: &Path,
1155 system_git_binary_path: Option<&Path>,
1156 ) -> Result<Arc<dyn GitRepository>> {
1157 Ok(Arc::new(RealGitRepository::new(
1158 dotgit_path,
1159 self.bundled_git_binary_path.clone(),
1160 system_git_binary_path.map(|path| path.to_path_buf()),
1161 self.executor.clone(),
1162 )?))
1163 }
1164
1165 async fn git_init(
1166 &self,
1167 abs_work_directory_path: &Path,
1168 fallback_branch_name: String,
1169 ) -> Result<()> {
1170 let config = new_command("git")
1171 .current_dir(abs_work_directory_path)
1172 .args(&["config", "--global", "--get", "init.defaultBranch"])
1173 .output()
1174 .await?;
1175
1176 let branch_name;
1177
1178 if config.status.success() && !config.stdout.is_empty() {
1179 branch_name = String::from_utf8_lossy(&config.stdout);
1180 } else {
1181 branch_name = Cow::Borrowed(fallback_branch_name.as_str());
1182 }
1183
1184 new_command("git")
1185 .current_dir(abs_work_directory_path)
1186 .args(&["init", "-b"])
1187 .arg(branch_name.trim())
1188 .output()
1189 .await?;
1190
1191 Ok(())
1192 }
1193
1194 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()> {
1195 let job_id = self.next_job_id.fetch_add(1, Ordering::SeqCst);
1196 let job_info = JobInfo {
1197 id: job_id,
1198 start: Instant::now(),
1199 message: SharedString::from(format!("Cloning {}", repo_url)),
1200 };
1201
1202 let _job_tracker = JobTracker::new(job_info, self.job_event_subscribers.clone());
1203
1204 let output = new_command("git")
1205 .current_dir(abs_work_directory)
1206 .args(&["clone", repo_url])
1207 .output()
1208 .await?;
1209
1210 if !output.status.success() {
1211 anyhow::bail!(
1212 "git clone failed: {}",
1213 String::from_utf8_lossy(&output.stderr)
1214 );
1215 }
1216
1217 Ok(())
1218 }
1219
    /// Always `false`: this is the real-filesystem implementation, not the
    /// in-memory test double.
    fn is_fake(&self) -> bool {
        false
    }
1223
1224 fn subscribe_to_jobs(&self) -> JobEventReceiver {
1225 let (sender, receiver) = futures::channel::mpsc::unbounded();
1226 self.job_event_subscribers.lock().push(sender);
1227 receiver
1228 }
1229
    /// Checks whether the file system is case sensitive by attempting to create two files
    /// that have the same name except for the casing.
    ///
    /// It creates both files in a temporary directory it removes at the end.
    ///
    /// The result is cached in an atomic; on probe failure the answer defaults
    /// to case-sensitive (`true`) but is still cached.
    async fn is_case_sensitive(&self) -> bool {
        const UNINITIALIZED: u8 = 0;
        const CASE_SENSITIVE: u8 = 1;
        const NOT_CASE_SENSITIVE: u8 = 2;

        // Note we could CAS here, but really, if we race we do this work twice at worst which isn't a big deal.
        let load = self.is_case_sensitive.load(Ordering::Acquire);
        if load != UNINITIALIZED {
            return load == CASE_SENSITIVE;
        }
        let temp_dir = self.executor.spawn(async { TempDir::new() });
        let res = maybe!(async {
            let temp_dir = temp_dir.await?;
            // Same name, different case: creating both succeeds only on a
            // case-sensitive filesystem.
            let test_file_1 = temp_dir.path().join("case_sensitivity_test.tmp");
            let test_file_2 = temp_dir.path().join("CASE_SENSITIVITY_TEST.TMP");

            let create_opts = CreateOptions {
                overwrite: false,
                ignore_if_exists: false,
            };

            // Create file1
            self.create_file(&test_file_1, create_opts).await?;

            // Now check whether it's possible to create file2
            let case_sensitive = match self.create_file(&test_file_2, create_opts).await {
                Ok(_) => Ok(true),
                Err(e) => {
                    // Only AlreadyExists means "case-insensitive"; any other
                    // error is a genuine probe failure.
                    if let Some(io_error) = e.downcast_ref::<io::Error>() {
                        if io_error.kind() == io::ErrorKind::AlreadyExists {
                            Ok(false)
                        } else {
                            Err(e)
                        }
                    } else {
                        Err(e)
                    }
                }
            };

            temp_dir.close()?;
            case_sensitive
        }).await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });
        self.is_case_sensitive.store(
            if res {
                CASE_SENSITIVE
            } else {
                NOT_CASE_SENSITIVE
            },
            Ordering::Release,
        );
        res
    }
1292
1293 async fn restore(&self, item: TrashId) -> std::result::Result<PathBuf, TrashRestoreError> {
1294 let trashed_entry = self
1295 .trash
1296 .lock()
1297 .remove(item)
1298 .ok_or(TrashRestoreError::AlreadyRestored)?;
1299
1300 let restored_item_path = trashed_entry.original_parent.join(&trashed_entry.name);
1301
1302 let (tx, rx) = futures::channel::oneshot::channel();
1303 std::thread::Builder::new()
1304 .name("restore trashed item".to_string())
1305 .spawn(move || {
1306 let res = trash::restore_all([trashed_entry.into_trash_item()]);
1307 tx.send(res)
1308 })
1309 .expect("The OS can spawn a threads");
1310 rx.await.expect("Restore all never panics")?;
1311 Ok(restored_item_path)
1312 }
1313}
1314
#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
impl Watcher for RealWatcher {
    // On platforms other than Linux/FreeBSD these are no-ops — presumably the
    // platform watcher backing `RealWatcher` observes paths recursively, so
    // per-path registration is unnecessary. NOTE(review): confirm against the
    // platform watcher implementation.
    fn add(&self, _: &Path) -> Result<()> {
        Ok(())
    }

    fn remove(&self, _: &Path) -> Result<()> {
        Ok(())
    }
}
1325
/// An in-memory `Fs` implementation used in tests.
#[cfg(feature = "test-support")]
pub struct FakeFs {
    // Weak self-reference created via `Arc::new_cyclic` in `new` — presumably
    // used to hand out `Arc<Self>` from methods; confirm at the use sites.
    this: std::sync::Weak<Self>,
    // Use an unfair lock to ensure tests are deterministic.
    state: Arc<Mutex<FakeFsState>>,
    executor: gpui::BackgroundExecutor,
}
1333
/// Mutable state behind [`FakeFs`]: the simulated directory tree plus event
/// and bookkeeping machinery.
#[cfg(feature = "test-support")]
struct FakeFsState {
    /// Root directory of the simulated filesystem tree.
    root: FakeFsEntry,
    /// Next inode number to hand out.
    next_inode: u64,
    /// Next mtime to hand out; advanced by a fixed interval per use.
    next_mtime: SystemTime,
    /// Channel used by fake git repositories to report changes back to the fs.
    git_event_tx: smol::channel::Sender<PathBuf>,
    /// Registered watchers: the watched path and the channel to notify.
    event_txs: Vec<(PathBuf, smol::channel::Sender<Vec<PathEvent>>)>,
    /// When true, events accumulate in `buffered_events` instead of being sent.
    events_paused: bool,
    buffered_events: Vec<PathEvent>,
    /// Counters used by tests to assert how often the fs was touched.
    metadata_call_count: usize,
    read_dir_call_count: usize,
    path_write_counts: std::collections::HashMap<PathBuf, usize>,
    moves: std::collections::HashMap<u64, PathBuf>,
    job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
    /// Trashed entries by id, retaining the entry so it can be restored.
    trash: Mutex<SlotMap<TrashId, (TrashedEntry, FakeFsEntry)>>,
}
1350
/// A node in the fake filesystem tree.
#[cfg(feature = "test-support")]
#[derive(Clone, Debug)]
enum FakeFsEntry {
    File {
        inode: u64,
        mtime: MTime,
        len: u64,
        content: Vec<u8>,
        // The path to the repository state directory, if this is a gitfile.
        git_dir_path: Option<PathBuf>,
    },
    Dir {
        inode: u64,
        mtime: MTime,
        len: u64,
        /// Children keyed by file name.
        entries: BTreeMap<String, FakeFsEntry>,
        /// Lazily created fake git state, if this directory is a `.git` dir.
        git_repo_state: Option<Arc<Mutex<FakeGitRepositoryState>>>,
    },
    Symlink {
        target: PathBuf,
    },
}
1373
#[cfg(feature = "test-support")]
// Hand-written rather than derived because `git_repo_state` holds an
// `Arc<Mutex<..>>`, which is compared by pointer identity (`Arc::ptr_eq`)
// instead of by value.
impl PartialEq for FakeFsEntry {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (
                Self::File {
                    inode: l_inode,
                    mtime: l_mtime,
                    len: l_len,
                    content: l_content,
                    git_dir_path: l_git_dir_path,
                },
                Self::File {
                    inode: r_inode,
                    mtime: r_mtime,
                    len: r_len,
                    content: r_content,
                    git_dir_path: r_git_dir_path,
                },
            ) => {
                l_inode == r_inode
                    && l_mtime == r_mtime
                    && l_len == r_len
                    && l_content == r_content
                    && l_git_dir_path == r_git_dir_path
            }
            (
                Self::Dir {
                    inode: l_inode,
                    mtime: l_mtime,
                    len: l_len,
                    entries: l_entries,
                    git_repo_state: l_git_repo_state,
                },
                Self::Dir {
                    inode: r_inode,
                    mtime: r_mtime,
                    len: r_len,
                    entries: r_entries,
                    git_repo_state: r_git_repo_state,
                },
            ) => {
                // Repo state is equal only if both sides share the same Arc.
                let same_repo_state = match (l_git_repo_state.as_ref(), r_git_repo_state.as_ref()) {
                    (Some(l), Some(r)) => Arc::ptr_eq(l, r),
                    (None, None) => true,
                    _ => false,
                };
                l_inode == r_inode
                    && l_mtime == r_mtime
                    && l_len == r_len
                    && l_entries == r_entries
                    && same_repo_state
            }
            (Self::Symlink { target: l_target }, Self::Symlink { target: r_target }) => {
                l_target == r_target
            }
            _ => false,
        }
    }
}
1434
#[cfg(feature = "test-support")]
impl FakeFsState {
    /// Returns the current fake mtime and advances the clock by a fixed step,
    /// so consecutive writes always observe distinct timestamps.
    fn get_and_increment_mtime(&mut self) -> MTime {
        let mtime = self.next_mtime;
        self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
        MTime(mtime)
    }

    /// Allocates the next unique inode number.
    fn get_and_increment_inode(&mut self) -> u64 {
        let inode = self.next_inode;
        self.next_inode += 1;
        inode
    }

    /// Resolves `target` against the in-memory tree, expanding `.`/`..` and
    /// symlinks, and returns the canonical absolute path.
    ///
    /// Intermediate symlinks are always followed; the final component is only
    /// followed when `follow_symlink` is true. Returns `None` when any
    /// component is missing or traverses through a non-directory.
    fn canonicalize(&self, target: &Path, follow_symlink: bool) -> Option<PathBuf> {
        let mut canonical_path = PathBuf::new();
        let mut path = target.to_path_buf();
        let mut entry_stack = Vec::new();
        // Each 'outer iteration restarts traversal after a symlink has
        // rewritten `path`.
        'outer: loop {
            let mut path_components = path.components().peekable();
            let mut prefix = None;
            while let Some(component) = path_components.next() {
                match component {
                    Component::Prefix(prefix_component) => prefix = Some(prefix_component),
                    Component::RootDir => {
                        // An absolute component resets traversal to the root.
                        entry_stack.clear();
                        entry_stack.push(&self.root);
                        canonical_path.clear();
                        match prefix {
                            Some(prefix_component) => {
                                canonical_path = PathBuf::from(prefix_component.as_os_str());
                                // Prefixes like `C:\\` are represented without their trailing slash, so we have to re-add it.
                                canonical_path.push(std::path::MAIN_SEPARATOR_STR);
                            }
                            None => canonical_path = PathBuf::from(std::path::MAIN_SEPARATOR_STR),
                        }
                    }
                    Component::CurDir => {}
                    Component::ParentDir => {
                        // `..` above the root is an error (`?` on the pop).
                        entry_stack.pop()?;
                        canonical_path.pop();
                    }
                    Component::Normal(name) => {
                        let current_entry = *entry_stack.last()?;
                        if let FakeFsEntry::Dir { entries, .. } = current_entry {
                            let entry = entries.get(name.to_str().unwrap())?;
                            // Follow a symlink when it is not the last
                            // component, or when the caller asked us to.
                            if (path_components.peek().is_some() || follow_symlink)
                                && let FakeFsEntry::Symlink { target, .. } = entry
                            {
                                // Splice the remaining components onto the
                                // link target and restart traversal.
                                let mut target = target.clone();
                                target.extend(path_components);
                                path = target;
                                continue 'outer;
                            }
                            entry_stack.push(entry);
                            canonical_path = canonical_path.join(name);
                        } else {
                            return None;
                        }
                    }
                }
            }
            break;
        }

        if entry_stack.is_empty() {
            None
        } else {
            Some(canonical_path)
        }
    }

    /// Canonicalizes `target` and returns a mutable reference to the entry it
    /// names together with the canonical path, or `None` if it doesn't exist.
    fn try_entry(
        &mut self,
        target: &Path,
        follow_symlink: bool,
    ) -> Option<(&mut FakeFsEntry, PathBuf)> {
        let canonical_path = self.canonicalize(target, follow_symlink)?;

        // A canonical path always begins with (prefix +) root; anything else
        // indicates a bug in `canonicalize`.
        let mut components = canonical_path
            .components()
            .skip_while(|component| matches!(component, Component::Prefix(_)));
        let Some(Component::RootDir) = components.next() else {
            panic!(
                "the path {:?} was not canonicalized properly {:?}",
                target, canonical_path
            )
        };

        let mut entry = &mut self.root;
        for component in components {
            match component {
                Component::Normal(name) => {
                    if let FakeFsEntry::Dir { entries, .. } = entry {
                        entry = entries.get_mut(name.to_str().unwrap())?;
                    } else {
                        return None;
                    }
                }
                _ => {
                    panic!(
                        "the path {:?} was not canonicalized properly {:?}",
                        target, canonical_path
                    )
                }
            }
        }

        Some((entry, canonical_path))
    }

    /// Like [`Self::try_entry`] (following symlinks), but converts a missing
    /// path into an `io::ErrorKind::NotFound` error.
    fn entry(&mut self, target: &Path) -> Result<&mut FakeFsEntry> {
        Ok(self
            .try_entry(target, true)
            .ok_or_else(|| {
                anyhow!(io::Error::new(
                    io::ErrorKind::NotFound,
                    format!("not found: {target:?}")
                ))
            })?
            .0)
    }

    /// Resolves `path`'s parent directory and hands `callback` the btree entry
    /// slot for the final component, allowing it to insert or replace.
    ///
    /// Fails if `path` is the root, or the parent doesn't exist / isn't a dir.
    fn write_path<Fn, T>(&mut self, path: &Path, callback: Fn) -> Result<T>
    where
        Fn: FnOnce(btree_map::Entry<String, FakeFsEntry>) -> Result<T>,
    {
        let path = normalize_path(path);
        let filename = path.file_name().context("cannot overwrite the root")?;
        let parent_path = path.parent().unwrap();

        let parent = self.entry(parent_path)?;
        let new_entry = parent
            .dir_entries(parent_path)?
            .entry(filename.to_str().unwrap().into());
        callback(new_entry)
    }

    /// Queues path events and, unless events are paused, immediately
    /// broadcasts all buffered events to registered watchers.
    fn emit_event<I, T>(&mut self, paths: I)
    where
        I: IntoIterator<Item = (T, Option<PathEventKind>)>,
        T: Into<PathBuf>,
    {
        self.buffered_events
            .extend(paths.into_iter().map(|(path, kind)| PathEvent {
                path: path.into(),
                kind,
            }));

        if !self.events_paused {
            self.flush_events(self.buffered_events.len());
        }
    }

    /// Sends up to `count` buffered events to every watcher channel, dropping
    /// channels whose receivers have been closed.
    fn flush_events(&mut self, mut count: usize) {
        count = count.min(self.buffered_events.len());
        let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
        self.event_txs.retain(|(_, tx)| {
            let _ = tx.try_send(events.clone());
            !tx.is_closed()
        });
    }
}
1598
/// The well-known name of a git state directory (`.git`), as an `OsStr`.
#[cfg(feature = "test-support")]
pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
    std::sync::LazyLock::new(|| OsStr::new(".git"));
1602
1603#[cfg(feature = "test-support")]
1604impl FakeFs {
    /// The step by which fake mtimes advance between writes.
    ///
    /// We need to use something large enough for Windows and Unix to consider this a new file.
    /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
    const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);
1608
    /// Creates a `FakeFs` with an empty root directory.
    ///
    /// Also spawns a background task forwarding fake-git events onto the
    /// filesystem event stream; it panics rather than block if the state lock
    /// is contended, since blocking there would hang the test.
    pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
        let (tx, rx) = smol::channel::bounded::<PathBuf>(10);

        // `new_cyclic` lets the struct hold a weak reference to itself.
        let this = Arc::new_cyclic(|this| Self {
            this: this.clone(),
            executor: executor.clone(),
            state: Arc::new(Mutex::new(FakeFsState {
                root: FakeFsEntry::Dir {
                    inode: 0,
                    mtime: MTime(UNIX_EPOCH),
                    len: 0,
                    entries: Default::default(),
                    git_repo_state: None,
                },
                git_event_tx: tx,
                next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
                next_inode: 1,
                event_txs: Default::default(),
                buffered_events: Vec::new(),
                events_paused: false,
                read_dir_call_count: 0,
                metadata_call_count: 0,
                path_write_counts: Default::default(),
                moves: Default::default(),
                job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
                trash: Mutex::new(SlotMap::with_key()),
            })),
        });

        executor.spawn({
            let this = this.clone();
            async move {
                while let Ok(git_event) = rx.recv().await {
                    if let Some(mut state) = this.state.try_lock() {
                        state.emit_event([(git_event, Some(PathEventKind::Changed))]);
                    } else {
                        panic!("Failed to lock file system state, this execution would have caused a test hang");
                    }
                }
            }
        }).detach();

        this
    }
1653
1654 pub fn set_next_mtime(&self, next_mtime: SystemTime) {
1655 let mut state = self.state.lock();
1656 state.next_mtime = next_mtime;
1657 }
1658
1659 pub fn get_and_increment_mtime(&self) -> MTime {
1660 let mut state = self.state.lock();
1661 state.get_and_increment_mtime()
1662 }
1663
    /// Creates an empty file at `path`, or bumps the mtime of an existing
    /// file or directory (symlinks are left untouched), then emits a
    /// `Changed` event for the path.
    pub async fn touch_path(&self, path: impl AsRef<Path>) {
        let mut state = self.state.lock();
        let path = path.as_ref();
        let new_mtime = state.get_and_increment_mtime();
        let new_inode = state.get_and_increment_inode();
        state
            .write_path(path, move |entry| {
                match entry {
                    btree_map::Entry::Vacant(e) => {
                        e.insert(FakeFsEntry::File {
                            inode: new_inode,
                            mtime: new_mtime,
                            content: Vec::new(),
                            len: 0,
                            git_dir_path: None,
                        });
                    }
                    btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut() {
                        FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
                        FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
                        FakeFsEntry::Symlink { .. } => {}
                    },
                }
                Ok(())
            })
            .unwrap();
        state.emit_event([(path.to_path_buf(), Some(PathEventKind::Changed))]);
    }
1692
    /// Creates or overwrites the file at `path` with `content`, recreating its
    /// inode so the write looks like a replace-by-rename to observers.
    pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
        self.write_file_internal(path, content, true).unwrap()
    }
1696
1697 pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
1698 let mut state = self.state.lock();
1699 let path = path.as_ref();
1700 let file = FakeFsEntry::Symlink { target };
1701 state
1702 .write_path(path.as_ref(), move |e| match e {
1703 btree_map::Entry::Vacant(e) => {
1704 e.insert(file);
1705 Ok(())
1706 }
1707 btree_map::Entry::Occupied(mut e) => {
1708 *e.get_mut() = file;
1709 Ok(())
1710 }
1711 })
1712 .unwrap();
1713 state.emit_event([(path, Some(PathEventKind::Created))]);
1714 }
1715
    /// Writes `new_content` to `path`, creating the file if it doesn't exist.
    ///
    /// `recreate_inode` controls whether an existing file receives a fresh
    /// inode (replace-by-rename semantics) or keeps its inode (in-place edit).
    /// Records the per-path write count and emits a Created/Changed event.
    ///
    /// # Errors
    /// Fails if the parent path is missing or `path` names a non-file entry.
    fn write_file_internal(
        &self,
        path: impl AsRef<Path>,
        new_content: Vec<u8>,
        recreate_inode: bool,
    ) -> Result<()> {
        // Non-generic inner fn avoids monomorphizing the whole body per caller.
        fn inner(
            this: &FakeFs,
            path: &Path,
            new_content: Vec<u8>,
            recreate_inode: bool,
        ) -> Result<()> {
            let mut state = this.state.lock();
            let path_buf = path.to_path_buf();
            *state.path_write_counts.entry(path_buf).or_insert(0) += 1;
            let new_inode = state.get_and_increment_inode();
            let new_mtime = state.get_and_increment_mtime();
            let new_len = new_content.len() as u64;
            let mut kind = None;
            state.write_path(path, |entry| {
                match entry {
                    btree_map::Entry::Vacant(e) => {
                        kind = Some(PathEventKind::Created);
                        e.insert(FakeFsEntry::File {
                            inode: new_inode,
                            mtime: new_mtime,
                            len: new_len,
                            content: new_content,
                            git_dir_path: None,
                        });
                    }
                    btree_map::Entry::Occupied(mut e) => {
                        kind = Some(PathEventKind::Changed);
                        if let FakeFsEntry::File {
                            inode,
                            mtime,
                            len,
                            content,
                            ..
                        } = e.get_mut()
                        {
                            *mtime = new_mtime;
                            *content = new_content;
                            *len = new_len;
                            if recreate_inode {
                                *inode = new_inode;
                            }
                        } else {
                            anyhow::bail!("not a file")
                        }
                    }
                }
                Ok(())
            })?;
            state.emit_event([(path, kind)]);
            Ok(())
        }
        inner(self, path.as_ref(), new_content, recreate_inode)
    }
1775
1776 pub fn read_file_sync(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1777 let path = path.as_ref();
1778 let path = normalize_path(path);
1779 let mut state = self.state.lock();
1780 let entry = state.entry(&path)?;
1781 entry.file_content(&path).cloned()
1782 }
1783
1784 async fn load_internal(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1785 let path = path.as_ref();
1786 let path = normalize_path(path);
1787 self.simulate_random_delay().await;
1788 let mut state = self.state.lock();
1789 let entry = state.entry(&path)?;
1790 entry.file_content(&path).cloned()
1791 }
1792
    /// Stops broadcasting filesystem events; they accumulate in the buffer
    /// until flushed or unpaused.
    pub fn pause_events(&self) {
        self.state.lock().events_paused = true;
    }
1796
    /// Resumes event broadcasting and immediately delivers everything that
    /// was buffered while paused.
    pub fn unpause_events_and_flush(&self) {
        self.state.lock().events_paused = false;
        self.flush_events(usize::MAX);
    }
1801
    /// Number of events currently buffered (i.e. not yet delivered).
    pub fn buffered_event_count(&self) -> usize {
        self.state.lock().buffered_events.len()
    }
1805
    /// Discards all buffered events without delivering them.
    pub fn clear_buffered_events(&self) {
        self.state.lock().buffered_events.clear();
    }
1809
    /// Delivers up to `count` buffered events to all registered watchers.
    pub fn flush_events(&self, count: usize) {
        self.state.lock().flush_events(count);
    }
1813
    /// Returns a clone of the entry at `target`, following symlinks.
    pub(crate) fn entry(&self, target: &Path) -> Result<FakeFsEntry> {
        self.state.lock().entry(target).cloned()
    }
1817
1818 pub(crate) fn insert_entry(&self, target: &Path, new_entry: FakeFsEntry) -> Result<()> {
1819 let mut state = self.state.lock();
1820 state.write_path(target, |entry| {
1821 match entry {
1822 btree_map::Entry::Vacant(vacant_entry) => {
1823 vacant_entry.insert(new_entry);
1824 }
1825 btree_map::Entry::Occupied(mut occupied_entry) => {
1826 occupied_entry.insert(new_entry);
1827 }
1828 }
1829 Ok(())
1830 })
1831 }
1832
    /// Builds a directory tree at `path` from a JSON description: objects are
    /// directories, strings are file contents, and `null` is an empty dir.
    ///
    /// # Panics
    /// Panics if the JSON contains any other value kind, or if creation fails.
    #[must_use]
    pub fn insert_tree<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        tree: serde_json::Value,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;
        use serde_json::Value::*;

        // Non-generic inner fn: recursion requires a boxed future anyway, and
        // this avoids monomorphizing the body per `path` type.
        fn inner<'a>(
            this: &'a FakeFs,
            path: Arc<Path>,
            tree: serde_json::Value,
        ) -> futures::future::BoxFuture<'a, ()> {
            async move {
                match tree {
                    Object(map) => {
                        this.create_dir(&path).await.unwrap();
                        for (name, contents) in map {
                            let mut path = PathBuf::from(path.as_ref());
                            path.push(name);
                            this.insert_tree(&path, contents).await;
                        }
                    }
                    Null => {
                        this.create_dir(&path).await.unwrap();
                    }
                    String(contents) => {
                        this.insert_file(&path, contents.into_bytes()).await;
                    }
                    _ => {
                        panic!("JSON object must contain only objects, strings, or null");
                    }
                }
            }
            .boxed()
        }
        inner(self, Arc::from(path.as_ref()), tree)
    }
1872
    /// Recursively copies a file or directory tree from the real filesystem
    /// (`src_path`) into the fake filesystem at `path`.
    ///
    /// # Panics
    /// Panics on any real-filesystem I/O error or fake-fs insertion failure.
    pub fn insert_tree_from_real_fs<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        src_path: impl 'a + AsRef<Path> + Send,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;

        async move {
            let path = path.as_ref();
            if std::fs::metadata(&src_path).unwrap().is_file() {
                let contents = std::fs::read(src_path).unwrap();
                self.insert_file(path, contents).await;
            } else {
                self.create_dir(path).await.unwrap();
                for entry in std::fs::read_dir(&src_path).unwrap() {
                    let entry = entry.unwrap();
                    self.insert_tree_from_real_fs(path.join(entry.file_name()), entry.path())
                        .await;
                }
            }
        }
        .boxed()
    }
1896
    /// Runs `f` against the fake git repository state reachable from
    /// `dot_git`, creating the state lazily on first access.
    ///
    /// `dot_git` may be either a `.git` directory, or a gitfile — a file whose
    /// content is `gitdir: <path>` — as used by linked worktrees. `f` receives
    /// the repository state, the resolved git-dir path, and the common dir
    /// (read from a `commondir` file when present). When `emit_git_event` is
    /// true, a `Changed` event is emitted for the resolved git dir afterwards.
    pub fn with_git_state_and_paths<T, F>(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        f: F,
    ) -> Result<T>
    where
        F: FnOnce(&mut FakeGitRepositoryState, &Path, &Path) -> T,
    {
        let mut state = self.state.lock();
        let git_event_tx = state.git_event_tx.clone();
        let entry = state.entry(dot_git).context("open .git")?;

        if let FakeFsEntry::Dir { git_repo_state, .. } = entry {
            // Plain `.git` directory: git dir and common dir coincide.
            let repo_state = git_repo_state.get_or_insert_with(|| {
                log::debug!("insert git state for {dot_git:?}");
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            let result = f(&mut repo_state, dot_git, dot_git);

            drop(repo_state);
            if emit_git_event {
                state.emit_event([(dot_git, Some(PathEventKind::Changed))]);
            }

            Ok(result)
        } else if let FakeFsEntry::File {
            content,
            git_dir_path,
            ..
        } = &mut *entry
        {
            // Gitfile: parse and cache the `gitdir:` pointer on first use.
            let path = match git_dir_path {
                Some(path) => path,
                None => {
                    let path = std::str::from_utf8(content)
                        .ok()
                        .and_then(|content| content.strip_prefix("gitdir:"))
                        .context("not a valid gitfile")?
                        .trim();
                    // Relative gitdir paths are resolved against the gitfile's
                    // parent directory.
                    git_dir_path.insert(normalize_path(&dot_git.parent().unwrap().join(path)))
                }
            }
            .clone();
            let Some((git_dir_entry, canonical_path)) = state.try_entry(&path, true) else {
                anyhow::bail!("pointed-to git dir {path:?} not found")
            };
            let FakeFsEntry::Dir {
                git_repo_state,
                entries,
                ..
            } = git_dir_entry
            else {
                anyhow::bail!("gitfile points to a non-directory")
            };
            // A `commondir` file (linked worktrees) redirects shared state to
            // the main repository's git dir.
            let common_dir = if let Some(child) = entries.get("commondir") {
                let raw = std::str::from_utf8(child.file_content("commondir".as_ref())?)
                    .context("commondir content")?
                    .trim();
                let raw_path = Path::new(raw);
                if raw_path.is_relative() {
                    normalize_path(&canonical_path.join(raw_path))
                } else {
                    raw_path.to_owned()
                }
            } else {
                canonical_path.clone()
            };
            let repo_state = git_repo_state.get_or_insert_with(|| {
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            let result = f(&mut repo_state, &canonical_path, &common_dir);

            if emit_git_event {
                drop(repo_state);
                state.emit_event([(canonical_path, Some(PathEventKind::Changed))]);
            }

            Ok(result)
        } else {
            anyhow::bail!("not a valid git repository");
        }
    }
1984
    /// Convenience wrapper around [`Self::with_git_state_and_paths`] for
    /// callers that don't need the resolved git-dir / common-dir paths.
    pub fn with_git_state<T, F>(&self, dot_git: &Path, emit_git_event: bool, f: F) -> Result<T>
    where
        F: FnOnce(&mut FakeGitRepositoryState) -> T,
    {
        self.with_git_state_and_paths(dot_git, emit_git_event, |state, _, _| f(state))
    }
1991
1992 pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
1993 self.with_git_state(dot_git, true, |state| {
1994 let branch = branch.map(Into::into);
1995 state.branches.extend(branch.clone());
1996 state.current_branch_name = branch
1997 })
1998 .unwrap();
1999 }
2000
2001 pub fn set_remote_for_repo(
2002 &self,
2003 dot_git: &Path,
2004 name: impl Into<String>,
2005 url: impl Into<String>,
2006 ) {
2007 self.with_git_state(dot_git, true, |state| {
2008 state.remotes.insert(name.into(), url.into());
2009 })
2010 .unwrap();
2011 }
2012
2013 pub fn insert_branches(&self, dot_git: &Path, branches: &[&str]) {
2014 self.with_git_state(dot_git, true, |state| {
2015 if let Some(first) = branches.first()
2016 && state.current_branch_name.is_none()
2017 {
2018 state.current_branch_name = Some(first.to_string())
2019 }
2020 state
2021 .branches
2022 .extend(branches.iter().map(ToString::to_string));
2023 })
2024 .unwrap();
2025 }
2026
    /// Simulates `git worktree add`: records the worktree's ref in the repo
    /// state, creates the `.git/worktrees/<name>/` bookkeeping files (HEAD,
    /// commondir, gitdir), and creates the checkout directory containing a
    /// gitfile pointing back at the bookkeeping directory.
    ///
    /// # Panics
    /// Panics if `worktree.ref_name` is `None` or any fake-fs write fails.
    pub async fn add_linked_worktree_for_repo(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        worktree: Worktree,
    ) {
        let ref_name = worktree
            .ref_name
            .as_ref()
            .expect("linked worktree must have a ref_name");
        let branch_name = ref_name
            .strip_prefix("refs/heads/")
            .unwrap_or(ref_name.as_ref());

        // Create ref in git state.
        self.with_git_state(dot_git, false, |state| {
            state
                .refs
                .insert(ref_name.to_string(), worktree.sha.to_string());
        })
        .unwrap();

        // Create .git/worktrees/<name>/ directory with HEAD, commondir, and gitdir.
        let worktrees_entry_dir = dot_git.join("worktrees").join(branch_name);
        self.create_dir(&worktrees_entry_dir).await.unwrap();

        self.write_file_internal(
            worktrees_entry_dir.join("HEAD"),
            format!("ref: {ref_name}").into_bytes(),
            false,
        )
        .unwrap();

        self.write_file_internal(
            worktrees_entry_dir.join("commondir"),
            dot_git.to_string_lossy().into_owned().into_bytes(),
            false,
        )
        .unwrap();

        let worktree_dot_git = worktree.path.join(".git");
        self.write_file_internal(
            worktrees_entry_dir.join("gitdir"),
            worktree_dot_git.to_string_lossy().into_owned().into_bytes(),
            false,
        )
        .unwrap();

        // Create the worktree checkout directory with a .git file pointing back.
        self.create_dir(&worktree.path).await.unwrap();

        self.write_file_internal(
            &worktree_dot_git,
            format!("gitdir: {}", worktrees_entry_dir.display()).into_bytes(),
            false,
        )
        .unwrap();

        if emit_git_event {
            // No-op closure: the call exists solely to emit the git event.
            self.with_git_state(dot_git, true, |_| {}).unwrap();
        }
    }
2089
    /// Removes a linked worktree previously created with
    /// [`Self::add_linked_worktree_for_repo`]: deletes both the checkout
    /// directory (resolved via the `gitdir` file) and the
    /// `.git/worktrees/<name>/` bookkeeping directory.
    ///
    /// # Panics
    /// Panics if the bookkeeping files are missing or a removal fails.
    pub async fn remove_worktree_for_repo(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        ref_name: &str,
    ) {
        let branch_name = ref_name.strip_prefix("refs/heads/").unwrap_or(ref_name);
        let worktrees_entry_dir = dot_git.join("worktrees").join(branch_name);

        // Read gitdir to find the worktree checkout path.
        let gitdir_content = self
            .load_internal(worktrees_entry_dir.join("gitdir"))
            .await
            .unwrap();
        let gitdir_str = String::from_utf8(gitdir_content).unwrap();
        // The gitdir file points at `<checkout>/.git`, so its parent is the
        // checkout directory itself.
        let worktree_path = PathBuf::from(gitdir_str.trim())
            .parent()
            .map(PathBuf::from)
            .unwrap_or_default();

        // Remove the worktree checkout directory.
        self.remove_dir(
            &worktree_path,
            RemoveOptions {
                recursive: true,
                ignore_if_not_exists: true,
            },
        )
        .await
        .unwrap();

        // Remove the .git/worktrees/<name>/ directory.
        self.remove_dir(
            &worktrees_entry_dir,
            RemoveOptions {
                recursive: true,
                ignore_if_not_exists: false,
            },
        )
        .await
        .unwrap();

        if emit_git_event {
            // No-op closure: the call exists solely to emit the git event.
            self.with_git_state(dot_git, true, |_| {}).unwrap();
        }
    }
2136
2137 pub fn set_unmerged_paths_for_repo(
2138 &self,
2139 dot_git: &Path,
2140 unmerged_state: &[(RepoPath, UnmergedStatus)],
2141 ) {
2142 self.with_git_state(dot_git, true, |state| {
2143 state.unmerged_paths.clear();
2144 state.unmerged_paths.extend(
2145 unmerged_state
2146 .iter()
2147 .map(|(path, content)| (path.clone(), *content)),
2148 );
2149 })
2150 .unwrap();
2151 }
2152
2153 pub fn set_index_for_repo(&self, dot_git: &Path, index_state: &[(&str, String)]) {
2154 self.with_git_state(dot_git, true, |state| {
2155 state.index_contents.clear();
2156 state.index_contents.extend(
2157 index_state
2158 .iter()
2159 .map(|(path, content)| (repo_path(path), content.clone())),
2160 );
2161 })
2162 .unwrap();
2163 }
2164
2165 pub fn set_head_for_repo(
2166 &self,
2167 dot_git: &Path,
2168 head_state: &[(&str, String)],
2169 sha: impl Into<String>,
2170 ) {
2171 self.with_git_state(dot_git, true, |state| {
2172 state.head_contents.clear();
2173 state.head_contents.extend(
2174 head_state
2175 .iter()
2176 .map(|(path, content)| (repo_path(path), content.clone())),
2177 );
2178 state.refs.insert("HEAD".into(), sha.into());
2179 })
2180 .unwrap();
2181 }
2182
2183 pub fn set_head_and_index_for_repo(&self, dot_git: &Path, contents_by_path: &[(&str, String)]) {
2184 self.with_git_state(dot_git, true, |state| {
2185 state.head_contents.clear();
2186 state.head_contents.extend(
2187 contents_by_path
2188 .iter()
2189 .map(|(path, contents)| (repo_path(path), contents.clone())),
2190 );
2191 state.index_contents = state.head_contents.clone();
2192 })
2193 .unwrap();
2194 }
2195
2196 pub fn set_merge_base_content_for_repo(
2197 &self,
2198 dot_git: &Path,
2199 contents_by_path: &[(&str, String)],
2200 ) {
2201 self.with_git_state(dot_git, true, |state| {
2202 use git::Oid;
2203
2204 state.merge_base_contents.clear();
2205 let oids = (1..)
2206 .map(|n| n.to_string())
2207 .map(|n| Oid::from_bytes(n.repeat(20).as_bytes()).unwrap());
2208 for ((path, content), oid) in contents_by_path.iter().zip(oids) {
2209 state.merge_base_contents.insert(repo_path(path), oid);
2210 state.oids.insert(oid, content.clone());
2211 }
2212 })
2213 .unwrap();
2214 }
2215
2216 pub fn set_blame_for_repo(&self, dot_git: &Path, blames: Vec<(RepoPath, git::blame::Blame)>) {
2217 self.with_git_state(dot_git, true, |state| {
2218 state.blames.clear();
2219 state.blames.extend(blames);
2220 })
2221 .unwrap();
2222 }
2223
2224 pub fn set_graph_commits(&self, dot_git: &Path, commits: Vec<Arc<InitialGraphCommitData>>) {
2225 self.with_git_state(dot_git, true, |state| {
2226 state.graph_commits = commits;
2227 })
2228 .unwrap();
2229 }
2230
2231 pub fn set_graph_error(&self, dot_git: &Path, error: Option<String>) {
2232 self.with_git_state(dot_git, true, |state| {
2233 state.simulated_graph_error = error;
2234 })
2235 .unwrap();
2236 }
2237
    /// Put the given git repository into a state with the given status,
    /// by mutating the head, index, and unmerged state.
    ///
    /// `statuses` maps repo-relative unix-style paths to the desired status;
    /// any file in the working copy not mentioned there is treated as
    /// unmodified (identical content in HEAD, index, and working copy).
    /// Panics for status combinations that cannot be fabricated from an
    /// existing working-copy file (e.g. deleted/renamed in the worktree).
    pub fn set_status_for_repo(&self, dot_git: &Path, statuses: &[(&str, FileStatus)]) {
        let workdir_path = dot_git.parent().unwrap();
        // Snapshot the working copy before taking the git state lock.
        let workdir_contents = self.files_with_contents(workdir_path);
        self.with_git_state(dot_git, true, |state| {
            state.index_contents.clear();
            state.head_contents.clear();
            state.unmerged_paths.clear();
            for (path, content) in workdir_contents {
                use util::{paths::PathStyle, rel_path::RelPath};

                let repo_path = RelPath::new(path.strip_prefix(&workdir_path).unwrap(), PathStyle::local()).unwrap();
                let repo_path = RepoPath::from_rel_path(&repo_path);
                // Look up the requested status for this file, if any.
                let status = statuses
                    .iter()
                    .find_map(|(p, status)| (*p == repo_path.as_unix_str()).then_some(status));
                let mut content = String::from_utf8_lossy(&content).to_string();

                // Derive index and head contents from the working-copy
                // content, appending marker suffixes so the three versions
                // compare as different where the status requires it.
                let mut index_content = None;
                let mut head_content = None;
                match status {
                    None => {
                        // Unmodified: all three versions identical.
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    Some(FileStatus::Untracked | FileStatus::Ignored) => {}
                    Some(FileStatus::Unmerged(unmerged_status)) => {
                        state
                            .unmerged_paths
                            .insert(repo_path.clone(), *unmerged_status);
                        content.push_str(" (unmerged)");
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    Some(FileStatus::Tracked(TrackedStatus {
                        index_status,
                        worktree_status,
                    })) => {
                        // First derive the index content from the worktree
                        // status, then the head content from the index status.
                        match worktree_status {
                            StatusCode::Modified => {
                                let mut content = content.clone();
                                content.push_str(" (modified in working copy)");
                                index_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                index_content = Some(content.clone());
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted | StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                        match index_status {
                            StatusCode::Modified => {
                                let mut content = index_content.clone().expect(
                                    "file cannot be both modified in index and created in working copy",
                                );
                                content.push_str(" (modified in index)");
                                head_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                head_content = Some(index_content.clone().expect("file cannot be both unmodified in index and created in working copy"));
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted => {
                                head_content = Some("".into());
                            }
                            StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                    }
                };

                if let Some(content) = index_content {
                    state.index_contents.insert(repo_path.clone(), content);
                }
                if let Some(content) = head_content {
                    state.head_contents.insert(repo_path.clone(), content);
                }
            }
        }).unwrap();
    }
2322
2323 pub fn set_error_message_for_index_write(&self, dot_git: &Path, message: Option<String>) {
2324 self.with_git_state(dot_git, true, |state| {
2325 state.simulated_index_write_error_message = message;
2326 })
2327 .unwrap();
2328 }
2329
2330 pub fn set_create_worktree_error(&self, dot_git: &Path, message: Option<String>) {
2331 self.with_git_state(dot_git, true, |state| {
2332 state.simulated_create_worktree_error = message;
2333 })
2334 .unwrap();
2335 }
2336
2337 pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
2338 let mut result = Vec::new();
2339 let mut queue = collections::VecDeque::new();
2340 let state = &*self.state.lock();
2341 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2342 while let Some((path, entry)) = queue.pop_front() {
2343 if let FakeFsEntry::Dir { entries, .. } = entry {
2344 for (name, entry) in entries {
2345 queue.push_back((path.join(name), entry));
2346 }
2347 }
2348 if include_dot_git
2349 || !path
2350 .components()
2351 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2352 {
2353 result.push(path);
2354 }
2355 }
2356 result
2357 }
2358
2359 pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
2360 let mut result = Vec::new();
2361 let mut queue = collections::VecDeque::new();
2362 let state = &*self.state.lock();
2363 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2364 while let Some((path, entry)) = queue.pop_front() {
2365 if let FakeFsEntry::Dir { entries, .. } = entry {
2366 for (name, entry) in entries {
2367 queue.push_back((path.join(name), entry));
2368 }
2369 if include_dot_git
2370 || !path
2371 .components()
2372 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2373 {
2374 result.push(path);
2375 }
2376 }
2377 }
2378 result
2379 }
2380
2381 pub fn files(&self) -> Vec<PathBuf> {
2382 let mut result = Vec::new();
2383 let mut queue = collections::VecDeque::new();
2384 let state = &*self.state.lock();
2385 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2386 while let Some((path, entry)) = queue.pop_front() {
2387 match entry {
2388 FakeFsEntry::File { .. } => result.push(path),
2389 FakeFsEntry::Dir { entries, .. } => {
2390 for (name, entry) in entries {
2391 queue.push_back((path.join(name), entry));
2392 }
2393 }
2394 FakeFsEntry::Symlink { .. } => {}
2395 }
2396 }
2397 result
2398 }
2399
2400 pub fn files_with_contents(&self, prefix: &Path) -> Vec<(PathBuf, Vec<u8>)> {
2401 let mut result = Vec::new();
2402 let mut queue = collections::VecDeque::new();
2403 let state = &*self.state.lock();
2404 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2405 while let Some((path, entry)) = queue.pop_front() {
2406 match entry {
2407 FakeFsEntry::File { content, .. } => {
2408 if path.starts_with(prefix) {
2409 result.push((path, content.clone()));
2410 }
2411 }
2412 FakeFsEntry::Dir { entries, .. } => {
2413 for (name, entry) in entries {
2414 queue.push_back((path.join(name), entry));
2415 }
2416 }
2417 FakeFsEntry::Symlink { .. } => {}
2418 }
2419 }
2420 result
2421 }
2422
    /// How many `read_dir` calls have been issued.
    ///
    /// Lets tests assert that code under test doesn't scan directories more
    /// often than expected.
    pub fn read_dir_call_count(&self) -> usize {
        self.state.lock().read_dir_call_count
    }
2427
2428 pub fn watched_paths(&self) -> Vec<PathBuf> {
2429 let state = self.state.lock();
2430 state
2431 .event_txs
2432 .iter()
2433 .filter_map(|(path, tx)| Some(path.clone()).filter(|_| !tx.is_closed()))
2434 .collect()
2435 }
2436
    /// How many `metadata` calls have been issued.
    ///
    /// Lets tests assert that code under test doesn't stat paths more often
    /// than expected.
    pub fn metadata_call_count(&self) -> usize {
        self.state.lock().metadata_call_count
    }
2441
2442 /// How many write operations have been issued for a specific path.
2443 pub fn write_count_for_path(&self, path: impl AsRef<Path>) -> usize {
2444 let path = path.as_ref().to_path_buf();
2445 self.state
2446 .lock()
2447 .path_write_counts
2448 .get(&path)
2449 .copied()
2450 .unwrap_or(0)
2451 }
2452
    /// Synthesizes a filesystem event for `path` without touching any entry,
    /// delivering it to all interested watchers.
    pub fn emit_fs_event(&self, path: impl Into<PathBuf>, event: Option<PathEventKind>) {
        self.state.lock().emit_event(std::iter::once((path, event)));
    }
2456
    /// Awaits a random, test-scheduler-controlled delay, used throughout the
    /// fake fs to shake out ordering assumptions in async code under test.
    fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
        self.executor.simulate_random_delay()
    }
2460
    /// Removes the directory at `path`, returning the removed entry (if any)
    /// so callers like `trash` can retain it for later restoration.
    ///
    /// Honors `RemoveOptions`: a missing path is an error unless
    /// `ignore_if_not_exists`, and a non-empty directory is an error unless
    /// `recursive`. Fails if `path` is the root or the parent is not a
    /// directory. Emits a `Removed` event even when nothing was removed.
    async fn remove_dir_inner(
        &self,
        path: &Path,
        options: RemoveOptions,
    ) -> Result<Option<FakeFsEntry>> {
        self.simulate_random_delay().await;

        let path = normalize_path(path);
        let parent_path = path.parent().context("cannot remove the root")?;
        let base_name = path.file_name().context("cannot remove the root")?;

        let mut state = self.state.lock();
        let parent_entry = state.entry(parent_path)?;
        // NOTE(review): `to_str().unwrap()` panics on non-UTF-8 names; the
        // fake fs keys entries by `String`, so this is assumed never to occur
        // in tests.
        let entry = parent_entry
            .dir_entries(parent_path)?
            .entry(base_name.to_str().unwrap().into());

        let removed = match entry {
            btree_map::Entry::Vacant(_) => {
                if !options.ignore_if_not_exists {
                    anyhow::bail!("{path:?} does not exist");
                }

                None
            }
            btree_map::Entry::Occupied(mut entry) => {
                {
                    // `dir_entries` also verifies the target is a directory.
                    let children = entry.get_mut().dir_entries(&path)?;
                    if !options.recursive && !children.is_empty() {
                        anyhow::bail!("{path:?} is not empty");
                    }
                }

                Some(entry.remove())
            }
        };

        state.emit_event([(path, Some(PathEventKind::Removed))]);
        Ok(removed)
    }
2501
2502 async fn remove_file_inner(
2503 &self,
2504 path: &Path,
2505 options: RemoveOptions,
2506 ) -> Result<Option<FakeFsEntry>> {
2507 self.simulate_random_delay().await;
2508
2509 let path = normalize_path(path);
2510 let parent_path = path.parent().context("cannot remove the root")?;
2511 let base_name = path.file_name().unwrap();
2512 let mut state = self.state.lock();
2513 let parent_entry = state.entry(parent_path)?;
2514 let entry = parent_entry
2515 .dir_entries(parent_path)?
2516 .entry(base_name.to_str().unwrap().into());
2517 let removed = match entry {
2518 btree_map::Entry::Vacant(_) => {
2519 if !options.ignore_if_not_exists {
2520 anyhow::bail!("{path:?} does not exist");
2521 }
2522
2523 None
2524 }
2525 btree_map::Entry::Occupied(mut entry) => {
2526 entry.get_mut().file_content(&path)?;
2527 Some(entry.remove())
2528 }
2529 };
2530
2531 state.emit_event([(path, Some(PathEventKind::Removed))]);
2532 Ok(removed)
2533 }
2534}
2535
#[cfg(feature = "test-support")]
impl FakeFsEntry {
    /// Whether this entry is a regular file.
    fn is_file(&self) -> bool {
        matches!(self, Self::File { .. })
    }

    /// Whether this entry is a symbolic link.
    fn is_symlink(&self) -> bool {
        matches!(self, Self::Symlink { .. })
    }

    /// Returns the file's contents, or an error if the entry is not a
    /// regular file.
    fn file_content(&self, path: &Path) -> Result<&Vec<u8>> {
        match self {
            Self::File { content, .. } => Ok(content),
            _ => anyhow::bail!("not a file: {path:?}"),
        }
    }

    /// Returns the directory's child map, or an error if the entry is not a
    /// directory.
    fn dir_entries(&mut self, path: &Path) -> Result<&mut BTreeMap<String, FakeFsEntry>> {
        match self {
            Self::Dir { entries, .. } => Ok(entries),
            _ => anyhow::bail!("not a directory: {path:?}"),
        }
    }
}
2562
#[cfg(feature = "test-support")]
/// Watcher handed out by `FakeFs::watch`; events are delivered over `tx` and
/// filtered against `prefixes`.
struct FakeWatcher {
    // Channel on which matching path events are sent to the stream returned
    // by `FakeFs::watch`.
    tx: smol::channel::Sender<Vec<PathEvent>>,
    // The path the watch was originally created for; `add`ing paths under it
    // is a no-op.
    original_path: PathBuf,
    // Shared fake-fs state, used to register additional event senders.
    fs_state: Arc<Mutex<FakeFsState>>,
    // All path prefixes this watcher is interested in.
    prefixes: Mutex<Vec<PathBuf>>,
}
2570
#[cfg(feature = "test-support")]
impl Watcher for FakeWatcher {
    fn add(&self, path: &Path) -> Result<()> {
        // Paths under the originally-watched root are already covered.
        if !path.starts_with(&self.original_path) {
            self.fs_state
                .try_lock()
                .unwrap()
                .event_txs
                .push((path.to_owned(), self.tx.clone()));
            self.prefixes.lock().push(path.to_owned());
        }
        Ok(())
    }

    fn remove(&self, _: &Path) -> Result<()> {
        // The fake watcher never prunes registered paths.
        Ok(())
    }
}
2590
#[cfg(feature = "test-support")]
#[derive(Debug)]
/// Fake file handle identified by inode, used to track a file across renames
/// via the fake fs's move log.
struct FakeHandle {
    inode: u64,
}
2596
#[cfg(feature = "test-support")]
impl FileHandle for FakeHandle {
    fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf> {
        // The fake fs records the destination of every rename keyed by inode;
        // resolve this handle's inode to wherever the entry was moved, and
        // verify that destination still exists.
        let fake = fs.as_fake();
        let mut state = fake.state.lock();
        let target = state
            .moves
            .get(&self.inode)
            .cloned()
            .context("fake fd not moved")?;
        if state.try_entry(&target, false).is_none() {
            anyhow::bail!("fake fd target not found")
        }
        Ok(target)
    }
}
2612
// In-memory implementation of the `Fs` trait used by tests. Most operations
// insert a scheduler-controlled random delay and emit watcher events.
#[cfg(feature = "test-support")]
#[async_trait::async_trait]
impl Fs for FakeFs {
    // Creates `path` and all missing ancestors, emitting a Created event for
    // each directory that did not already exist.
    async fn create_dir(&self, path: &Path) -> Result<()> {
        self.simulate_random_delay().await;

        let mut created_dirs = Vec::new();
        let mut cur_path = PathBuf::new();
        for component in path.components() {
            // Prefix (e.g. drive letter) and root components are part of the
            // path but are not directory entries to create.
            let should_skip = matches!(component, Component::Prefix(..) | Component::RootDir);
            cur_path.push(component);
            if should_skip {
                continue;
            }
            let mut state = self.state.lock();

            // Note: a fresh inode/mtime is consumed per component even when
            // the directory already exists.
            let inode = state.get_and_increment_inode();
            let mtime = state.get_and_increment_mtime();
            state.write_path(&cur_path, |entry| {
                entry.or_insert_with(|| {
                    created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
                    FakeFsEntry::Dir {
                        inode,
                        mtime,
                        len: 0,
                        entries: Default::default(),
                        git_repo_state: None,
                    }
                });
                Ok(())
            })?
        }

        self.state.lock().emit_event(created_dirs);
        Ok(())
    }

    // Creates an empty file, honoring `overwrite`/`ignore_if_exists`, and
    // emits a Created (or Changed, when overwriting) event.
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
        self.simulate_random_delay().await;
        let mut state = self.state.lock();
        let inode = state.get_and_increment_inode();
        let mtime = state.get_and_increment_mtime();
        let file = FakeFsEntry::File {
            inode,
            mtime,
            len: 0,
            content: Vec::new(),
            git_dir_path: None,
        };
        let mut kind = Some(PathEventKind::Created);
        state.write_path(path, |entry| {
            match entry {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        kind = Some(PathEventKind::Changed);
                        *e.get_mut() = file;
                    } else if !options.ignore_if_exists {
                        anyhow::bail!("path already exists: {path:?}");
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(file);
                }
            }
            Ok(())
        })?;
        state.emit_event([(path, kind)]);
        Ok(())
    }

    // Creates (or unconditionally replaces) a symlink pointing at `target`.
    // No random delay is simulated here.
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
        let mut state = self.state.lock();
        let file = FakeFsEntry::Symlink { target };
        state
            .write_path(path.as_ref(), move |e| match e {
                btree_map::Entry::Vacant(e) => {
                    e.insert(file);
                    Ok(())
                }
                btree_map::Entry::Occupied(mut e) => {
                    *e.get_mut() = file;
                    Ok(())
                }
            })
            .unwrap();
        state.emit_event([(path, Some(PathEventKind::Created))]);

        Ok(())
    }

    // Buffers the entire reader into memory, then writes it as a file.
    async fn create_file_with(
        &self,
        path: &Path,
        mut content: Pin<&mut (dyn AsyncRead + Send)>,
    ) -> Result<()> {
        let mut bytes = Vec::new();
        content.read_to_end(&mut bytes).await?;
        self.write_file_internal(path, bytes, true)?;
        Ok(())
    }

    // Extracts a tar archive under `path`. Only regular-file entries are
    // materialized; other entry types (dirs, links) are skipped, with parent
    // directories created implicitly per file.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()> {
        let mut entries = content.entries()?;
        while let Some(entry) = entries.next().await {
            let mut entry = entry?;
            if entry.header().entry_type().is_file() {
                let path = path.join(entry.path()?.as_ref());
                let mut bytes = Vec::new();
                entry.read_to_end(&mut bytes).await?;
                self.create_dir(path.parent().unwrap()).await?;
                self.write_file_internal(&path, bytes, true)?;
            }
        }
        Ok(())
    }

    // Moves an entry, honoring `create_parents`/`overwrite`/`ignore_if_exists`.
    // The move is recorded by inode so `FakeHandle::current_path` can track
    // the entry across renames.
    async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let old_path = normalize_path(old_path);
        let new_path = normalize_path(new_path);

        if options.create_parents {
            if let Some(parent) = new_path.parent() {
                self.create_dir(parent).await?;
            }
        }

        let mut state = self.state.lock();
        let moved_entry = state.write_path(&old_path, |e| {
            if let btree_map::Entry::Occupied(e) = e {
                Ok(e.get().clone())
            } else {
                anyhow::bail!("path does not exist: {old_path:?}")
            }
        })?;

        // Symlinks have no inode; they are recorded under inode 0.
        let inode = match moved_entry {
            FakeFsEntry::File { inode, .. } => inode,
            FakeFsEntry::Dir { inode, .. } => inode,
            _ => 0,
        };

        state.moves.insert(inode, new_path.clone());

        state.write_path(&new_path, |e| {
            match e {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        *e.get_mut() = moved_entry;
                    } else if !options.ignore_if_exists {
                        anyhow::bail!("path already exists: {new_path:?}");
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(moved_entry);
                }
            }
            Ok(())
        })?;

        // Remove the source entry; it was verified to exist above.
        state
            .write_path(&old_path, |e| {
                if let btree_map::Entry::Occupied(e) = e {
                    Ok(e.remove())
                } else {
                    unreachable!()
                }
            })
            .unwrap();

        state.emit_event([
            (old_path, Some(PathEventKind::Removed)),
            (new_path, Some(PathEventKind::Created)),
        ]);
        Ok(())
    }

    // Copies a regular file's contents to `target` (with a fresh inode and
    // mtime), honoring `overwrite`/`ignore_if_exists`.
    async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let source = normalize_path(source);
        let target = normalize_path(target);
        let mut state = self.state.lock();
        let mtime = state.get_and_increment_mtime();
        let inode = state.get_and_increment_inode();
        let source_entry = state.entry(&source)?;
        let content = source_entry.file_content(&source)?.clone();
        let mut kind = Some(PathEventKind::Created);
        state.write_path(&target, |e| match e {
            btree_map::Entry::Occupied(e) => {
                if options.overwrite {
                    kind = Some(PathEventKind::Changed);
                    Ok(Some(e.get().clone()))
                } else if !options.ignore_if_exists {
                    anyhow::bail!("{target:?} already exists");
                } else {
                    Ok(None)
                }
            }
            btree_map::Entry::Vacant(e) => Ok(Some(
                e.insert(FakeFsEntry::File {
                    inode,
                    mtime,
                    len: content.len() as u64,
                    content,
                    git_dir_path: None,
                })
                .clone(),
            )),
        })?;
        state.emit_event([(target, kind)]);
        Ok(())
    }

    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.remove_dir_inner(path, options).await.map(|_| ())
    }

    // Removes the entry and stashes it in the fake trash, returning an id
    // that `restore` can use to put it back.
    async fn trash(&self, path: &Path, options: RemoveOptions) -> Result<TrashId> {
        let normalized_path = normalize_path(path);
        let parent_path = normalized_path.parent().context("cannot remove the root")?;
        let base_name = normalized_path.file_name().unwrap();
        let result = if self.is_dir(path).await {
            self.remove_dir_inner(path, options).await?
        } else {
            self.remove_file_inner(path, options).await?
        };

        match result {
            Some(fake_entry) => {
                let trashed_entry = TrashedEntry {
                    id: base_name.to_str().unwrap().into(),
                    name: base_name.to_str().unwrap().into(),
                    original_parent: parent_path.to_path_buf(),
                };

                let trash_id = self
                    .state
                    .lock()
                    .trash
                    .lock()
                    .insert((trashed_entry, fake_entry));

                Ok(trash_id)
            }
            None => anyhow::bail!("{normalized_path:?} does not exist"),
        }
    }

    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.remove_file_inner(path, options).await.map(|_| ())
    }

    // Returns a synchronous reader over a snapshot of the file's contents.
    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
        let bytes = self.load_internal(path).await?;
        Ok(Box::new(io::Cursor::new(bytes)))
    }

    // Returns a handle that tracks the entry by inode across renames.
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
        self.simulate_random_delay().await;
        let mut state = self.state.lock();
        let inode = match state.entry(path)? {
            FakeFsEntry::File { inode, .. } => *inode,
            FakeFsEntry::Dir { inode, .. } => *inode,
            _ => unreachable!(),
        };
        Ok(Arc::new(FakeHandle { inode }))
    }

    // Loads a file's contents as UTF-8 text.
    async fn load(&self, path: &Path) -> Result<String> {
        let content = self.load_internal(path).await?;
        Ok(String::from_utf8(content)?)
    }

    async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
        self.load_internal(path).await
    }

    // In the fake fs, "atomic" write is simply a write (creating parents
    // first); there is no temp-file-and-rename dance to simulate.
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        self.simulate_random_delay().await;
        let path = normalize_path(path.as_path());
        if let Some(path) = path.parent() {
            self.create_dir(path).await?;
        }
        self.write_file_internal(path, data.into_bytes(), true)?;
        Ok(())
    }

    // Writes a rope with the given line endings, creating parents first.
    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let content = text::chunks_with_line_ending(text, line_ending).collect::<String>();
        if let Some(path) = path.parent() {
            self.create_dir(path).await?;
        }
        self.write_file_internal(path, content.into_bytes(), false)?;
        Ok(())
    }

    async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        if let Some(path) = path.parent() {
            self.create_dir(path).await?;
        }
        self.write_file_internal(path, content.to_vec(), false)?;
        Ok(())
    }

    // Resolves symlinks via the fake state; errors if the path doesn't exist.
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
        let path = normalize_path(path);
        self.simulate_random_delay().await;
        let state = self.state.lock();
        let canonical_path = state
            .canonicalize(&path, true)
            .with_context(|| format!("path does not exist: {path:?}"))?;
        Ok(canonical_path)
    }

    // Follows symlinks (second argument `true`) when testing for a file.
    async fn is_file(&self, path: &Path) -> bool {
        let path = normalize_path(path);
        self.simulate_random_delay().await;
        let mut state = self.state.lock();
        if let Some((entry, _)) = state.try_entry(&path, true) {
            entry.is_file()
        } else {
            false
        }
    }

    async fn is_dir(&self, path: &Path) -> bool {
        self.metadata(path)
            .await
            .is_ok_and(|metadata| metadata.is_some_and(|metadata| metadata.is_dir))
    }

    // Stat-like lookup: returns None for missing paths (and for symlinks
    // whose target is missing). Symlinks are resolved, but `is_symlink` in
    // the returned metadata reflects the original entry.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let mut state = self.state.lock();
        state.metadata_call_count += 1;
        if let Some((mut entry, _)) = state.try_entry(&path, false) {
            let is_symlink = entry.is_symlink();
            if is_symlink {
                // Re-resolve with symlink-following enabled.
                if let Some(e) = state.try_entry(&path, true).map(|e| e.0) {
                    entry = e;
                } else {
                    return Ok(None);
                }
            }

            Ok(Some(match &*entry {
                FakeFsEntry::File {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: false,
                    is_symlink,
                    is_fifo: false,
                    is_executable: false,
                },
                FakeFsEntry::Dir {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: true,
                    is_symlink,
                    is_fifo: false,
                    is_executable: false,
                },
                // Symlinks were resolved above, so this arm can't be reached.
                FakeFsEntry::Symlink { .. } => unreachable!(),
            }))
        } else {
            Ok(None)
        }
    }

    // Returns a symlink's target without following it.
    async fn read_link(&self, path: &Path) -> Result<PathBuf> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let mut state = self.state.lock();
        let (entry, _) = state
            .try_entry(&path, false)
            .with_context(|| format!("path does not exist: {path:?}"))?;
        if let FakeFsEntry::Symlink { target } = entry {
            Ok(target.clone())
        } else {
            anyhow::bail!("not a symlink: {path:?}")
        }
    }

    // Lists immediate children as full paths; increments the call counter
    // that `read_dir_call_count` exposes to tests.
    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let mut state = self.state.lock();
        state.read_dir_call_count += 1;
        let entry = state.entry(&path)?;
        let children = entry.dir_entries(&path)?;
        let paths = children
            .keys()
            .map(|file_name| Ok(path.join(file_name)))
            .collect::<Vec<_>>();
        Ok(Box::pin(futures::stream::iter(paths)))
    }

    // Registers an event channel for `path` and returns a stream of events
    // plus a `FakeWatcher` that can widen the watched prefix set. The latency
    // argument is ignored by the fake implementation.
    async fn watch(
        &self,
        path: &Path,
        _: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    ) {
        self.simulate_random_delay().await;
        let (tx, rx) = smol::channel::unbounded();
        let path = path.to_path_buf();
        self.state.lock().event_txs.push((path.clone(), tx.clone()));
        let executor = self.executor.clone();
        let watcher = Arc::new(FakeWatcher {
            tx,
            original_path: path.to_owned(),
            fs_state: self.state.clone(),
            prefixes: Mutex::new(vec![path]),
        });
        (
            // Only batches containing at least one event under a watched
            // prefix are let through; delivery itself is randomly delayed.
            Box::pin(futures::StreamExt::filter(rx, {
                let watcher = watcher.clone();
                move |events| {
                    let result = events.iter().any(|evt_path| {
                        watcher
                            .prefixes
                            .lock()
                            .iter()
                            .any(|prefix| evt_path.path.starts_with(prefix))
                    });
                    let executor = executor.clone();
                    async move {
                        executor.simulate_random_delay().await;
                        result
                    }
                }
            })),
            watcher,
        )
    }

    // Opens a fake git repository backed by this fs's per-`.git` state. The
    // system git binary is irrelevant for the fake implementation.
    fn open_repo(
        &self,
        abs_dot_git: &Path,
        _system_git_binary: Option<&Path>,
    ) -> Result<Arc<dyn GitRepository>> {
        self.with_git_state_and_paths(
            abs_dot_git,
            false,
            |_, repository_dir_path, common_dir_path| {
                Arc::new(fake_git_repo::FakeGitRepository {
                    fs: self.this.upgrade().unwrap(),
                    executor: self.executor.clone(),
                    dot_git_path: abs_dot_git.to_path_buf(),
                    repository_dir_path: repository_dir_path.to_owned(),
                    common_dir_path: common_dir_path.to_owned(),
                    checkpoints: Arc::default(),
                    is_trusted: Arc::default(),
                }) as _
            },
        )
    }

    // "Initializing" a fake repo just creates the `.git` directory; the
    // fallback branch name is ignored.
    async fn git_init(
        &self,
        abs_work_directory_path: &Path,
        _fallback_branch_name: String,
    ) -> Result<()> {
        self.create_dir(&abs_work_directory_path.join(".git")).await
    }

    async fn git_clone(&self, _repo_url: &str, _abs_work_directory: &Path) -> Result<()> {
        anyhow::bail!("Git clone is not supported in fake Fs")
    }

    fn is_fake(&self) -> bool {
        true
    }

    // The fake filesystem always behaves case-sensitively.
    async fn is_case_sensitive(&self) -> bool {
        true
    }

    // Registers a new subscriber for job events and returns its receiver.
    fn subscribe_to_jobs(&self) -> JobEventReceiver {
        let (sender, receiver) = futures::channel::mpsc::unbounded();
        self.state.lock().job_event_subscribers.lock().push(sender);
        receiver
    }

    // Moves a previously-trashed entry back to its original location,
    // failing if it was already restored or something now occupies its path.
    async fn restore(&self, trash_id: TrashId) -> Result<PathBuf, TrashRestoreError> {
        let mut state = self.state.lock();

        let Some((trashed_entry, fake_entry)) = state.trash.lock().remove(trash_id) else {
            return Err(TrashRestoreError::AlreadyRestored);
        };

        let path = trashed_entry
            .original_parent
            .join(trashed_entry.name.clone());

        let result = state.write_path(&path, |entry| match entry {
            btree_map::Entry::Vacant(entry) => {
                entry.insert(fake_entry);
                Ok(())
            }
            btree_map::Entry::Occupied(_) => {
                anyhow::bail!("Failed to restore {:?}", path);
            }
        });

        match result {
            Ok(_) => {
                state.emit_event([(path.clone(), Some(PathEventKind::Created))]);
                Ok(path)
            }
            Err(_) => {
                // For now we'll just assume that this failed because it was a
                // collision error, which I think that, for the time being, is
                // the only case where this could fail?
                Err(TrashRestoreError::Collision { path })
            }
        }
    }

    #[cfg(feature = "test-support")]
    fn as_fake(&self) -> Arc<FakeFs> {
        self.this.upgrade().unwrap()
    }
}
3159
3160pub async fn copy_recursive<'a>(
3161 fs: &'a dyn Fs,
3162 source: &'a Path,
3163 target: &'a Path,
3164 options: CopyOptions,
3165) -> Result<()> {
3166 for (item, is_dir) in read_dir_items(fs, source).await? {
3167 let Ok(item_relative_path) = item.strip_prefix(source) else {
3168 continue;
3169 };
3170 let target_item = if item_relative_path == Path::new("") {
3171 target.to_path_buf()
3172 } else {
3173 target.join(item_relative_path)
3174 };
3175 if is_dir {
3176 if !options.overwrite && fs.metadata(&target_item).await.is_ok_and(|m| m.is_some()) {
3177 if options.ignore_if_exists {
3178 continue;
3179 } else {
3180 anyhow::bail!("{target_item:?} already exists");
3181 }
3182 }
3183 let _ = fs
3184 .remove_dir(
3185 &target_item,
3186 RemoveOptions {
3187 recursive: true,
3188 ignore_if_not_exists: true,
3189 },
3190 )
3191 .await;
3192 fs.create_dir(&target_item).await?;
3193 } else {
3194 fs.copy_file(&item, &target_item, options).await?;
3195 }
3196 }
3197 Ok(())
3198}
3199
3200/// Recursively reads all of the paths in the given directory.
3201///
3202/// Returns a vector of tuples of (path, is_dir).
3203pub async fn read_dir_items<'a>(fs: &'a dyn Fs, source: &'a Path) -> Result<Vec<(PathBuf, bool)>> {
3204 let mut items = Vec::new();
3205 read_recursive(fs, source, &mut items).await?;
3206 Ok(items)
3207}
3208
/// Depth-first helper for [`read_dir_items`]: appends `(path, is_dir)` for
/// `source` and, when it is a directory, for everything beneath it.
///
/// Returns a `BoxFuture` because async functions cannot recurse directly —
/// the recursive call would make the future type infinitely sized.
fn read_recursive<'a>(
    fs: &'a dyn Fs,
    source: &'a Path,
    output: &'a mut Vec<(PathBuf, bool)>,
) -> BoxFuture<'a, Result<()>> {
    use futures::future::FutureExt;

    async move {
        let metadata = fs
            .metadata(source)
            .await?
            .with_context(|| format!("path does not exist: {source:?}"))?;

        if metadata.is_dir {
            // The directory itself is recorded before its contents.
            output.push((source.to_path_buf(), true));
            let mut children = fs.read_dir(source).await?;
            while let Some(child_path) = children.next().await {
                // NOTE(review): erroneous directory entries are silently
                // skipped here — presumably a deliberate best-effort choice.
                if let Ok(child_path) = child_path {
                    read_recursive(fs, &child_path, output).await?;
                }
            }
        } else {
            output.push((source.to_path_buf(), false));
        }
        Ok(())
    }
    .boxed()
}
3237
// todo(windows)
// can we get file id not open the file twice?
// https://github.com/rust-lang/rust/issues/63010
#[cfg(target_os = "windows")]
/// Returns a stable per-volume identifier for `path` (the NTFS file index,
/// analogous to a Unix inode number), combining the high and low 32-bit
/// halves reported by `GetFileInformationByHandle` into one `u64`.
async fn file_id(path: impl AsRef<Path>) -> Result<u64> {
    use std::os::windows::io::AsRawHandle;

    use smol::fs::windows::OpenOptionsExt;
    use windows::Win32::{
        Foundation::HANDLE,
        Storage::FileSystem::{
            BY_HANDLE_FILE_INFORMATION, FILE_FLAG_BACKUP_SEMANTICS, GetFileInformationByHandle,
        },
    };

    // FILE_FLAG_BACKUP_SEMANTICS — presumably so directories can be opened
    // as well as regular files; see the CreateFileW docs to confirm.
    let file = smol::fs::OpenOptions::new()
        .read(true)
        .custom_flags(FILE_FLAG_BACKUP_SEMANTICS.0)
        .open(path)
        .await?;

    let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() };
    // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle
    // This function supports Windows XP+
    smol::unblock(move || {
        // SAFETY: `file` is kept alive by the closure, so the raw handle is
        // valid for the duration of the call; `info` is a plain-data struct
        // that the API fully initializes on success.
        unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? };

        Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64))
    })
    .await
}
3269
#[cfg(target_os = "windows")]
/// Atomically replaces `replaced_file` with `replacement_file` using the
/// Win32 `ReplaceFileW` API. The destination is created first if missing,
/// since `ReplaceFileW` requires an existing file to replace.
fn atomic_replace<P: AsRef<Path>>(
    replaced_file: P,
    replacement_file: P,
) -> windows::core::Result<()> {
    use windows::{
        Win32::Storage::FileSystem::{REPLACE_FILE_FLAGS, ReplaceFileW},
        core::HSTRING,
    };

    // If the file does not exist, create it.
    // (The error is ignored: the file may already exist, which is fine.)
    let _ = std::fs::File::create_new(replaced_file.as_ref());

    // SAFETY: both HSTRING arguments are valid, NUL-terminated wide strings
    // that live for the duration of the call; all optional parameters are
    // intentionally None/default.
    unsafe {
        ReplaceFileW(
            &HSTRING::from(replaced_file.as_ref().to_string_lossy().into_owned()),
            &HSTRING::from(replacement_file.as_ref().to_string_lossy().into_owned()),
            None,
            REPLACE_FILE_FLAGS::default(),
            None,
            None,
        )
    }
}