1pub mod fs_watcher;
2
3use parking_lot::Mutex;
4use std::ffi::OsString;
5use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};
6use std::time::Instant;
7use util::maybe;
8
9use anyhow::{Context as _, Result, anyhow};
10use futures::stream::iter;
11use gpui::App;
12use gpui::BackgroundExecutor;
13use gpui::Global;
14use gpui::ReadGlobal as _;
15use gpui::SharedString;
16use std::borrow::Cow;
17#[cfg(unix)]
18use std::ffi::CString;
19use util::command::new_command;
20
21#[cfg(unix)]
22use std::os::fd::{AsFd, AsRawFd};
23#[cfg(unix)]
24use std::os::unix::ffi::OsStrExt;
25
26#[cfg(unix)]
27use std::os::unix::fs::{FileTypeExt, MetadataExt};
28
29#[cfg(any(target_os = "macos", target_os = "freebsd"))]
30use std::mem::MaybeUninit;
31
32use async_tar::Archive;
33use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture};
34use git::repository::{GitRepository, RealGitRepository};
35use is_executable::IsExecutable;
36use rope::Rope;
37use serde::{Deserialize, Serialize};
38use smol::io::AsyncWriteExt;
39#[cfg(feature = "test-support")]
40use std::path::Component;
41use std::{
42 io::{self, Write},
43 path::{Path, PathBuf},
44 pin::Pin,
45 sync::Arc,
46 time::{Duration, SystemTime, UNIX_EPOCH},
47};
48use tempfile::TempDir;
49use text::LineEnding;
50
51#[cfg(feature = "test-support")]
52mod fake_git_repo;
53#[cfg(feature = "test-support")]
54use collections::{BTreeMap, btree_map};
55#[cfg(feature = "test-support")]
56use fake_git_repo::{FakeCommitDataEntry, FakeGitRepositoryState};
57#[cfg(feature = "test-support")]
58use git::{
59 repository::{CommitData, InitialGraphCommitData, RepoPath, Worktree, repo_path},
60 status::{FileStatus, StatusCode, TrackedStatus, UnmergedStatus},
61};
62#[cfg(feature = "test-support")]
63use util::normalize_path;
64
65#[cfg(feature = "test-support")]
66use smol::io::AsyncReadExt;
67#[cfg(feature = "test-support")]
68use std::ffi::OsStr;
69
/// Registers and unregisters paths with a file-change watching backend.
pub trait Watcher: Send + Sync {
    /// Start watching `path` for changes.
    fn add(&self, path: &Path) -> Result<()>;
    /// Stop watching `path`.
    fn remove(&self, path: &Path) -> Result<()>;
}
74
75/// Detect whether a path requires polling instead of native file watching.
76///
77/// Returns `true` for filesystem types where inotify/FSEvents/ReadDirectoryChanges
78/// silently fail to deliver events: 9P (WSL drvfs), NFS, CIFS/SMB, FUSE (sshfs), etc.
79///
80/// Can be overridden with the `ZED_FILE_WATCHER_MODE` environment variable:
81/// - `native` — always use native OS watcher
82/// - `poll` — always use polling
83/// - `auto` (default) — auto-detect based on filesystem type
84pub fn requires_poll_watcher(path: &Path) -> bool {
85 match std::env::var("ZED_FILE_WATCHER_MODE")
86 .as_deref()
87 .unwrap_or("auto")
88 {
89 "native" => return false,
90 "poll" => return true,
91 _ => {}
92 }
93
94 #[cfg(target_os = "linux")]
95 {
96 let path = effective_watch_path(path);
97 return detect_requires_poll_watcher_linux(&path);
98 }
99
100 #[cfg(not(target_os = "linux"))]
101 {
102 let _ = path;
103 false
104 }
105}
106
/// Returns the closest existing ancestor of `path`, including `path` itself,
/// which is the deepest location a filesystem watcher can attach to.
///
/// `Path::ancestors` yields `path` first, so an existing `path` is returned
/// unchanged (the previous explicit `path.exists()` pre-check duplicated the
/// loop's first iteration and has been removed). If nothing up to the root
/// exists — e.g. a relative path whose head is missing — the original path is
/// returned as a fallback.
pub fn effective_watch_path(path: &Path) -> PathBuf {
    path.ancestors()
        .find(|ancestor| ancestor.exists())
        .unwrap_or(path)
        .to_path_buf()
}
120
/// Linux-only auto-detection backing [`requires_poll_watcher`].
///
/// Reads the filesystem's magic number with `statfs(2)` and returns `true`
/// for filesystem types known not to deliver inotify events. Any failure to
/// inspect the path (interior NUL in the path, `statfs` error) conservatively
/// returns `false`, i.e. the native watcher is used.
#[cfg(target_os = "linux")]
fn detect_requires_poll_watcher_linux(path: &Path) -> bool {
    use std::ffi::CString;
    use std::os::unix::ffi::OsStrExt;

    // Check filesystem type via statfs
    let c_path = match CString::new(path.as_os_str().as_bytes()) {
        Ok(p) => p,
        Err(_) => return false,
    };

    // SAFETY: zeroed statfs is a valid "empty" value; `statfs` is given a
    // valid NUL-terminated path and a valid out-pointer.
    let mut stat: libc::statfs = unsafe { std::mem::zeroed() };
    if unsafe { libc::statfs(c_path.as_ptr(), &mut stat) } != 0 {
        return false;
    }

    // Filesystem magic numbers where inotify does not deliver events.
    // These are defined in linux/magic.h and statfs(2).
    const V9FS_MAGIC: i64 = 0x01021997; // Plan 9 / WSL2 interop (drvfs)
    const NFS_SUPER_MAGIC: i64 = 0x6969;
    const CIFS_MAGIC: i64 = 0xFF534D42u32 as i64; // CIFS/SMB
    const SMB_SUPER_MAGIC: i64 = 0x517B;
    const SMB2_MAGIC: i64 = 0xFE534D42u32 as i64;
    const FUSE_SUPER_MAGIC: i64 = 0x65735546; // FUSE (includes sshfs)

    let fs_type = stat.f_type;
    if fs_type == V9FS_MAGIC
        || fs_type == NFS_SUPER_MAGIC
        || fs_type == CIFS_MAGIC
        || fs_type == SMB_SUPER_MAGIC
        || fs_type == SMB2_MAGIC
        || fs_type == FUSE_SUPER_MAGIC
    {
        log::info!(
            "Detected network/virtual filesystem (type 0x{:x}) at {}, using poll watcher",
            fs_type,
            path.display()
        );
        return true;
    }

    // Also check for WSL + /mnt/<drive>/ pattern as a fallback
    // in case statfs returns an unexpected type for drvfs
    if is_wsl_drvfs_path(path) {
        log::info!(
            "Detected WSL drvfs mount at {}, using poll watcher",
            path.display()
        );
        return true;
    }

    false
}
174
175#[cfg(target_os = "linux")]
176fn is_wsl_drvfs_path(path: &Path) -> bool {
177 // Only relevant inside WSL
178 if std::env::var_os("WSL_DISTRO_NAME").is_none() {
179 if let Ok(version) = std::fs::read_to_string("/proc/version") {
180 let v = version.to_lowercase();
181 if !v.contains("microsoft") && !v.contains("wsl") {
182 return false;
183 }
184 } else {
185 return false;
186 }
187 }
188
189 // Windows drives are mounted at /mnt/c, /mnt/d, etc.
190 let path_str = match path.to_str() {
191 Some(s) => s,
192 None => return false,
193 };
194 if !path_str.starts_with("/mnt/") || path_str.len() < 6 {
195 return false;
196 }
197 let after_mnt = &path_str[5..];
198 after_mnt.starts_with(|c: char| c.is_ascii_alphabetic())
199 && (after_mnt.len() == 1 || after_mnt.as_bytes()[1] == b'/')
200}
201
/// The kind of filesystem change reported by a watcher.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum PathEventKind {
    /// The path was deleted.
    Removed,
    /// The path was newly created.
    Created,
    /// The path's contents or metadata changed.
    Changed,
    /// The path should be rescanned.
    Rescan,
}

/// A single filesystem event delivered by [`Fs::watch`].
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct PathEvent {
    /// The affected path.
    pub path: PathBuf,
    /// What happened to the path, if the backend reported it.
    pub kind: Option<PathEventKind>,
}

impl From<PathEvent> for PathBuf {
    /// Discards the event kind, keeping only the affected path.
    fn from(event: PathEvent) -> Self {
        event.path
    }
}
221
/// Filesystem abstraction used throughout the app, allowing the real OS
/// filesystem to be swapped for a fake implementation in tests (see
/// [`Fs::is_fake`] and [`Fs::as_fake`]).
#[async_trait::async_trait]
pub trait Fs: Send + Sync {
    /// Creates the directory at `path`.
    async fn create_dir(&self, path: &Path) -> Result<()>;
    /// Creates a symlink at `path` pointing to `target`.
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()>;
    /// Creates a file at `path` according to `options`.
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
    /// Creates a file at `path`, streaming `content` into it.
    async fn create_file_with(
        &self,
        path: &Path,
        content: Pin<&mut (dyn AsyncRead + Send)>,
    ) -> Result<()>;
    /// Unpacks the tar archive `content` into the directory at `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()>;
    /// Copies the file at `source` to `target` according to `options`.
    async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
    /// Renames (moves) `source` to `target` according to `options`.
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;

    /// Removes a directory from the filesystem.
    /// There is no expectation that the directory will be preserved in the
    /// system trash.
    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;

    /// Moves a file or directory to the system trash.
    /// Returns a [`TrashedEntry`] that can be used to keep track of the
    /// location of the trashed item in the system's trash.
    async fn trash(&self, path: &Path, options: RemoveOptions) -> Result<TrashedEntry>;

    /// Removes a file from the filesystem.
    /// There is no expectation that the file will be preserved in the system
    /// trash.
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;

    /// Opens `path`, returning a handle that can later report its current path.
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>>;
    /// Opens `path` for synchronous (blocking) reads.
    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>>;
    /// Reads the file at `path` as UTF-8 text.
    async fn load(&self, path: &Path) -> Result<String> {
        Ok(String::from_utf8(self.load_bytes(path).await?)?)
    }
    /// Reads the raw bytes of the file at `path`.
    async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>>;
    /// Replaces the contents of `path` with `text` atomically.
    async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
    /// Writes `text` to `path` using the given line endings.
    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()>;
    /// Writes `content` to `path`, replacing any existing file.
    async fn write(&self, path: &Path, content: &[u8]) -> Result<()>;
    /// Returns the canonical, symlink-free form of `path`.
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
    /// Whether `path` exists and is a regular file.
    async fn is_file(&self, path: &Path) -> bool;
    /// Whether `path` exists and is a directory.
    async fn is_dir(&self, path: &Path) -> bool;
    /// Returns metadata for `path`, or `Ok(None)` if it does not exist.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
    /// Returns the target of the symlink at `path`.
    async fn read_link(&self, path: &Path) -> Result<PathBuf>;
    /// Streams the entries of the directory at `path`.
    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;

    /// Watches `path` for changes, batching events with the given `latency`.
    /// Returns the event stream and a [`Watcher`] for adding/removing paths.
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    );

    /// Opens the git repository whose `.git` directory is at `abs_dot_git`.
    fn open_repo(
        &self,
        abs_dot_git: &Path,
        system_git_binary_path: Option<&Path>,
    ) -> Result<Arc<dyn GitRepository>>;
    /// Initializes a git repository at `abs_work_directory`.
    async fn git_init(&self, abs_work_directory: &Path, fallback_branch_name: String)
    -> Result<()>;
    /// Clones the repository at `repo_url` into `abs_work_directory`.
    async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()>;
    /// Whether this is the fake test implementation.
    fn is_fake(&self) -> bool;
    /// Whether the underlying filesystem is case-sensitive.
    async fn is_case_sensitive(&self) -> bool;
    /// Subscribes to start/completion events for long-running fs jobs.
    fn subscribe_to_jobs(&self) -> JobEventReceiver;

    /// Restores a given `TrashedEntry`, moving it from the system's trash back
    /// to the original path.
    async fn restore(
        &self,
        trashed_entry: TrashedEntry,
    ) -> std::result::Result<PathBuf, TrashRestoreError>;

    /// Downcasts to [`FakeFs`]; panics when called on a real filesystem.
    #[cfg(feature = "test-support")]
    fn as_fake(&self) -> Arc<FakeFs> {
        panic!("called as_fake on a real fs");
    }
}
307
// We use our own type rather than `trash::TrashItem` directly to avoid carrying
// over fields we don't need (e.g. `time_deleted`) and to insulate callers and
// tests from changes to that crate's API surface.
/// Represents a file or directory that has been moved to the system trash,
/// retaining enough information to restore it to its original location.
#[derive(Clone, PartialEq, Debug)]
pub struct TrashedEntry {
    /// Platform-specific identifier for the file/directory in the trash.
    ///
    /// * Freedesktop – Path to the `.trashinfo` file.
    /// * macOS & Windows – Full path to the file/directory in the system's
    /// trash.
    pub id: OsString,
    /// Name of the file/directory at the time of trashing, including extension.
    pub name: OsString,
    /// Absolute path to the parent directory at the time of trashing.
    pub original_parent: PathBuf,
}

impl From<trash::TrashItem> for TrashedEntry {
    /// Converts the `trash` crate's item into our slimmer representation,
    /// dropping fields we don't carry (e.g. `time_deleted`).
    fn from(item: trash::TrashItem) -> Self {
        Self {
            id: item.id,
            name: item.name,
            original_parent: item.original_parent,
        }
    }
}

impl TrashedEntry {
    /// Converts back into a `trash::TrashItem` so the `trash` crate can
    /// restore the entry.
    fn into_trash_item(self) -> trash::TrashItem {
        trash::TrashItem {
            id: self.id,
            name: self.name,
            original_parent: self.original_parent,
            // `TrashedEntry` doesn't preserve `time_deleted` as we don't
            // currently need it for restore, so we default it to 0 here.
            time_deleted: 0,
        }
    }
}

/// Errors that can occur when restoring a [`TrashedEntry`] via [`Fs::restore`].
#[derive(Debug, thiserror::Error)]
pub enum TrashRestoreError {
    /// The entry could not be located in the system's trash.
    #[error("The specified `path` ({}) was not found in the system's trash.", path.display())]
    NotFound { path: PathBuf },
    /// Something already exists at the path the entry would be restored to.
    #[error("File or directory ({}) already exists at the restore destination.", path.display())]
    Collision { path: PathBuf },
    /// Any other error reported by the underlying trash implementation.
    #[error("Unknown error ({description})")]
    Unknown { description: String },
}

impl From<trash::Error> for TrashRestoreError {
    /// Maps the `trash` crate's error type onto our restore errors; anything
    /// unrecognized is folded into `Unknown` with its display text.
    fn from(err: trash::Error) -> Self {
        match err {
            trash::Error::RestoreCollision { path, .. } => Self::Collision { path },
            trash::Error::Unknown { description } => Self::Unknown { description },
            other => Self::Unknown {
                description: other.to_string(),
            },
        }
    }
}
371
/// Newtype storing the app-wide [`Fs`] instance as a GPUI global.
struct GlobalFs(Arc<dyn Fs>);

impl Global for GlobalFs {}

impl dyn Fs {
    /// Returns the global [`Fs`].
    pub fn global(cx: &App) -> Arc<Self> {
        GlobalFs::global(cx).0.clone()
    }

    /// Sets the global [`Fs`].
    pub fn set_global(fs: Arc<Self>, cx: &mut App) {
        cx.set_global(GlobalFs(fs));
    }
}
387
/// Options for [`Fs::create_file`].
#[derive(Copy, Clone, Default)]
pub struct CreateOptions {
    /// Truncate the file if it already exists.
    pub overwrite: bool,
    /// Succeed silently if the file already exists (when not overwriting).
    pub ignore_if_exists: bool,
}

/// Options for [`Fs::copy_file`].
#[derive(Copy, Clone, Default)]
pub struct CopyOptions {
    /// Replace the target if it already exists.
    pub overwrite: bool,
    /// Succeed silently if the target already exists (when not overwriting).
    pub ignore_if_exists: bool,
}

/// Options for [`Fs::rename`].
#[derive(Copy, Clone, Default)]
pub struct RenameOptions {
    /// Replace the target if it already exists.
    pub overwrite: bool,
    /// Succeed silently if the target already exists (when not overwriting).
    pub ignore_if_exists: bool,
    /// Whether to create parent directories if they do not exist.
    pub create_parents: bool,
}

/// Options for [`Fs::remove_dir`] and [`Fs::remove_file`].
#[derive(Copy, Clone, Default)]
pub struct RemoveOptions {
    /// Remove directory contents recursively.
    pub recursive: bool,
    /// Succeed silently if the path does not exist.
    pub ignore_if_not_exists: bool,
}

/// A snapshot of a filesystem entry's metadata, as returned by [`Fs::metadata`].
#[derive(Copy, Clone, Debug)]
pub struct Metadata {
    /// Inode number (or platform equivalent) identifying the entry.
    pub inode: u64,
    /// Last modification time.
    pub mtime: MTime,
    /// Whether the entry itself is a symlink.
    pub is_symlink: bool,
    /// Whether the entry is a directory.
    pub is_dir: bool,
    /// Size in bytes.
    pub len: u64,
    /// Whether the entry is a FIFO (named pipe).
    pub is_fifo: bool,
    /// Whether the entry is executable.
    pub is_executable: bool,
}

/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
///
/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(transparent)]
pub struct MTime(SystemTime);
434
/// Identifier for a long-running filesystem job.
pub type JobId = usize;

/// Description of a filesystem job, delivered with [`JobEvent::Started`].
#[derive(Clone, Debug)]
pub struct JobInfo {
    /// When the job began.
    pub start: Instant,
    /// Human-readable description of the job.
    pub message: SharedString,
    /// Unique id, matched by the corresponding [`JobEvent::Completed`].
    pub id: JobId,
}

/// Lifecycle notifications for filesystem jobs (see [`Fs::subscribe_to_jobs`]).
#[derive(Debug, Clone)]
pub enum JobEvent {
    /// A job has started.
    Started { info: JobInfo },
    /// The job with the given id has finished.
    Completed { id: JobId },
}

/// Sending half of a job-event channel.
pub type JobEventSender = futures::channel::mpsc::UnboundedSender<JobEvent>;
/// Receiving half of a job-event channel.
pub type JobEventReceiver = futures::channel::mpsc::UnboundedReceiver<JobEvent>;

/// Guard that broadcasts [`JobEvent::Started`] on construction and
/// [`JobEvent::Completed`] on drop, so completion is reported even on early
/// return or unwind.
struct JobTracker {
    // Id reported in the `Completed` event.
    id: JobId,
    // Shared subscriber list; closed channels are pruned as sends fail.
    subscribers: Arc<Mutex<Vec<JobEventSender>>>,
}
457
impl JobTracker {
    /// Broadcasts [`JobEvent::Started`] to all subscribers, dropping any whose
    /// receiving end has been closed.
    fn new(info: JobInfo, subscribers: Arc<Mutex<Vec<JobEventSender>>>) -> Self {
        let id = info.id;
        {
            // Scope the lock so it is released before `Self` is constructed.
            let mut subs = subscribers.lock();
            subs.retain(|sender| {
                sender
                    .unbounded_send(JobEvent::Started { info: info.clone() })
                    .is_ok()
            });
        }
        Self { id, subscribers }
    }
}

impl Drop for JobTracker {
    /// Broadcasts [`JobEvent::Completed`] to all subscribers, again pruning
    /// any closed channels.
    fn drop(&mut self) {
        let mut subs = self.subscribers.lock();
        subs.retain(|sender| {
            sender
                .unbounded_send(JobEvent::Completed { id: self.id })
                .is_ok()
        });
    }
}
483
484impl MTime {
485 /// Conversion intended for persistence and testing.
486 pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
487 MTime(UNIX_EPOCH + Duration::new(secs, nanos))
488 }
489
490 /// Conversion intended for persistence.
491 pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
492 self.0
493 .duration_since(UNIX_EPOCH)
494 .ok()
495 .map(|duration| (duration.as_secs(), duration.subsec_nanos()))
496 }
497
498 /// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
499 /// "_for_user" is to discourage misuse - this method should not be used when making decisions
500 /// about file dirtiness.
501 pub fn timestamp_for_user(self) -> SystemTime {
502 self.0
503 }
504
505 /// Temporary method to split out the behavior changes from introduction of this newtype.
506 pub fn bad_is_greater_than(self, other: MTime) -> bool {
507 self.0 > other.0
508 }
509}
510
impl From<proto::Timestamp> for MTime {
    /// Converts a protocol timestamp received over the wire into an `MTime`.
    fn from(timestamp: proto::Timestamp) -> Self {
        MTime(timestamp.into())
    }
}

impl From<MTime> for proto::Timestamp {
    /// Converts an `MTime` into a protocol timestamp for transmission.
    fn from(mtime: MTime) -> Self {
        mtime.0.into()
    }
}
522
/// Production [`Fs`] implementation backed by the real OS filesystem.
pub struct RealFs {
    // Path to a bundled `git` binary, if one was supplied at construction.
    bundled_git_binary_path: Option<PathBuf>,
    // Executor used to run blocking filesystem calls off the calling task.
    executor: BackgroundExecutor,
    // Source of unique ids for filesystem jobs.
    next_job_id: Arc<AtomicUsize>,
    // Channels notified of job start/completion events.
    job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
    // Case-sensitivity state for `is_case_sensitive`; NOTE(review): the u8
    // encoding is defined by that impl, which is not visible in this chunk.
    is_case_sensitive: AtomicU8,
}

/// An open file that can report the path it currently resides at, even if it
/// was moved after being opened.
pub trait FileHandle: Send + Sync + std::fmt::Debug {
    /// Returns the file's current path, failing if it can no longer be
    /// determined (e.g. the file was deleted).
    fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf>;
}
534
535impl FileHandle for std::fs::File {
536 #[cfg(target_os = "macos")]
537 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
538 use std::{
539 ffi::{CStr, OsStr},
540 os::unix::ffi::OsStrExt,
541 };
542
543 let fd = self.as_fd();
544 let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit();
545
546 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) };
547 anyhow::ensure!(result != -1, "fcntl returned -1");
548
549 // SAFETY: `fcntl` will initialize the path buffer.
550 let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) };
551 anyhow::ensure!(!c_str.is_empty(), "Could find a path for the file handle");
552 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
553 Ok(path)
554 }
555
556 #[cfg(target_os = "linux")]
557 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
558 let fd = self.as_fd();
559 let fd_path = format!("/proc/self/fd/{}", fd.as_raw_fd());
560 let new_path = std::fs::read_link(fd_path)?;
561 if new_path
562 .file_name()
563 .is_some_and(|f| f.to_string_lossy().ends_with(" (deleted)"))
564 {
565 anyhow::bail!("file was deleted")
566 };
567
568 Ok(new_path)
569 }
570
571 #[cfg(target_os = "freebsd")]
572 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
573 use std::{
574 ffi::{CStr, OsStr},
575 os::unix::ffi::OsStrExt,
576 };
577
578 let fd = self.as_fd();
579 let mut kif = MaybeUninit::<libc::kinfo_file>::uninit();
580 kif.kf_structsize = libc::KINFO_FILE_SIZE;
581
582 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) };
583 anyhow::ensure!(result != -1, "fcntl returned -1");
584
585 // SAFETY: `fcntl` will initialize the kif.
586 let c_str = unsafe { CStr::from_ptr(kif.assume_init().kf_path.as_ptr()) };
587 anyhow::ensure!(!c_str.is_empty(), "Could find a path for the file handle");
588 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
589 Ok(path)
590 }
591
592 #[cfg(target_os = "windows")]
593 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
594 use std::ffi::OsString;
595 use std::os::windows::ffi::OsStringExt;
596 use std::os::windows::io::AsRawHandle;
597
598 use windows::Win32::Foundation::HANDLE;
599 use windows::Win32::Storage::FileSystem::{
600 FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW,
601 };
602
603 let handle = HANDLE(self.as_raw_handle() as _);
604
605 // Query required buffer size (in wide chars)
606 let required_len =
607 unsafe { GetFinalPathNameByHandleW(handle, &mut [], FILE_NAME_NORMALIZED) };
608 anyhow::ensure!(
609 required_len != 0,
610 "GetFinalPathNameByHandleW returned 0 length"
611 );
612
613 // Allocate buffer and retrieve the path
614 let mut buf: Vec<u16> = vec![0u16; required_len as usize + 1];
615 let written = unsafe { GetFinalPathNameByHandleW(handle, &mut buf, FILE_NAME_NORMALIZED) };
616 anyhow::ensure!(
617 written != 0,
618 "GetFinalPathNameByHandleW failed to write path"
619 );
620
621 let os_str: OsString = OsString::from_wide(&buf[..written as usize]);
622 anyhow::ensure!(!os_str.is_empty(), "Could find a path for the file handle");
623 Ok(PathBuf::from(os_str))
624 }
625}
626
/// Watcher handle for the real filesystem. NOTE(review): its [`Watcher`] impl
/// is not visible in this chunk.
pub struct RealWatcher {}
628
impl RealFs {
    /// Creates a real filesystem whose blocking operations run on `executor`.
    /// `git_binary_path` optionally points at a bundled `git` executable.
    pub fn new(git_binary_path: Option<PathBuf>, executor: BackgroundExecutor) -> Self {
        Self {
            bundled_git_binary_path: git_binary_path,
            executor,
            next_job_id: Arc::new(AtomicUsize::new(0)),
            job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
            is_case_sensitive: Default::default(),
        }
    }

    /// Windows-specific canonicalization that keeps the caller's drive letter
    /// or mount point instead of resolving to a UNC path.
    #[cfg(target_os = "windows")]
    fn canonicalize(path: &Path) -> Result<PathBuf> {
        use std::ffi::OsString;
        use std::os::windows::ffi::OsStringExt;
        use windows::Win32::Storage::FileSystem::GetVolumePathNameW;
        use windows::core::HSTRING;

        // std::fs::canonicalize resolves mapped network paths to UNC paths, which can
        // confuse some software. To mitigate this, we canonicalize the input, then rebase
        // the result onto the input's original volume root if both paths are on the same
        // volume. This keeps the same drive letter or mount point the caller used.

        let abs_path = if path.is_relative() {
            std::env::current_dir()?.join(path)
        } else {
            path.to_path_buf()
        };

        // Ask Windows which volume root (e.g. `C:\`) the path lives on.
        let path_hstring = HSTRING::from(abs_path.as_os_str());
        let mut vol_buf = vec![0u16; abs_path.as_os_str().len() + 2];
        unsafe { GetVolumePathNameW(&path_hstring, &mut vol_buf)? };
        let volume_root = {
            // The buffer is NUL-terminated; trim at the first NUL.
            let len = vol_buf
                .iter()
                .position(|&c| c == 0)
                .unwrap_or(vol_buf.len());
            PathBuf::from(OsString::from_wide(&vol_buf[..len]))
        };

        let resolved_path = dunce::canonicalize(&abs_path)?;
        let resolved_root = dunce::canonicalize(&volume_root)?;

        // Same volume: re-attach the caller's original root. Different volume
        // (prefix mismatch): fall back to the fully resolved path.
        if let Ok(relative) = resolved_path.strip_prefix(&resolved_root) {
            let mut result = volume_root;
            result.push(relative);
            Ok(result)
        } else {
            Ok(resolved_path)
        }
    }
}
681
/// Atomically renames `source` to `target`, failing if `target` already
/// exists (surfaced to the caller as `io::ErrorKind::AlreadyExists`).
///
/// Uses `renamex_np(RENAME_EXCL)` on macOS and `renameat2(RENAME_NOREPLACE)`
/// on Linux. Filesystems or kernels lacking support report errors such as
/// `ENOSYS`/`EINVAL`, which `RealFs::rename` handles with a fallback.
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn rename_without_replace(source: &Path, target: &Path) -> io::Result<()> {
    let source = path_to_c_string(source)?;
    let target = path_to_c_string(target)?;

    // SAFETY (both branches): `source` and `target` are valid NUL-terminated
    // C strings that live for the duration of the call.
    #[cfg(target_os = "macos")]
    let result = unsafe { libc::renamex_np(source.as_ptr(), target.as_ptr(), libc::RENAME_EXCL) };

    #[cfg(target_os = "linux")]
    let result = unsafe {
        libc::syscall(
            libc::SYS_renameat2,
            libc::AT_FDCWD,
            source.as_ptr(),
            libc::AT_FDCWD,
            target.as_ptr(),
            libc::RENAME_NOREPLACE,
        )
    };

    if result == 0 {
        Ok(())
    } else {
        Err(io::Error::last_os_error())
    }
}
708
/// Windows counterpart of `rename_without_replace`: `MoveFileExW` with no
/// flags (in particular without `MOVEFILE_REPLACE_EXISTING`) fails when the
/// target already exists.
#[cfg(target_os = "windows")]
fn rename_without_replace(source: &Path, target: &Path) -> io::Result<()> {
    use std::os::windows::ffi::OsStrExt;

    use windows::Win32::Storage::FileSystem::{MOVE_FILE_FLAGS, MoveFileExW};
    use windows::core::PCWSTR;

    // NUL-terminated UTF-16 strings for the Win32 API.
    let source: Vec<u16> = source.as_os_str().encode_wide().chain(Some(0)).collect();
    let target: Vec<u16> = target.as_os_str().encode_wide().chain(Some(0)).collect();

    unsafe {
        MoveFileExW(
            PCWSTR(source.as_ptr()),
            PCWSTR(target.as_ptr()),
            MOVE_FILE_FLAGS::default(),
        )
    }
    // NOTE(review): assumes the thread's last OS error is still the one set by
    // MoveFileExW when the `windows` crate call returns — confirm.
    .map_err(|_| io::Error::last_os_error())
}
728
/// Converts a path to a NUL-terminated C string for use with libc calls.
/// Fails with `InvalidInput` if the path contains an interior NUL byte, which
/// cannot be represented in a C path.
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn path_to_c_string(path: &Path) -> io::Result<CString> {
    match CString::new(path.as_os_str().as_bytes()) {
        Ok(c_string) => Ok(c_string),
        Err(_) => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            format!("path contains interior NUL: {}", path.display()),
        )),
    }
}
738
739#[async_trait::async_trait]
740impl Fs for RealFs {
741 async fn create_dir(&self, path: &Path) -> Result<()> {
742 Ok(smol::fs::create_dir_all(path).await?)
743 }
744
    /// Creates a symlink at `path` pointing to `target`.
    ///
    /// On Windows, directory targets get an NTFS junction (via `cmd /C mklink
    /// /J`) while file targets get a regular file symlink.
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
        #[cfg(unix)]
        smol::fs::unix::symlink(target, path).await?;

        #[cfg(windows)]
        if smol::fs::metadata(&target).await?.is_dir() {
            let status = new_command("cmd")
                .args(["/C", "mklink", "/J"])
                .args([path, target.as_path()])
                .status()
                .await?;

            if !status.success() {
                return Err(anyhow::anyhow!(
                    "Failed to create junction from {:?} to {:?}",
                    path,
                    target
                ));
            }
        } else {
            smol::fs::windows::symlink_file(target, path).await?
        }

        Ok(())
    }
770
771 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
772 let mut open_options = smol::fs::OpenOptions::new();
773 open_options.write(true).create(true);
774 if options.overwrite {
775 open_options.truncate(true);
776 } else if !options.ignore_if_exists {
777 open_options.create_new(true);
778 }
779 open_options
780 .open(path)
781 .await
782 .with_context(|| format!("Failed to create file at {:?}", path))?;
783 Ok(())
784 }
785
786 async fn create_file_with(
787 &self,
788 path: &Path,
789 content: Pin<&mut (dyn AsyncRead + Send)>,
790 ) -> Result<()> {
791 let mut file = smol::fs::File::create(&path)
792 .await
793 .with_context(|| format!("Failed to create file at {:?}", path))?;
794 futures::io::copy(content, &mut file).await?;
795 Ok(())
796 }
797
    /// Unpacks the tar archive `content` into the directory at `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()> {
        content.unpack(path).await?;
        Ok(())
    }
806
807 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
808 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
809 if options.ignore_if_exists {
810 return Ok(());
811 } else {
812 anyhow::bail!("{target:?} already exists");
813 }
814 }
815
816 smol::fs::copy(source, target).await?;
817 Ok(())
818 }
819
    /// Renames `source` to `target`.
    ///
    /// Without `overwrite`, first attempts an atomic no-replace rename
    /// (`rename_without_replace`); if the filesystem or kernel does not
    /// support it, falls back to a non-atomic existence check followed by a
    /// plain rename.
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
        if options.create_parents {
            if let Some(parent) = target.parent() {
                self.create_dir(parent).await?;
            }
        }

        if options.overwrite {
            // A plain rename replaces an existing target, which is exactly
            // the requested behavior.
            smol::fs::rename(source, target).await?;
            return Ok(());
        }

        let use_metadata_fallback = {
            #[cfg(any(target_os = "macos", target_os = "linux", target_os = "windows"))]
            {
                let source = source.to_path_buf();
                let target = target.to_path_buf();
                // Run the blocking syscall on the background executor.
                match self
                    .executor
                    .spawn(async move { rename_without_replace(&source, &target) })
                    .await
                {
                    Ok(()) => return Ok(()),
                    Err(error) if error.kind() == io::ErrorKind::AlreadyExists => {
                        if options.ignore_if_exists {
                            return Ok(());
                        }
                        return Err(error.into());
                    }
                    Err(error)
                        if error.raw_os_error().is_some_and(|code| {
                            code == libc::ENOSYS
                                || code == libc::ENOTSUP
                                || code == libc::EOPNOTSUPP
                                || code == libc::EINVAL
                        }) =>
                    {
                        // For case when filesystem or kernel does not support atomic no-overwrite rename.
                        // EINVAL is returned by FUSE-based filesystems (e.g. NTFS via ntfs-3g)
                        // that don't support RENAME_NOREPLACE.
                        true
                    }
                    Err(error) => return Err(error.into()),
                }
            }

            #[cfg(not(any(target_os = "macos", target_os = "linux", target_os = "windows")))]
            {
                // For platforms which do not have an atomic no-overwrite rename yet.
                true
            }
        };

        // Non-atomic fallback: check-then-rename. Racy by nature, but the
        // best available option when the atomic primitive is unsupported.
        if use_metadata_fallback && smol::fs::metadata(target).await.is_ok() {
            if options.ignore_if_exists {
                return Ok(());
            } else {
                anyhow::bail!("{target:?} already exists");
            }
        }

        smol::fs::rename(source, target).await?;
        Ok(())
    }
884
885 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
886 let result = if options.recursive {
887 smol::fs::remove_dir_all(path).await
888 } else {
889 smol::fs::remove_dir(path).await
890 };
891 match result {
892 Ok(()) => Ok(()),
893 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
894 Ok(())
895 }
896 Err(err) => Err(err)?,
897 }
898 }
899
    /// Removes the file at `path`. A missing path is tolerated when
    /// `ignore_if_not_exists` is set.
    ///
    /// On Windows, a symlink that points at a directory must be removed with
    /// the directory-removal API, so that case is handled first.
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        #[cfg(windows)]
        if let Ok(Some(metadata)) = self.metadata(path).await
            && metadata.is_symlink
            && metadata.is_dir
        {
            self.remove_dir(
                path,
                RemoveOptions {
                    recursive: false,
                    ignore_if_not_exists: true,
                },
            )
            .await?;
            return Ok(());
        }

        match smol::fs::remove_file(path).await {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
                Ok(())
            }
            Err(err) => Err(err)?,
        }
    }
925
    /// Moves `path` to the system trash, returning a [`TrashedEntry`] that
    /// can later be passed to [`Fs::restore`].
    async fn trash(&self, path: &Path, _options: RemoveOptions) -> Result<TrashedEntry> {
        // We must make the path absolute or trash will make a weird abomination
        // of the zed working directory (not usually the worktree) and whatever
        // the path variable holds.
        let path = self
            .canonicalize(path)
            .await
            .context("Could not canonicalize the path of the file")?;

        // The trash operation is run on a dedicated OS thread and its result
        // is awaited over a oneshot channel.
        let (tx, rx) = futures::channel::oneshot::channel();
        std::thread::Builder::new()
            .name("trash file or dir".to_string())
            .spawn(|| tx.send(trash::delete_with_info(path)))
            .expect("The os can spawn threads");

        Ok(rx
            .await
            .context("Tx dropped or fs.restore panicked")?
            .context("Could not trash file or dir")?
            .into())
    }
947
    /// Opens `path` for synchronous (blocking) reads.
    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
        Ok(Box::new(std::fs::File::open(path)?))
    }
951
    /// Opens `path` read-only as a [`FileHandle`] that can later report its
    /// current path.
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
        let mut options = std::fs::OpenOptions::new();
        options.read(true);
        #[cfg(windows)]
        {
            use std::os::windows::fs::OpenOptionsExt;
            // FILE_FLAG_BACKUP_SEMANTICS allows opening directory handles on Windows.
            options.custom_flags(windows::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS.0);
        }
        Ok(Arc::new(options.open(path)?))
    }
962
963 async fn load(&self, path: &Path) -> Result<String> {
964 let path = path.to_path_buf();
965 self.executor
966 .spawn(async move {
967 std::fs::read_to_string(&path)
968 .with_context(|| format!("Failed to read file {}", path.display()))
969 })
970 .await
971 }
972
973 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
974 let path = path.to_path_buf();
975 let bytes = self
976 .executor
977 .spawn(async move { std::fs::read(path) })
978 .await?;
979 Ok(bytes)
980 }
981
    /// Atomically replaces the contents of `path` with `data` by writing to a
    /// sibling temp file and persisting (renaming) it over the destination.
    #[cfg(not(target_os = "windows"))]
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        smol::unblock(move || {
            // Use the directory of the destination as temp dir to avoid
            // invalid cross-device link error, and XDG_CACHE_DIR for fallback.
            // See https://github.com/zed-industries/zed/pull/8437 for more details.
            let mut tmp_file =
                tempfile::NamedTempFile::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
            tmp_file.write_all(data.as_bytes())?;
            tmp_file.persist(path)?;
            anyhow::Ok(())
        })
        .await?;

        Ok(())
    }
998
    /// Windows variant of the atomic write: writes to a temp file next to the
    /// destination, then swaps it into place via `atomic_replace`.
    #[cfg(target_os = "windows")]
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        smol::unblock(move || {
            // If temp dir is set to a different drive than the destination,
            // we receive error:
            //
            // failed to persist temporary file:
            // The system cannot move the file to a different disk drive. (os error 17)
            //
            // This is because `ReplaceFileW` does not support cross volume moves.
            // See the remark section: "The backup file, replaced file, and replacement file must all reside on the same volume."
            // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-replacefilew#remarks
            //
            // So we use the directory of the destination as a temp dir to avoid it.
            // https://github.com/zed-industries/zed/issues/16571
            let temp_dir = TempDir::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
            let temp_file = {
                let temp_file_path = temp_dir.path().join("temp_file");
                let mut file = std::fs::File::create_new(&temp_file_path)?;
                file.write_all(data.as_bytes())?;
                temp_file_path
            };
            atomic_replace(path.as_path(), temp_file.as_path())?;
            anyhow::Ok(())
        })
        .await?;
        Ok(())
    }
1027
    /// Writes `text` to `path` with the given line endings, creating any
    /// missing parent directories first.
    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
        // Cap the write buffer at 10 KiB; smaller texts get an exact-size buffer.
        let buffer_size = text.summary().len.min(10 * 1024);
        if let Some(path) = path.parent() {
            self.create_dir(path)
                .await
                .with_context(|| format!("Failed to create directory at {:?}", path))?;
        }
        let file = smol::fs::File::create(path)
            .await
            .with_context(|| format!("Failed to create file at {:?}", path))?;
        let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
        for chunk in text::chunks_with_line_ending(text, line_ending) {
            writer.write_all(chunk.as_bytes()).await?;
        }
        writer.flush().await?;
        Ok(())
    }
1045
1046 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
1047 if let Some(path) = path.parent() {
1048 self.create_dir(path)
1049 .await
1050 .with_context(|| format!("Failed to create directory at {:?}", path))?;
1051 }
1052 let path = path.to_owned();
1053 let contents = content.to_owned();
1054 self.executor
1055 .spawn(async move {
1056 std::fs::write(path, contents)?;
1057 Ok(())
1058 })
1059 .await
1060 }
1061
    /// Resolves `path` to a canonical absolute path (symlinks and `.`/`..`
    /// removed), running the blocking call on the background executor.
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
        let path = path.to_owned();
        self.executor
            .spawn(async move {
                // Windows routes through an associated `Self::canonicalize`
                // helper (defined elsewhere in this file) — presumably to
                // avoid the `\\?\` verbatim prefix that `std::fs::canonicalize`
                // produces on Windows; TODO confirm.
                #[cfg(target_os = "windows")]
                let result = Self::canonicalize(&path);

                #[cfg(not(target_os = "windows"))]
                let result = std::fs::canonicalize(&path);

                result.with_context(|| format!("canonicalizing {path:?}"))
            })
            .await
    }
1076
1077 async fn is_file(&self, path: &Path) -> bool {
1078 let path = path.to_owned();
1079 self.executor
1080 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) })
1081 .await
1082 }
1083
1084 async fn is_dir(&self, path: &Path) -> bool {
1085 let path = path.to_owned();
1086 self.executor
1087 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) })
1088 .await
1089 }
1090
    /// Returns metadata for `path`, or `Ok(None)` when nothing exists there.
    ///
    /// Symlinks are reported with `is_symlink: true`; the remaining fields
    /// describe the link *target* when it resolves, falling back to the
    /// link's own metadata for broken or cyclic links.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        let path_buf = path.to_owned();
        // symlink_metadata observes the link itself rather than dereferencing.
        let symlink_metadata = match self
            .executor
            .spawn(async move { std::fs::symlink_metadata(&path_buf) })
            .await
        {
            Ok(metadata) => metadata,
            Err(err) => {
                return match err.kind() {
                    // NotADirectory arises for paths like `some_file/child`.
                    io::ErrorKind::NotFound | io::ErrorKind::NotADirectory => Ok(None),
                    _ => Err(anyhow::Error::new(err)),
                };
            }
        };

        let is_symlink = symlink_metadata.file_type().is_symlink();
        let metadata = if is_symlink {
            let path_buf = path.to_path_buf();
            // Read target metadata, if the target exists
            match self
                .executor
                .spawn(async move { std::fs::metadata(path_buf) })
                .await
            {
                Ok(target_metadata) => target_metadata,
                Err(err) => {
                    if err.kind() != io::ErrorKind::NotFound {
                        // TODO: Also FilesystemLoop when that's stable
                        log::warn!(
                            "Failed to read symlink target metadata for path {path:?}: {err}"
                        );
                    }
                    // For a broken or recursive symlink, return the symlink metadata. (Or
                    // as edge cases, a symlink into a directory we can't read, which is hard
                    // to distinguish from just being broken.)
                    symlink_metadata
                }
            }
        } else {
            symlink_metadata
        };

        #[cfg(unix)]
        let inode = metadata.ino();

        // Windows has no inode; a stable file id is fetched instead.
        #[cfg(windows)]
        let inode = file_id(path).await?;

        #[cfg(windows)]
        let is_fifo = false;

        #[cfg(unix)]
        let is_fifo = metadata.file_type().is_fifo();

        let path_buf = path.to_path_buf();
        // Executable detection touches the filesystem too, so keep it off the
        // calling thread.
        let is_executable = self
            .executor
            .spawn(async move { path_buf.is_executable() })
            .await;

        Ok(Some(Metadata {
            inode,
            mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)),
            len: metadata.len(),
            is_symlink,
            is_dir: metadata.file_type().is_dir(),
            is_fifo,
            is_executable,
        }))
    }
1162
1163 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
1164 let path = path.to_owned();
1165 let path = self
1166 .executor
1167 .spawn(async move { std::fs::read_link(&path) })
1168 .await?;
1169 Ok(path)
1170 }
1171
1172 async fn read_dir(
1173 &self,
1174 path: &Path,
1175 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
1176 let path = path.to_owned();
1177 let result = iter(
1178 self.executor
1179 .spawn(async move { std::fs::read_dir(path) })
1180 .await?,
1181 )
1182 .map(|entry| match entry {
1183 Ok(entry) => Ok(entry.path()),
1184 Err(error) => Err(anyhow!("failed to read dir entry {error:?}")),
1185 });
1186 Ok(Box::pin(result))
1187 }
1188
    /// Watches `path` for changes, returning a stream of event batches
    /// debounced by `latency`, plus the watcher handle for registering more
    /// paths.
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    ) {
        use util::{ResultExt as _, paths::SanitizedPath};
        let executor = self.executor.clone();

        // Some paths can't be watched natively (criteria live in
        // `requires_poll_watcher`); fall back to periodic polling for those.
        let use_poll = requires_poll_watcher(path);
        let watch_path = effective_watch_path(path);

        let (tx, rx) = smol::channel::unbounded();
        let pending_paths: Arc<Mutex<Vec<PathEvent>>> = Default::default();

        let mode = if use_poll {
            log::info!(
                "Using poll watcher ({}ms interval) for {}",
                fs_watcher::poll_interval().as_millis(),
                path.display()
            );
            telemetry::event!("fs_watcher_poll", path = path.display().to_string());
            fs_watcher::WatcherMode::Poll
        } else {
            fs_watcher::WatcherMode::Native
        };
        let watcher: Arc<dyn Watcher> = Arc::new(fs_watcher::FsWatcher::new(
            tx.clone(),
            pending_paths.clone(),
            mode,
        ));

        // A failed registration is logged, not fatal: the stream still
        // exists, it just won't deliver events for this path.
        if let Err(e) = watcher.add(&watch_path) {
            log::warn!(
                "Failed to watch {} using {}:\n{e}",
                path.display(),
                watch_path.display()
            );
        }

        // Check if path is a symlink and follow the target parent
        if let Some(mut target) = self.read_link(path).await.ok() {
            log::trace!("watch symlink {path:?} -> {target:?}");
            // Check if symlink target is relative path, if so make it absolute
            if target.is_relative()
                && let Some(parent) = path.parent()
            {
                target = parent.join(target);
                if let Ok(canonical) = self.canonicalize(&target).await {
                    target = SanitizedPath::new(&canonical).as_path().to_path_buf();
                }
            }
            watcher.add(&target).ok();
            if let Some(parent) = target.parent() {
                watcher.add(parent).log_err();
            }
        }

        (
            // Debounce: each channel wakeup waits `latency`, then drains
            // everything accumulated in `pending_paths`; empty batches are
            // filtered out.
            Box::pin(rx.filter_map({
                let watcher = watcher.clone();
                let executor = executor.clone();
                move |_| {
                    // Keep the watcher alive for as long as the stream is polled.
                    let _ = watcher.clone();
                    let pending_paths = pending_paths.clone();
                    let executor = executor.clone();
                    async move {
                        executor.timer(latency).await;
                        let paths = std::mem::take(&mut *pending_paths.lock());
                        (!paths.is_empty()).then_some(paths)
                    }
                }
            })),
            watcher,
        )
    }
1267
1268 fn open_repo(
1269 &self,
1270 dotgit_path: &Path,
1271 system_git_binary_path: Option<&Path>,
1272 ) -> Result<Arc<dyn GitRepository>> {
1273 Ok(Arc::new(RealGitRepository::new(
1274 dotgit_path,
1275 self.bundled_git_binary_path.clone(),
1276 system_git_binary_path.map(|path| path.to_path_buf()),
1277 self.executor.clone(),
1278 )?))
1279 }
1280
1281 async fn git_init(
1282 &self,
1283 abs_work_directory_path: &Path,
1284 fallback_branch_name: String,
1285 ) -> Result<()> {
1286 let config = new_command("git")
1287 .current_dir(abs_work_directory_path)
1288 .args(&["config", "--global", "--get", "init.defaultBranch"])
1289 .output()
1290 .await?;
1291
1292 let branch_name;
1293
1294 if config.status.success() && !config.stdout.is_empty() {
1295 branch_name = String::from_utf8_lossy(&config.stdout);
1296 } else {
1297 branch_name = Cow::Borrowed(fallback_branch_name.as_str());
1298 }
1299
1300 new_command("git")
1301 .current_dir(abs_work_directory_path)
1302 .args(&["init", "-b"])
1303 .arg(branch_name.trim())
1304 .output()
1305 .await?;
1306
1307 Ok(())
1308 }
1309
1310 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()> {
1311 let job_id = self.next_job_id.fetch_add(1, Ordering::SeqCst);
1312 let job_info = JobInfo {
1313 id: job_id,
1314 start: Instant::now(),
1315 message: SharedString::from(format!("Cloning {}", repo_url)),
1316 };
1317
1318 let _job_tracker = JobTracker::new(job_info, self.job_event_subscribers.clone());
1319
1320 let output = new_command("git")
1321 .current_dir(abs_work_directory)
1322 .args(&["clone", repo_url])
1323 .output()
1324 .await?;
1325
1326 if !output.status.success() {
1327 anyhow::bail!(
1328 "git clone failed: {}",
1329 String::from_utf8_lossy(&output.stderr)
1330 );
1331 }
1332
1333 Ok(())
1334 }
1335
    /// Reports whether this is the in-memory test implementation; always
    /// `false` for this type.
    fn is_fake(&self) -> bool {
        false
    }
1339
1340 fn subscribe_to_jobs(&self) -> JobEventReceiver {
1341 let (sender, receiver) = futures::channel::mpsc::unbounded();
1342 self.job_event_subscribers.lock().push(sender);
1343 receiver
1344 }
1345
    /// Checks whether the file system is case sensitive by attempting to create two files
    /// that have the same name except for the casing.
    ///
    /// It creates both files in a temporary directory it removes at the end.
    /// The result is cached in an atomic so the probe runs at most once per
    /// process (modulo a benign race).
    async fn is_case_sensitive(&self) -> bool {
        const UNINITIALIZED: u8 = 0;
        const CASE_SENSITIVE: u8 = 1;
        const NOT_CASE_SENSITIVE: u8 = 2;

        // Note we could CAS here, but really, if we race we do this work twice at worst which isn't a big deal.
        let load = self.is_case_sensitive.load(Ordering::Acquire);
        if load != UNINITIALIZED {
            return load == CASE_SENSITIVE;
        }
        let temp_dir = self.executor.spawn(async { TempDir::new() });
        let res = maybe!(async {
            let temp_dir = temp_dir.await?;
            let test_file_1 = temp_dir.path().join("case_sensitivity_test.tmp");
            let test_file_2 = temp_dir.path().join("CASE_SENSITIVITY_TEST.TMP");

            let create_opts = CreateOptions {
                overwrite: false,
                ignore_if_exists: false,
            };

            // Create file1
            self.create_file(&test_file_1, create_opts).await?;

            // Now check whether it's possible to create file2
            let case_sensitive = match self.create_file(&test_file_2, create_opts).await {
                Ok(_) => Ok(true),
                Err(e) => {
                    // AlreadyExists means the two names collided, i.e. the
                    // filesystem is case-insensitive; any other error is a
                    // genuine failure handled by the fallback below.
                    if let Some(io_error) = e.downcast_ref::<io::Error>() {
                        if io_error.kind() == io::ErrorKind::AlreadyExists {
                            Ok(false)
                        } else {
                            Err(e)
                        }
                    } else {
                        Err(e)
                    }
                }
            };

            temp_dir.close()?;
            case_sensitive
        }).await.unwrap_or_else(|e| {
            log::error!(
                "Failed to determine whether filesystem is case sensitive (falling back to true) due to error: {e:#}"
            );
            true
        });
        // Publish the answer so later calls are a single atomic load.
        self.is_case_sensitive.store(
            if res {
                CASE_SENSITIVE
            } else {
                NOT_CASE_SENSITIVE
            },
            Ordering::Release,
        );
        res
    }
1408
1409 async fn restore(
1410 &self,
1411 trashed_entry: TrashedEntry,
1412 ) -> std::result::Result<PathBuf, TrashRestoreError> {
1413 let restored_item_path = trashed_entry.original_parent.join(&trashed_entry.name);
1414
1415 let (tx, rx) = futures::channel::oneshot::channel();
1416 std::thread::Builder::new()
1417 .name("restore trashed item".to_string())
1418 .spawn(move || {
1419 let res = trash::restore_all([trashed_entry.into_trash_item()]);
1420 tx.send(res)
1421 })
1422 .expect("The OS can spawn a threads");
1423 rx.await.expect("Restore all never panics")?;
1424 Ok(restored_item_path)
1425 }
1426}
1427
#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
impl Watcher for RealWatcher {
    /// No-op on these platforms — per-path registration is presumably not
    /// needed by the native watcher backend here (TODO confirm: e.g. macOS
    /// FSEvents watches recursively).
    fn add(&self, _: &Path) -> Result<()> {
        Ok(())
    }

    /// No-op counterpart to [`Self::add`].
    fn remove(&self, _: &Path) -> Result<()> {
        Ok(())
    }
}
1438
/// An in-memory filesystem implementation used by tests in place of the real
/// filesystem.
#[cfg(feature = "test-support")]
pub struct FakeFs {
    // Weak self-reference, allowing internals to hand out `Arc<Self>` without
    // creating a reference cycle.
    this: std::sync::Weak<Self>,
    // Use an unfair lock to ensure tests are deterministic.
    state: Arc<Mutex<FakeFsState>>,
    // Executor used for spawning background work deterministically in tests.
    executor: gpui::BackgroundExecutor,
}
1446
/// The mutable interior of [`FakeFs`]: the entry tree plus event-delivery and
/// test-introspection bookkeeping.
#[cfg(feature = "test-support")]
struct FakeFsState {
    // Root directory; all paths resolve from here.
    root: FakeFsEntry,
    // Counters used to fabricate unique inodes and strictly increasing mtimes.
    next_inode: u64,
    next_mtime: SystemTime,
    // Paths of fake-git mutations, forwarded into the normal event stream.
    git_event_tx: smol::channel::Sender<PathBuf>,
    // One (watched root, sender) pair per active watch subscription.
    event_txs: Vec<(PathBuf, smol::channel::Sender<Vec<PathEvent>>)>,
    // While true, events accumulate in `buffered_events` until flushed.
    events_paused: bool,
    buffered_events: Vec<PathEvent>,
    // Call counters exposed so tests can assert on filesystem access.
    metadata_call_count: usize,
    read_dir_call_count: usize,
    path_write_counts: std::collections::HashMap<PathBuf, usize>,
    // Keyed by inode — presumably records pending renames so moves can be
    // correlated; TODO confirm against the code that populates it.
    moves: std::collections::HashMap<u64, PathBuf>,
    job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
    // Trashed entries retained so they can later be restored.
    trash: Vec<(TrashedEntry, FakeFsEntry)>,
}
1463
/// A node in the fake filesystem tree.
#[cfg(feature = "test-support")]
#[derive(Clone, Debug)]
enum FakeFsEntry {
    /// A regular file with in-memory contents.
    File {
        inode: u64,
        mtime: MTime,
        // Byte length; kept in sync with `content` by writers.
        len: u64,
        content: Vec<u8>,
        // The path to the repository state directory, if this is a gitfile.
        git_dir_path: Option<PathBuf>,
    },
    /// A directory with named children; carries fake git state when it acts
    /// as a `.git` directory.
    Dir {
        inode: u64,
        mtime: MTime,
        len: u64,
        entries: BTreeMap<String, FakeFsEntry>,
        git_repo_state: Option<Arc<Mutex<FakeGitRepositoryState>>>,
    },
    /// A symbolic link to `target` (absolute or relative).
    Symlink {
        target: PathBuf,
    },
}
1486
#[cfg(feature = "test-support")]
impl PartialEq for FakeFsEntry {
    /// Structural equality, written by hand because `git_repo_state` holds an
    /// `Arc<Mutex<…>>`, which has no `PartialEq`; repository states compare
    /// by `Arc` pointer identity instead. Entries of different variants are
    /// never equal.
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (
                Self::File {
                    inode: l_inode,
                    mtime: l_mtime,
                    len: l_len,
                    content: l_content,
                    git_dir_path: l_git_dir_path,
                },
                Self::File {
                    inode: r_inode,
                    mtime: r_mtime,
                    len: r_len,
                    content: r_content,
                    git_dir_path: r_git_dir_path,
                },
            ) => {
                l_inode == r_inode
                    && l_mtime == r_mtime
                    && l_len == r_len
                    && l_content == r_content
                    && l_git_dir_path == r_git_dir_path
            }
            (
                Self::Dir {
                    inode: l_inode,
                    mtime: l_mtime,
                    len: l_len,
                    entries: l_entries,
                    git_repo_state: l_git_repo_state,
                },
                Self::Dir {
                    inode: r_inode,
                    mtime: r_mtime,
                    len: r_len,
                    entries: r_entries,
                    git_repo_state: r_git_repo_state,
                },
            ) => {
                // Pointer identity: two dirs are "the same repo" only if they
                // share the same state allocation.
                let same_repo_state = match (l_git_repo_state.as_ref(), r_git_repo_state.as_ref()) {
                    (Some(l), Some(r)) => Arc::ptr_eq(l, r),
                    (None, None) => true,
                    _ => false,
                };
                l_inode == r_inode
                    && l_mtime == r_mtime
                    && l_len == r_len
                    && l_entries == r_entries
                    && same_repo_state
            }
            (Self::Symlink { target: l_target }, Self::Symlink { target: r_target }) => {
                l_target == r_target
            }
            _ => false,
        }
    }
}
1547
#[cfg(feature = "test-support")]
impl FakeFsState {
    /// Returns the next synthetic mtime and advances the clock by
    /// `FakeFs::SYSTEMTIME_INTERVAL`, so successive writes are distinguishable.
    fn get_and_increment_mtime(&mut self) -> MTime {
        let mtime = self.next_mtime;
        self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
        MTime(mtime)
    }

    /// Returns the next unused inode number and advances the counter.
    fn get_and_increment_inode(&mut self) -> u64 {
        let inode = self.next_inode;
        self.next_inode += 1;
        inode
    }

    /// Resolves `target` against the in-memory tree, expanding `.`/`..` and
    /// symlinks (the final component only when `follow_symlink` is true).
    /// Returns `None` if any component is missing or a non-directory is
    /// traversed.
    fn canonicalize(&self, target: &Path, follow_symlink: bool) -> Option<PathBuf> {
        let mut canonical_path = PathBuf::new();
        let mut path = target.to_path_buf();
        let mut entry_stack = Vec::new();
        // Restarted via `continue 'outer` whenever a symlink substitutes a
        // new path to walk.
        'outer: loop {
            let mut path_components = path.components().peekable();
            let mut prefix = None;
            while let Some(component) = path_components.next() {
                match component {
                    Component::Prefix(prefix_component) => prefix = Some(prefix_component),
                    Component::RootDir => {
                        entry_stack.clear();
                        entry_stack.push(&self.root);
                        canonical_path.clear();
                        match prefix {
                            Some(prefix_component) => {
                                canonical_path = PathBuf::from(prefix_component.as_os_str());
                                // Prefixes like `C:\\` are represented without their trailing slash, so we have to re-add it.
                                canonical_path.push(std::path::MAIN_SEPARATOR_STR);
                            }
                            None => canonical_path = PathBuf::from(std::path::MAIN_SEPARATOR_STR),
                        }
                    }
                    Component::CurDir => {}
                    Component::ParentDir => {
                        entry_stack.pop()?;
                        canonical_path.pop();
                    }
                    Component::Normal(name) => {
                        let current_entry = *entry_stack.last()?;
                        if let FakeFsEntry::Dir { entries, .. } = current_entry {
                            let entry = entries.get(name.to_str().unwrap())?;
                            if (path_components.peek().is_some() || follow_symlink)
                                && let FakeFsEntry::Symlink { target, .. } = entry
                            {
                                // Splice the unconsumed components onto the
                                // link target and resolve from the top.
                                let mut target = target.clone();
                                target.extend(path_components);
                                path = target;
                                continue 'outer;
                            }
                            entry_stack.push(entry);
                            canonical_path = canonical_path.join(name);
                        } else {
                            return None;
                        }
                    }
                }
            }
            break;
        }

        if entry_stack.is_empty() {
            None
        } else {
            Some(canonical_path)
        }
    }

    /// Canonicalizes `target` and returns a mutable reference to the entry it
    /// names, together with the canonical path; `None` if it doesn't exist.
    fn try_entry(
        &mut self,
        target: &Path,
        follow_symlink: bool,
    ) -> Option<(&mut FakeFsEntry, PathBuf)> {
        let canonical_path = self.canonicalize(target, follow_symlink)?;

        // A canonical path always begins with (an optional prefix and) a root.
        let mut components = canonical_path
            .components()
            .skip_while(|component| matches!(component, Component::Prefix(_)));
        let Some(Component::RootDir) = components.next() else {
            panic!(
                "the path {:?} was not canonicalized properly {:?}",
                target, canonical_path
            )
        };

        let mut entry = &mut self.root;
        for component in components {
            match component {
                Component::Normal(name) => {
                    if let FakeFsEntry::Dir { entries, .. } = entry {
                        entry = entries.get_mut(name.to_str().unwrap())?;
                    } else {
                        return None;
                    }
                }
                _ => {
                    panic!(
                        "the path {:?} was not canonicalized properly {:?}",
                        target, canonical_path
                    )
                }
            }
        }

        Some((entry, canonical_path))
    }

    /// Like [`Self::try_entry`] (following symlinks), but maps a missing path
    /// to an `io::ErrorKind::NotFound` error.
    fn entry(&mut self, target: &Path) -> Result<&mut FakeFsEntry> {
        Ok(self
            .try_entry(target, true)
            .ok_or_else(|| {
                anyhow!(io::Error::new(
                    io::ErrorKind::NotFound,
                    format!("not found: {target:?}")
                ))
            })?
            .0)
    }

    /// Locates the parent directory of `path` and hands `callback` the
    /// (vacant or occupied) map slot for its final component.
    fn write_path<Fn, T>(&mut self, path: &Path, callback: Fn) -> Result<T>
    where
        Fn: FnOnce(btree_map::Entry<String, FakeFsEntry>) -> Result<T>,
    {
        let path = normalize_path(path);
        let filename = path.file_name().context("cannot overwrite the root")?;
        let parent_path = path.parent().unwrap();

        let parent = self.entry(parent_path)?;
        let new_entry = parent
            .dir_entries(parent_path)?
            .entry(filename.to_str().unwrap().into());
        callback(new_entry)
    }

    /// Queues change events for `paths`, delivering them immediately unless
    /// event delivery is paused.
    fn emit_event<I, T>(&mut self, paths: I)
    where
        I: IntoIterator<Item = (T, Option<PathEventKind>)>,
        T: Into<PathBuf>,
    {
        self.buffered_events
            .extend(paths.into_iter().map(|(path, kind)| PathEvent {
                path: path.into(),
                kind,
            }));

        if !self.events_paused {
            self.flush_events(self.buffered_events.len());
        }
    }

    /// Sends up to `count` buffered events to every live watcher, pruning
    /// watchers whose channels have closed.
    fn flush_events(&mut self, mut count: usize) {
        count = count.min(self.buffered_events.len());
        let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
        self.event_txs.retain(|(_, tx)| {
            let _ = tx.try_send(events.clone());
            !tx.is_closed()
        });
    }
}
1711
/// The literal `.git` file name, cached as a static `OsStr` so path
/// components can be compared against it without converting/allocating.
#[cfg(feature = "test-support")]
pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
    std::sync::LazyLock::new(|| OsStr::new(".git"));
1715
1716#[cfg(feature = "test-support")]
1717impl FakeFs {
    /// We need to use something large enough for Windows and Unix to consider this a new file.
    /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
    /// (Windows `SystemTime` has 100 ns resolution, so steps smaller than
    /// this could collapse into the same timestamp there.)
    const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);
1721
    /// Creates an empty fake filesystem rooted at `/`, plus a background task
    /// that forwards fake-git events into the regular event stream.
    pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
        let (tx, rx) = smol::channel::bounded::<PathBuf>(10);

        // new_cyclic lets `this` hold a Weak back-reference to itself.
        let this = Arc::new_cyclic(|this| Self {
            this: this.clone(),
            executor: executor.clone(),
            state: Arc::new(Mutex::new(FakeFsState {
                root: FakeFsEntry::Dir {
                    inode: 0,
                    mtime: MTime(UNIX_EPOCH),
                    len: 0,
                    entries: Default::default(),
                    git_repo_state: None,
                },
                git_event_tx: tx,
                next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
                next_inode: 1,
                event_txs: Default::default(),
                buffered_events: Vec::new(),
                events_paused: false,
                read_dir_call_count: 0,
                metadata_call_count: 0,
                path_write_counts: Default::default(),
                moves: Default::default(),
                job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
                trash: Vec::new(),
            })),
        });

        executor.spawn({
            let this = this.clone();
            async move {
                while let Ok(git_event) = rx.recv().await {
                    // try_lock rather than lock: blocking here while a test
                    // holds the state lock would hang the deterministic
                    // executor, so fail loudly instead.
                    if let Some(mut state) = this.state.try_lock() {
                        state.emit_event([(git_event, Some(PathEventKind::Changed))]);
                    } else {
                        panic!("Failed to lock file system state, this execution would have caused a test hang");
                    }
                }
            }
        }).detach();

        this
    }
1766
1767 pub fn set_next_mtime(&self, next_mtime: SystemTime) {
1768 let mut state = self.state.lock();
1769 state.next_mtime = next_mtime;
1770 }
1771
1772 pub fn get_and_increment_mtime(&self) -> MTime {
1773 let mut state = self.state.lock();
1774 state.get_and_increment_mtime()
1775 }
1776
    /// Bumps the mtime of the entry at `path`, creating an empty file if
    /// nothing exists there (symlinks are left untouched), and emits a
    /// `Changed` event. Panics on invalid paths.
    pub async fn touch_path(&self, path: impl AsRef<Path>) {
        let mut state = self.state.lock();
        let path = path.as_ref();
        let new_mtime = state.get_and_increment_mtime();
        let new_inode = state.get_and_increment_inode();
        state
            .write_path(path, move |entry| {
                match entry {
                    btree_map::Entry::Vacant(e) => {
                        e.insert(FakeFsEntry::File {
                            inode: new_inode,
                            mtime: new_mtime,
                            content: Vec::new(),
                            len: 0,
                            git_dir_path: None,
                        });
                    }
                    btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut() {
                        FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
                        FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
                        FakeFsEntry::Symlink { .. } => {}
                    },
                }
                Ok(())
            })
            .unwrap();
        state.emit_event([(path.to_path_buf(), Some(PathEventKind::Changed))]);
    }
1805
    /// Creates or overwrites the file at `path` with `content`, assigning a
    /// fresh inode; panics on invalid paths (tests treat that as a bug).
    pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
        self.write_file_internal(path, content, true).unwrap()
    }
1809
1810 pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
1811 let mut state = self.state.lock();
1812 let path = path.as_ref();
1813 let file = FakeFsEntry::Symlink { target };
1814 state
1815 .write_path(path.as_ref(), move |e| match e {
1816 btree_map::Entry::Vacant(e) => {
1817 e.insert(file);
1818 Ok(())
1819 }
1820 btree_map::Entry::Occupied(mut e) => {
1821 *e.get_mut() = file;
1822 Ok(())
1823 }
1824 })
1825 .unwrap();
1826 state.emit_event([(path, Some(PathEventKind::Created))]);
1827 }
1828
    /// Creates or replaces the file at `path` with `new_content`, bumping its
    /// mtime and per-path write count, and emits a `Created`/`Changed` event.
    ///
    /// `recreate_inode` controls whether an existing file receives a fresh
    /// inode (as if replaced) or keeps its old one (as if written in place).
    /// Fails if the path exists but is not a file.
    fn write_file_internal(
        &self,
        path: impl AsRef<Path>,
        new_content: Vec<u8>,
        recreate_inode: bool,
    ) -> Result<()> {
        // Non-generic worker so the generic wrapper stays tiny.
        fn inner(
            this: &FakeFs,
            path: &Path,
            new_content: Vec<u8>,
            recreate_inode: bool,
        ) -> Result<()> {
            let mut state = this.state.lock();
            let path_buf = path.to_path_buf();
            *state.path_write_counts.entry(path_buf).or_insert(0) += 1;
            let new_inode = state.get_and_increment_inode();
            let new_mtime = state.get_and_increment_mtime();
            let new_len = new_content.len() as u64;
            let mut kind = None;
            state.write_path(path, |entry| {
                match entry {
                    btree_map::Entry::Vacant(e) => {
                        kind = Some(PathEventKind::Created);
                        e.insert(FakeFsEntry::File {
                            inode: new_inode,
                            mtime: new_mtime,
                            len: new_len,
                            content: new_content,
                            git_dir_path: None,
                        });
                    }
                    btree_map::Entry::Occupied(mut e) => {
                        kind = Some(PathEventKind::Changed);
                        if let FakeFsEntry::File {
                            inode,
                            mtime,
                            len,
                            content,
                            ..
                        } = e.get_mut()
                        {
                            *mtime = new_mtime;
                            *content = new_content;
                            *len = new_len;
                            if recreate_inode {
                                *inode = new_inode;
                            }
                        } else {
                            anyhow::bail!("not a file")
                        }
                    }
                }
                Ok(())
            })?;
            state.emit_event([(path, kind)]);
            Ok(())
        }
        inner(self, path.as_ref(), new_content, recreate_inode)
    }
1888
1889 pub fn read_file_sync(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1890 let path = path.as_ref();
1891 let path = normalize_path(path);
1892 let mut state = self.state.lock();
1893 let entry = state.entry(&path)?;
1894 entry.file_content(&path).cloned()
1895 }
1896
1897 async fn load_internal(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1898 let path = path.as_ref();
1899 let path = normalize_path(path);
1900 self.simulate_random_delay().await;
1901 let mut state = self.state.lock();
1902 let entry = state.entry(&path)?;
1903 entry.file_content(&path).cloned()
1904 }
1905
    /// Stops delivering filesystem events; they buffer until flushed.
    pub fn pause_events(&self) {
        self.state.lock().events_paused = true;
    }
1909
    /// Resumes event delivery and immediately flushes everything buffered.
    pub fn unpause_events_and_flush(&self) {
        self.state.lock().events_paused = false;
        self.flush_events(usize::MAX);
    }
1914
    /// Number of events currently buffered (i.e. not yet delivered).
    pub fn buffered_event_count(&self) -> usize {
        self.state.lock().buffered_events.len()
    }
1918
    /// Discards all buffered events without delivering them.
    pub fn clear_buffered_events(&self) {
        self.state.lock().buffered_events.clear();
    }
1922
    /// Delivers up to `count` buffered events to all watchers.
    pub fn flush_events(&self, count: usize) {
        self.state.lock().flush_events(count);
    }
1926
    /// Returns a clone of the entry at `target`, following symlinks.
    pub(crate) fn entry(&self, target: &Path) -> Result<FakeFsEntry> {
        self.state.lock().entry(target).cloned()
    }
1930
    /// Inserts `new_entry` at `target`, replacing any existing entry; no
    /// events are emitted.
    pub(crate) fn insert_entry(&self, target: &Path, new_entry: FakeFsEntry) -> Result<()> {
        let mut state = self.state.lock();
        state.write_path(target, |entry| {
            match entry {
                btree_map::Entry::Vacant(vacant_entry) => {
                    vacant_entry.insert(new_entry);
                }
                btree_map::Entry::Occupied(mut occupied_entry) => {
                    occupied_entry.insert(new_entry);
                }
            }
            Ok(())
        })
    }
1945
    /// Builds a directory tree at `path` from a JSON description: objects are
    /// directories, strings are file contents, and `null` is an empty
    /// directory. Panics on any other JSON value.
    #[must_use]
    pub fn insert_tree<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        tree: serde_json::Value,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;
        use serde_json::Value::*;

        // Non-generic recursive worker; boxing the future breaks the
        // infinitely-sized recursive async type.
        fn inner<'a>(
            this: &'a FakeFs,
            path: Arc<Path>,
            tree: serde_json::Value,
        ) -> futures::future::BoxFuture<'a, ()> {
            async move {
                match tree {
                    Object(map) => {
                        this.create_dir(&path).await.unwrap();
                        for (name, contents) in map {
                            let mut path = PathBuf::from(path.as_ref());
                            path.push(name);
                            this.insert_tree(&path, contents).await;
                        }
                    }
                    Null => {
                        this.create_dir(&path).await.unwrap();
                    }
                    String(contents) => {
                        this.insert_file(&path, contents.into_bytes()).await;
                    }
                    _ => {
                        panic!("JSON object must contain only objects, strings, or null");
                    }
                }
            }
            .boxed()
        }
        inner(self, Arc::from(path.as_ref()), tree)
    }
1985
    /// Recursively copies the real file or directory at `src_path` into the
    /// fake filesystem at `path`. Panics on any real-filesystem I/O error.
    pub fn insert_tree_from_real_fs<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        src_path: impl 'a + AsRef<Path> + Send,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;

        async move {
            let path = path.as_ref();
            if std::fs::metadata(&src_path).unwrap().is_file() {
                let contents = std::fs::read(src_path).unwrap();
                self.insert_file(path, contents).await;
            } else {
                self.create_dir(path).await.unwrap();
                for entry in std::fs::read_dir(&src_path).unwrap() {
                    let entry = entry.unwrap();
                    self.insert_tree_from_real_fs(path.join(entry.file_name()), entry.path())
                        .await;
                }
            }
        }
        .boxed()
    }
2009
    /// Runs `f` against the fake git repository reachable from `dot_git`,
    /// creating the repository state lazily on first use.
    ///
    /// `dot_git` may be either a `.git` directory or a gitfile
    /// (`gitdir: <path>`) as used by linked worktrees. `f` receives the
    /// repository state, the resolved git dir path, and the common dir path.
    /// When `emit_git_event` is set, a synthetic change event is emitted
    /// under the git dir afterwards.
    pub fn with_git_state_and_paths<T, F>(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        f: F,
    ) -> Result<T>
    where
        F: FnOnce(&mut FakeGitRepositoryState, &Path, &Path) -> T,
    {
        let mut state = self.state.lock();
        let git_event_tx = state.git_event_tx.clone();
        let entry = state.entry(dot_git).context("open .git")?;

        if let FakeFsEntry::Dir { git_repo_state, .. } = entry {
            let repo_state = git_repo_state.get_or_insert_with(|| {
                log::debug!("insert git state for {dot_git:?}");
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            let result = f(&mut repo_state, dot_git, dot_git);

            drop(repo_state);
            if emit_git_event {
                state.emit_event([(
                    dot_git.join("fake_git_repo_event"),
                    Some(PathEventKind::Changed),
                )]);
            }

            Ok(result)
        } else if let FakeFsEntry::File {
            content,
            git_dir_path,
            ..
        } = &mut *entry
        {
            // Gitfile case: parse the `gitdir:` pointer, caching the parsed
            // path back into the entry on first use.
            let path = match git_dir_path {
                Some(path) => path,
                None => {
                    let path = std::str::from_utf8(content)
                        .ok()
                        .and_then(|content| content.strip_prefix("gitdir:"))
                        .context("not a valid gitfile")?
                        .trim();
                    git_dir_path.insert(normalize_path(&dot_git.parent().unwrap().join(path)))
                }
            }
            .clone();
            let Some((git_dir_entry, canonical_path)) = state.try_entry(&path, true) else {
                anyhow::bail!("pointed-to git dir {path:?} not found")
            };
            let FakeFsEntry::Dir {
                git_repo_state,
                entries,
                ..
            } = git_dir_entry
            else {
                anyhow::bail!("gitfile points to a non-directory")
            };
            // A linked worktree's git dir has a `commondir` file naming the
            // primary repository's git dir; default to the dir itself.
            let common_dir = if let Some(child) = entries.get("commondir") {
                let raw = std::str::from_utf8(child.file_content("commondir".as_ref())?)
                    .context("commondir content")?
                    .trim();
                let raw_path = Path::new(raw);
                if raw_path.is_relative() {
                    normalize_path(&canonical_path.join(raw_path))
                } else {
                    raw_path.to_owned()
                }
            } else {
                canonical_path.clone()
            };
            let repo_state = git_repo_state.get_or_insert_with(|| {
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            let result = f(&mut repo_state, &canonical_path, &common_dir);

            if emit_git_event {
                drop(repo_state);
                state.emit_event([(
                    canonical_path.join("fake_git_repo_event"),
                    Some(PathEventKind::Changed),
                )]);
            }

            Ok(result)
        } else {
            anyhow::bail!("not a valid git repository");
        }
    }
2103
2104 pub fn with_git_state<T, F>(&self, dot_git: &Path, emit_git_event: bool, f: F) -> Result<T>
2105 where
2106 F: FnOnce(&mut FakeGitRepositoryState) -> T,
2107 {
2108 self.with_git_state_and_paths(dot_git, emit_git_event, |state, _, _| f(state))
2109 }
2110
2111 pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
2112 self.with_git_state(dot_git, true, |state| {
2113 let branch = branch.map(Into::into);
2114 state.branches.extend(branch.clone());
2115 state.current_branch_name = branch
2116 })
2117 .unwrap();
2118 }
2119
2120 pub fn set_remote_for_repo(
2121 &self,
2122 dot_git: &Path,
2123 name: impl Into<String>,
2124 url: impl Into<String>,
2125 ) {
2126 self.with_git_state(dot_git, true, |state| {
2127 state.remotes.insert(name.into(), url.into());
2128 })
2129 .unwrap();
2130 }
2131
2132 pub fn insert_branches(&self, dot_git: &Path, branches: &[&str]) {
2133 self.with_git_state(dot_git, true, |state| {
2134 if let Some(first) = branches.first()
2135 && state.current_branch_name.is_none()
2136 {
2137 state.current_branch_name = Some(first.to_string())
2138 }
2139 state
2140 .branches
2141 .extend(branches.iter().map(ToString::to_string));
2142 })
2143 .unwrap();
2144 }
2145
    /// Simulates `git worktree add`: records the worktree's ref in the repo
    /// state, creates the `.git/worktrees/<name>/` bookkeeping files (HEAD,
    /// commondir, gitdir), and creates the checkout directory with a gitfile
    /// pointing back at the bookkeeping dir.
    pub async fn add_linked_worktree_for_repo(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        worktree: Worktree,
    ) {
        let ref_name = worktree
            .ref_name
            .as_ref()
            .expect("linked worktree must have a ref_name");
        let branch_name = ref_name
            .strip_prefix("refs/heads/")
            .unwrap_or(ref_name.as_ref());

        // Create ref in git state.
        self.with_git_state(dot_git, false, |state| {
            state
                .refs
                .insert(ref_name.to_string(), worktree.sha.to_string());
        })
        .unwrap();

        // Create .git/worktrees/<name>/ directory with HEAD, commondir, and gitdir.
        let worktrees_entry_dir = dot_git.join("worktrees").join(branch_name);
        self.create_dir(&worktrees_entry_dir).await.unwrap();

        self.write_file_internal(
            worktrees_entry_dir.join("HEAD"),
            format!("ref: {ref_name}").into_bytes(),
            false,
        )
        .unwrap();

        // `commondir` points back at the primary repository's git dir.
        self.write_file_internal(
            worktrees_entry_dir.join("commondir"),
            dot_git.to_string_lossy().into_owned().into_bytes(),
            false,
        )
        .unwrap();

        let worktree_dot_git = worktree.path.join(".git");
        self.write_file_internal(
            worktrees_entry_dir.join("gitdir"),
            worktree_dot_git.to_string_lossy().into_owned().into_bytes(),
            false,
        )
        .unwrap();

        // Create the worktree checkout directory with a .git file pointing back.
        self.create_dir(&worktree.path).await.unwrap();

        self.write_file_internal(
            &worktree_dot_git,
            format!("gitdir: {}", worktrees_entry_dir.display()).into_bytes(),
            false,
        )
        .unwrap();

        if emit_git_event {
            self.with_git_state(dot_git, true, |_| {}).unwrap();
        }
    }
2208
    /// Simulates `git worktree remove`: deletes both the worktree checkout
    /// directory (located via the `gitdir` bookkeeping file) and the
    /// `.git/worktrees/<name>/` entry. Panics if the bookkeeping files are
    /// missing or malformed.
    pub async fn remove_worktree_for_repo(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        ref_name: &str,
    ) {
        let branch_name = ref_name.strip_prefix("refs/heads/").unwrap_or(ref_name);
        let worktrees_entry_dir = dot_git.join("worktrees").join(branch_name);

        // Read gitdir to find the worktree checkout path.
        let gitdir_content = self
            .load_internal(worktrees_entry_dir.join("gitdir"))
            .await
            .unwrap();
        let gitdir_str = String::from_utf8(gitdir_content).unwrap();
        // `gitdir` stores the path of the checkout's `.git` file, so its
        // parent is the checkout directory itself.
        let worktree_path = PathBuf::from(gitdir_str.trim())
            .parent()
            .map(PathBuf::from)
            .unwrap_or_default();

        // Remove the worktree checkout directory.
        self.remove_dir(
            &worktree_path,
            RemoveOptions {
                recursive: true,
                ignore_if_not_exists: true,
            },
        )
        .await
        .unwrap();

        // Remove the .git/worktrees/<name>/ directory.
        self.remove_dir(
            &worktrees_entry_dir,
            RemoveOptions {
                recursive: true,
                ignore_if_not_exists: false,
            },
        )
        .await
        .unwrap();

        if emit_git_event {
            // Touch the git state purely to broadcast a change notification.
            self.with_git_state(dot_git, true, |_| {}).unwrap();
        }
    }
2255
2256 pub fn set_unmerged_paths_for_repo(
2257 &self,
2258 dot_git: &Path,
2259 unmerged_state: &[(RepoPath, UnmergedStatus)],
2260 ) {
2261 self.with_git_state(dot_git, true, |state| {
2262 state.unmerged_paths.clear();
2263 state.unmerged_paths.extend(
2264 unmerged_state
2265 .iter()
2266 .map(|(path, content)| (path.clone(), *content)),
2267 );
2268 })
2269 .unwrap();
2270 }
2271
2272 pub fn set_index_for_repo(&self, dot_git: &Path, index_state: &[(&str, String)]) {
2273 self.with_git_state(dot_git, true, |state| {
2274 state.index_contents.clear();
2275 state.index_contents.extend(
2276 index_state
2277 .iter()
2278 .map(|(path, content)| (repo_path(path), content.clone())),
2279 );
2280 })
2281 .unwrap();
2282 }
2283
2284 pub fn set_head_for_repo(
2285 &self,
2286 dot_git: &Path,
2287 head_state: &[(&str, String)],
2288 sha: impl Into<String>,
2289 ) {
2290 self.with_git_state(dot_git, true, |state| {
2291 state.head_contents.clear();
2292 state.head_contents.extend(
2293 head_state
2294 .iter()
2295 .map(|(path, content)| (repo_path(path), content.clone())),
2296 );
2297 state.refs.insert("HEAD".into(), sha.into());
2298 })
2299 .unwrap();
2300 }
2301
2302 pub fn set_head_and_index_for_repo(&self, dot_git: &Path, contents_by_path: &[(&str, String)]) {
2303 self.with_git_state(dot_git, true, |state| {
2304 state.head_contents.clear();
2305 state.head_contents.extend(
2306 contents_by_path
2307 .iter()
2308 .map(|(path, contents)| (repo_path(path), contents.clone())),
2309 );
2310 state.index_contents = state.head_contents.clone();
2311 })
2312 .unwrap();
2313 }
2314
2315 pub fn set_merge_base_content_for_repo(
2316 &self,
2317 dot_git: &Path,
2318 contents_by_path: &[(&str, String)],
2319 ) {
2320 self.with_git_state(dot_git, true, |state| {
2321 use git::Oid;
2322
2323 state.merge_base_contents.clear();
2324 let oids = (1..)
2325 .map(|n| n.to_string())
2326 .map(|n| Oid::from_bytes(n.repeat(20).as_bytes()).unwrap());
2327 for ((path, content), oid) in contents_by_path.iter().zip(oids) {
2328 state.merge_base_contents.insert(repo_path(path), oid);
2329 state.oids.insert(oid, content.clone());
2330 }
2331 })
2332 .unwrap();
2333 }
2334
2335 pub fn set_blame_for_repo(&self, dot_git: &Path, blames: Vec<(RepoPath, git::blame::Blame)>) {
2336 self.with_git_state(dot_git, true, |state| {
2337 state.blames.clear();
2338 state.blames.extend(blames);
2339 })
2340 .unwrap();
2341 }
2342
2343 pub fn set_graph_commits(&self, dot_git: &Path, commits: Vec<Arc<InitialGraphCommitData>>) {
2344 self.with_git_state(dot_git, true, |state| {
2345 state.graph_commits = commits;
2346 })
2347 .unwrap();
2348 }
2349
2350 pub fn set_graph_error(&self, dot_git: &Path, error: Option<String>) {
2351 self.with_git_state(dot_git, true, |state| {
2352 state.simulated_graph_error = error;
2353 })
2354 .unwrap();
2355 }
2356
2357 pub fn set_commit_data(
2358 &self,
2359 dot_git: &Path,
2360 commit_data: impl IntoIterator<Item = (CommitData, bool)>,
2361 ) {
2362 self.with_git_state(dot_git, true, |state| {
2363 state.commit_data = commit_data
2364 .into_iter()
2365 .map(|(data, should_fail)| {
2366 (
2367 data.sha,
2368 if should_fail {
2369 FakeCommitDataEntry::Fail(data)
2370 } else {
2371 FakeCommitDataEntry::Success(data)
2372 },
2373 )
2374 })
2375 .collect();
2376 })
2377 .unwrap();
2378 }
2379
    /// Put the given git repository into a state with the given status,
    /// by mutating the head, index, and unmerged state.
    ///
    /// Paths without an entry in `statuses` are treated as unmodified. A
    /// path's working-copy content is perturbed with marker suffixes (e.g.
    /// " (modified in index)") so head, index, and working copy differ
    /// exactly where the requested status says they should.
    pub fn set_status_for_repo(&self, dot_git: &Path, statuses: &[(&str, FileStatus)]) {
        let workdir_path = dot_git.parent().unwrap();
        let workdir_contents = self.files_with_contents(workdir_path);
        self.with_git_state(dot_git, true, |state| {
            // Rebuild head/index/unmerged state from scratch.
            state.index_contents.clear();
            state.head_contents.clear();
            state.unmerged_paths.clear();
            for (path, content) in workdir_contents {
                use util::{paths::PathStyle, rel_path::RelPath};

                let repo_path = RelPath::new(path.strip_prefix(&workdir_path).unwrap(), PathStyle::local()).unwrap();
                let repo_path = RepoPath::from_rel_path(&repo_path);
                let status = statuses
                    .iter()
                    .find_map(|(p, status)| (*p == repo_path.as_unix_str()).then_some(status));
                let mut content = String::from_utf8_lossy(&content).to_string();

                // Determine what the index and head should contain for this
                // path so that the requested status falls out of a diff.
                let mut index_content = None;
                let mut head_content = None;
                match status {
                    None => {
                        // No requested status: the path is identical in
                        // head, index, and working copy.
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    Some(FileStatus::Untracked | FileStatus::Ignored) => {}
                    Some(FileStatus::Unmerged(unmerged_status)) => {
                        state
                            .unmerged_paths
                            .insert(repo_path.clone(), *unmerged_status);
                        content.push_str(" (unmerged)");
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    Some(FileStatus::Tracked(TrackedStatus {
                        index_status,
                        worktree_status,
                    })) => {
                        // First derive the index content from the
                        // working-copy status...
                        match worktree_status {
                            StatusCode::Modified => {
                                let mut content = content.clone();
                                content.push_str(" (modified in working copy)");
                                index_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                index_content = Some(content.clone());
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted | StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                        // ...then derive the head content from the index
                        // status.
                        match index_status {
                            StatusCode::Modified => {
                                let mut content = index_content.clone().expect(
                                    "file cannot be both modified in index and created in working copy",
                                );
                                content.push_str(" (modified in index)");
                                head_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                head_content = Some(index_content.clone().expect("file cannot be both unmodified in index and created in working copy"));
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted => {
                                head_content = Some("".into());
                            }
                            StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                    }
                };

                if let Some(content) = index_content {
                    state.index_contents.insert(repo_path.clone(), content);
                }
                if let Some(content) = head_content {
                    state.head_contents.insert(repo_path.clone(), content);
                }
            }
        }).unwrap();
    }
2464
2465 pub fn set_error_message_for_index_write(&self, dot_git: &Path, message: Option<String>) {
2466 self.with_git_state(dot_git, true, |state| {
2467 state.simulated_index_write_error_message = message;
2468 })
2469 .unwrap();
2470 }
2471
2472 pub fn set_create_worktree_error(&self, dot_git: &Path, message: Option<String>) {
2473 self.with_git_state(dot_git, true, |state| {
2474 state.simulated_create_worktree_error = message;
2475 })
2476 .unwrap();
2477 }
2478
2479 pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
2480 let mut result = Vec::new();
2481 let mut queue = collections::VecDeque::new();
2482 let state = &*self.state.lock();
2483 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2484 while let Some((path, entry)) = queue.pop_front() {
2485 if let FakeFsEntry::Dir { entries, .. } = entry {
2486 for (name, entry) in entries {
2487 queue.push_back((path.join(name), entry));
2488 }
2489 }
2490 if include_dot_git
2491 || !path
2492 .components()
2493 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2494 {
2495 result.push(path);
2496 }
2497 }
2498 result
2499 }
2500
2501 pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
2502 let mut result = Vec::new();
2503 let mut queue = collections::VecDeque::new();
2504 let state = &*self.state.lock();
2505 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2506 while let Some((path, entry)) = queue.pop_front() {
2507 if let FakeFsEntry::Dir { entries, .. } = entry {
2508 for (name, entry) in entries {
2509 queue.push_back((path.join(name), entry));
2510 }
2511 if include_dot_git
2512 || !path
2513 .components()
2514 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2515 {
2516 result.push(path);
2517 }
2518 }
2519 }
2520 result
2521 }
2522
2523 pub fn files(&self) -> Vec<PathBuf> {
2524 let mut result = Vec::new();
2525 let mut queue = collections::VecDeque::new();
2526 let state = &*self.state.lock();
2527 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2528 while let Some((path, entry)) = queue.pop_front() {
2529 match entry {
2530 FakeFsEntry::File { .. } => result.push(path),
2531 FakeFsEntry::Dir { entries, .. } => {
2532 for (name, entry) in entries {
2533 queue.push_back((path.join(name), entry));
2534 }
2535 }
2536 FakeFsEntry::Symlink { .. } => {}
2537 }
2538 }
2539 result
2540 }
2541
2542 pub fn files_with_contents(&self, prefix: &Path) -> Vec<(PathBuf, Vec<u8>)> {
2543 let mut result = Vec::new();
2544 let mut queue = collections::VecDeque::new();
2545 let state = &*self.state.lock();
2546 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2547 while let Some((path, entry)) = queue.pop_front() {
2548 match entry {
2549 FakeFsEntry::File { content, .. } => {
2550 if path.starts_with(prefix) {
2551 result.push((path, content.clone()));
2552 }
2553 }
2554 FakeFsEntry::Dir { entries, .. } => {
2555 for (name, entry) in entries {
2556 queue.push_back((path.join(name), entry));
2557 }
2558 }
2559 FakeFsEntry::Symlink { .. } => {}
2560 }
2561 }
2562 result
2563 }
2564
2565 /// How many `read_dir` calls have been issued.
2566 pub fn read_dir_call_count(&self) -> usize {
2567 self.state.lock().read_dir_call_count
2568 }
2569
2570 pub fn watched_paths(&self) -> Vec<PathBuf> {
2571 let state = self.state.lock();
2572 state
2573 .event_txs
2574 .iter()
2575 .filter_map(|(path, tx)| Some(path.clone()).filter(|_| !tx.is_closed()))
2576 .collect()
2577 }
2578
2579 /// How many `metadata` calls have been issued.
2580 pub fn metadata_call_count(&self) -> usize {
2581 self.state.lock().metadata_call_count
2582 }
2583
2584 /// How many write operations have been issued for a specific path.
2585 pub fn write_count_for_path(&self, path: impl AsRef<Path>) -> usize {
2586 let path = path.as_ref().to_path_buf();
2587 self.state
2588 .lock()
2589 .path_write_counts
2590 .get(&path)
2591 .copied()
2592 .unwrap_or(0)
2593 }
2594
2595 pub fn emit_fs_event(&self, path: impl Into<PathBuf>, event: Option<PathEventKind>) {
2596 self.state.lock().emit_event(std::iter::once((path, event)));
2597 }
2598
    /// Awaitable hook that pauses at random points, driven by the test
    /// executor, to shake out ordering assumptions in async callers.
    fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
        self.executor.simulate_random_delay()
    }
2602
2603 /// Returns list of all tracked trash entries.
2604 pub fn trash_entries(&self) -> Vec<TrashedEntry> {
2605 self.state
2606 .lock()
2607 .trash
2608 .iter()
2609 .map(|(entry, _)| entry.clone())
2610 .collect()
2611 }
2612
    /// Shared implementation for `remove_dir` and `trash`: unlinks the
    /// directory at `path` and returns the removed entry, or `None` when the
    /// path was absent and `ignore_if_not_exists` is set.
    async fn remove_dir_inner(
        &self,
        path: &Path,
        options: RemoveOptions,
    ) -> Result<Option<FakeFsEntry>> {
        self.simulate_random_delay().await;

        let path = normalize_path(path);
        let parent_path = path.parent().context("cannot remove the root")?;
        let base_name = path.file_name().context("cannot remove the root")?;

        let mut state = self.state.lock();
        let parent_entry = state.entry(parent_path)?;
        let entry = parent_entry
            .dir_entries(parent_path)?
            .entry(base_name.to_str().unwrap().into());

        let removed = match entry {
            btree_map::Entry::Vacant(_) => {
                if !options.ignore_if_not_exists {
                    anyhow::bail!("{path:?} does not exist");
                }

                None
            }
            btree_map::Entry::Occupied(mut entry) => {
                {
                    // `dir_entries` fails if the entry is not a directory; a
                    // non-recursive removal also requires it to be empty.
                    let children = entry.get_mut().dir_entries(&path)?;
                    if !options.recursive && !children.is_empty() {
                        anyhow::bail!("{path:?} is not empty");
                    }
                }

                Some(entry.remove())
            }
        };

        // NOTE(review): a `Removed` event is emitted even when nothing was
        // actually removed — confirm watchers tolerate this.
        state.emit_event([(path, Some(PathEventKind::Removed))]);
        Ok(removed)
    }
2653
    /// Shared implementation for `remove_file` and `trash`: unlinks the file
    /// at `path` and returns the removed entry, or `None` when the path was
    /// absent and `ignore_if_not_exists` is set.
    async fn remove_file_inner(
        &self,
        path: &Path,
        options: RemoveOptions,
    ) -> Result<Option<FakeFsEntry>> {
        self.simulate_random_delay().await;

        let path = normalize_path(path);
        let parent_path = path.parent().context("cannot remove the root")?;
        let base_name = path.file_name().unwrap();
        let mut state = self.state.lock();
        let parent_entry = state.entry(parent_path)?;
        let entry = parent_entry
            .dir_entries(parent_path)?
            .entry(base_name.to_str().unwrap().into());
        let removed = match entry {
            btree_map::Entry::Vacant(_) => {
                if !options.ignore_if_not_exists {
                    anyhow::bail!("{path:?} does not exist");
                }

                None
            }
            btree_map::Entry::Occupied(mut entry) => {
                // `file_content` fails if the entry is a directory rather
                // than a file, preventing directory removal via this path.
                entry.get_mut().file_content(&path)?;
                Some(entry.remove())
            }
        };

        state.emit_event([(path, Some(PathEventKind::Removed))]);
        Ok(removed)
    }
2686}
2687
#[cfg(feature = "test-support")]
impl FakeFsEntry {
    /// Whether this entry is a regular file.
    fn is_file(&self) -> bool {
        matches!(self, Self::File { .. })
    }

    /// Whether this entry is a symlink.
    fn is_symlink(&self) -> bool {
        matches!(self, Self::Symlink { .. })
    }

    /// Borrows the file's contents, failing if the entry is not a file.
    fn file_content(&self, path: &Path) -> Result<&Vec<u8>> {
        match self {
            Self::File { content, .. } => Ok(content),
            _ => anyhow::bail!("not a file: {path:?}"),
        }
    }

    /// Mutably borrows the directory's child map, failing if the entry is
    /// not a directory.
    fn dir_entries(&mut self, path: &Path) -> Result<&mut BTreeMap<String, FakeFsEntry>> {
        match self {
            Self::Dir { entries, .. } => Ok(entries),
            _ => anyhow::bail!("not a directory: {path:?}"),
        }
    }
}
2714
#[cfg(feature = "test-support")]
/// In-memory watcher handle returned by `FakeFs::watch`.
struct FakeWatcher {
    // Channel on which this watcher's event batches are delivered.
    tx: smol::channel::Sender<Vec<PathEvent>>,
    // The path this watcher was originally created for.
    original_path: PathBuf,
    // Shared fake-filesystem state, used to register additional watched paths.
    fs_state: Arc<Mutex<FakeFsState>>,
    // All path prefixes this watcher reports events for.
    prefixes: Mutex<Vec<PathBuf>>,
}
2722
#[cfg(feature = "test-support")]
impl Watcher for FakeWatcher {
    fn add(&self, path: &Path) -> Result<()> {
        // Paths under the original watch root are already covered.
        if path.starts_with(&self.original_path) {
            return Ok(());
        }
        let path = path.to_owned();
        self.fs_state
            .try_lock()
            .unwrap()
            .event_txs
            .push((path.clone(), self.tx.clone()));
        self.prefixes.lock().push(path);
        Ok(())
    }

    fn remove(&self, _: &Path) -> Result<()> {
        // Watches are never unregistered from the fake filesystem.
        Ok(())
    }
}
2742
#[cfg(feature = "test-support")]
#[derive(Debug)]
/// In-memory file handle returned by `FakeFs::open_handle`.
struct FakeHandle {
    // Inode of the entry the handle was opened on; used to follow the entry
    // across renames via the filesystem's `moves` table.
    inode: u64,
}
2748
#[cfg(feature = "test-support")]
impl FileHandle for FakeHandle {
    /// Resolves the handle's current path after a rename by consulting the
    /// fake filesystem's `moves` table. Fails when the inode was never moved
    /// or when the move target no longer exists.
    fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf> {
        let fs = fs.as_fake();
        let mut state = fs.state.lock();
        let target = match state.moves.get(&self.inode) {
            Some(target) => target.clone(),
            None => anyhow::bail!("fake fd not moved"),
        };

        if state.try_entry(&target, false).is_none() {
            anyhow::bail!("fake fd target not found")
        }
        Ok(target)
    }
}
2764
2765#[cfg(feature = "test-support")]
2766#[async_trait::async_trait]
2767impl Fs for FakeFs {
    /// Creates `path` and any missing ancestors, emitting a `Created` event
    /// for each directory that did not already exist.
    async fn create_dir(&self, path: &Path) -> Result<()> {
        self.simulate_random_delay().await;

        let mut created_dirs = Vec::new();
        let mut cur_path = PathBuf::new();
        for component in path.components() {
            // Root/prefix components always exist; they are only accumulated
            // into `cur_path`.
            let should_skip = matches!(component, Component::Prefix(..) | Component::RootDir);
            cur_path.push(component);
            if should_skip {
                continue;
            }
            // The lock is re-acquired per component, so other operations may
            // interleave between ancestor creations.
            let mut state = self.state.lock();

            let inode = state.get_and_increment_inode();
            let mtime = state.get_and_increment_mtime();
            state.write_path(&cur_path, |entry| {
                // Only insert (and record an event) when the directory is new.
                entry.or_insert_with(|| {
                    created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
                    FakeFsEntry::Dir {
                        inode,
                        mtime,
                        len: 0,
                        entries: Default::default(),
                        git_repo_state: None,
                    }
                });
                Ok(())
            })?
        }

        self.state.lock().emit_event(created_dirs);
        Ok(())
    }
2801
    /// Creates an empty file at `path`. On an existing entry, `overwrite`
    /// replaces it (emitting `Changed`), `ignore_if_exists` leaves it alone,
    /// and otherwise the call fails.
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
        self.simulate_random_delay().await;
        let mut state = self.state.lock();
        let inode = state.get_and_increment_inode();
        let mtime = state.get_and_increment_mtime();
        let file = FakeFsEntry::File {
            inode,
            mtime,
            len: 0,
            content: Vec::new(),
            git_dir_path: None,
        };
        let mut kind = Some(PathEventKind::Created);
        state.write_path(path, |entry| {
            match entry {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        kind = Some(PathEventKind::Changed);
                        *e.get_mut() = file;
                    } else if !options.ignore_if_exists {
                        anyhow::bail!("path already exists: {path:?}");
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(file);
                }
            }
            Ok(())
        })?;
        state.emit_event([(path, kind)]);
        Ok(())
    }
2834
2835 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
2836 let mut state = self.state.lock();
2837 let file = FakeFsEntry::Symlink { target };
2838 state
2839 .write_path(path.as_ref(), move |e| match e {
2840 btree_map::Entry::Vacant(e) => {
2841 e.insert(file);
2842 Ok(())
2843 }
2844 btree_map::Entry::Occupied(mut e) => {
2845 *e.get_mut() = file;
2846 Ok(())
2847 }
2848 })
2849 .unwrap();
2850 state.emit_event([(path, Some(PathEventKind::Created))]);
2851
2852 Ok(())
2853 }
2854
2855 async fn create_file_with(
2856 &self,
2857 path: &Path,
2858 mut content: Pin<&mut (dyn AsyncRead + Send)>,
2859 ) -> Result<()> {
2860 let mut bytes = Vec::new();
2861 content.read_to_end(&mut bytes).await?;
2862 self.write_file_internal(path, bytes, true)?;
2863 Ok(())
2864 }
2865
    /// Unpacks every regular-file entry of the tar archive under `path`,
    /// creating parent directories as needed. Non-file entries (directories,
    /// links) are skipped; directories materialize implicitly via
    /// `create_dir` on each file's parent.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()> {
        let mut entries = content.entries()?;
        while let Some(entry) = entries.next().await {
            let mut entry = entry?;
            if entry.header().entry_type().is_file() {
                let path = path.join(entry.path()?.as_ref());
                let mut bytes = Vec::new();
                entry.read_to_end(&mut bytes).await?;
                self.create_dir(path.parent().unwrap()).await?;
                self.write_file_internal(&path, bytes, true)?;
            }
        }
        Ok(())
    }
2884
    /// Moves the entry at `old_path` to `new_path`, recording the move in
    /// the `moves` table (keyed by inode) so open `FakeHandle`s can follow
    /// it. Emits a `Removed` event for the old path and `Created` for the
    /// new one.
    async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let old_path = normalize_path(old_path);
        let new_path = normalize_path(new_path);

        if options.create_parents {
            if let Some(parent) = new_path.parent() {
                self.create_dir(parent).await?;
            }
        }

        let mut state = self.state.lock();
        // Clone the entry out first; it is only unlinked from the old
        // location after the new location has been written successfully.
        let moved_entry = state.write_path(&old_path, |e| {
            if let btree_map::Entry::Occupied(e) = e {
                Ok(e.get().clone())
            } else {
                anyhow::bail!("path does not exist: {old_path:?}")
            }
        })?;

        // Symlinks carry no inode here and fall back to 0.
        let inode = match moved_entry {
            FakeFsEntry::File { inode, .. } => inode,
            FakeFsEntry::Dir { inode, .. } => inode,
            _ => 0,
        };

        state.moves.insert(inode, new_path.clone());

        state.write_path(&new_path, |e| {
            match e {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        *e.get_mut() = moved_entry;
                    } else if !options.ignore_if_exists {
                        anyhow::bail!("path already exists: {new_path:?}");
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(moved_entry);
                }
            }
            Ok(())
        })?;

        state
            .write_path(&old_path, |e| {
                if let btree_map::Entry::Occupied(e) = e {
                    Ok(e.remove())
                } else {
                    // The entry existed when cloned above and the state lock
                    // has been held since, so it must still exist.
                    unreachable!()
                }
            })
            .unwrap();

        state.emit_event([
            (old_path, Some(PathEventKind::Removed)),
            (new_path, Some(PathEventKind::Created)),
        ]);
        Ok(())
    }
2946
2947 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
2948 self.simulate_random_delay().await;
2949
2950 let source = normalize_path(source);
2951 let target = normalize_path(target);
2952 let mut state = self.state.lock();
2953 let mtime = state.get_and_increment_mtime();
2954 let inode = state.get_and_increment_inode();
2955 let source_entry = state.entry(&source)?;
2956 let content = source_entry.file_content(&source)?.clone();
2957 let mut kind = Some(PathEventKind::Created);
2958 state.write_path(&target, |e| match e {
2959 btree_map::Entry::Occupied(e) => {
2960 if options.overwrite {
2961 kind = Some(PathEventKind::Changed);
2962 Ok(Some(e.get().clone()))
2963 } else if !options.ignore_if_exists {
2964 anyhow::bail!("{target:?} already exists");
2965 } else {
2966 Ok(None)
2967 }
2968 }
2969 btree_map::Entry::Vacant(e) => Ok(Some(
2970 e.insert(FakeFsEntry::File {
2971 inode,
2972 mtime,
2973 len: content.len() as u64,
2974 content,
2975 git_dir_path: None,
2976 })
2977 .clone(),
2978 )),
2979 })?;
2980 state.emit_event([(target, kind)]);
2981 Ok(())
2982 }
2983
2984 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2985 self.remove_dir_inner(path, options).await.map(|_| ())
2986 }
2987
    /// Removes `path` (file or directory) and records the removed entry in
    /// the fake trash so it can later be restored via `restore`.
    async fn trash(&self, path: &Path, options: RemoveOptions) -> Result<TrashedEntry> {
        let normalized_path = normalize_path(path);
        let parent_path = normalized_path.parent().context("cannot remove the root")?;
        let base_name = normalized_path.file_name().unwrap();
        let result = if self.is_dir(path).await {
            self.remove_dir_inner(path, options).await?
        } else {
            self.remove_file_inner(path, options).await?
        };

        match result {
            Some(fake_entry) => {
                // In the fake implementation the file name doubles as the
                // trash id.
                let trashed_entry = TrashedEntry {
                    id: base_name.to_str().unwrap().into(),
                    name: base_name.to_str().unwrap().into(),
                    original_parent: parent_path.to_path_buf(),
                };

                let mut state = self.state.lock();
                state.trash.push((trashed_entry.clone(), fake_entry));
                Ok(trashed_entry)
            }
            None => anyhow::bail!("{normalized_path:?} does not exist"),
        }
    }
3013
3014 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
3015 self.remove_file_inner(path, options).await.map(|_| ())
3016 }
3017
3018 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
3019 let bytes = self.load_internal(path).await?;
3020 Ok(Box::new(io::Cursor::new(bytes)))
3021 }
3022
3023 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
3024 self.simulate_random_delay().await;
3025 let mut state = self.state.lock();
3026 let inode = match state.entry(path)? {
3027 FakeFsEntry::File { inode, .. } => *inode,
3028 FakeFsEntry::Dir { inode, .. } => *inode,
3029 _ => unreachable!(),
3030 };
3031 Ok(Arc::new(FakeHandle { inode }))
3032 }
3033
3034 async fn load(&self, path: &Path) -> Result<String> {
3035 let content = self.load_internal(path).await?;
3036 Ok(String::from_utf8(content)?)
3037 }
3038
3039 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
3040 self.load_internal(path).await
3041 }
3042
3043 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
3044 self.simulate_random_delay().await;
3045 let path = normalize_path(path.as_path());
3046 if let Some(path) = path.parent() {
3047 self.create_dir(path).await?;
3048 }
3049 self.write_file_internal(path, data.into_bytes(), true)?;
3050 Ok(())
3051 }
3052
3053 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
3054 self.simulate_random_delay().await;
3055 let path = normalize_path(path);
3056 let content = text::chunks_with_line_ending(text, line_ending).collect::<String>();
3057 if let Some(path) = path.parent() {
3058 self.create_dir(path).await?;
3059 }
3060 self.write_file_internal(path, content.into_bytes(), false)?;
3061 Ok(())
3062 }
3063
3064 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
3065 self.simulate_random_delay().await;
3066 let path = normalize_path(path);
3067 if let Some(path) = path.parent() {
3068 self.create_dir(path).await?;
3069 }
3070 self.write_file_internal(path, content.to_vec(), false)?;
3071 Ok(())
3072 }
3073
3074 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
3075 let path = normalize_path(path);
3076 self.simulate_random_delay().await;
3077 let state = self.state.lock();
3078 let canonical_path = state
3079 .canonicalize(&path, true)
3080 .with_context(|| format!("path does not exist: {path:?}"))?;
3081 Ok(canonical_path)
3082 }
3083
3084 async fn is_file(&self, path: &Path) -> bool {
3085 let path = normalize_path(path);
3086 self.simulate_random_delay().await;
3087 let mut state = self.state.lock();
3088 if let Some((entry, _)) = state.try_entry(&path, true) {
3089 entry.is_file()
3090 } else {
3091 false
3092 }
3093 }
3094
3095 async fn is_dir(&self, path: &Path) -> bool {
3096 self.metadata(path)
3097 .await
3098 .is_ok_and(|metadata| metadata.is_some_and(|metadata| metadata.is_dir))
3099 }
3100
    /// Returns metadata for `path`, or `Ok(None)` if it does not exist.
    /// Symlinks are resolved: the returned metadata describes the target
    /// with `is_symlink` set, and a dangling link yields `Ok(None)`.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let mut state = self.state.lock();
        state.metadata_call_count += 1;
        if let Some((mut entry, _)) = state.try_entry(&path, false) {
            let is_symlink = entry.is_symlink();
            if is_symlink {
                // Re-resolve with symlink following to reach the target.
                if let Some(e) = state.try_entry(&path, true).map(|e| e.0) {
                    entry = e;
                } else {
                    return Ok(None);
                }
            }

            Ok(Some(match &*entry {
                FakeFsEntry::File {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: false,
                    is_symlink,
                    is_fifo: false,
                    is_executable: false,
                },
                FakeFsEntry::Dir {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: true,
                    is_symlink,
                    is_fifo: false,
                    is_executable: false,
                },
                // Unreachable: symlinks were resolved to their target above.
                FakeFsEntry::Symlink { .. } => unreachable!(),
            }))
        } else {
            Ok(None)
        }
    }
3145
3146 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
3147 self.simulate_random_delay().await;
3148 let path = normalize_path(path);
3149 let mut state = self.state.lock();
3150 let (entry, _) = state
3151 .try_entry(&path, false)
3152 .with_context(|| format!("path does not exist: {path:?}"))?;
3153 if let FakeFsEntry::Symlink { target } = entry {
3154 Ok(target.clone())
3155 } else {
3156 anyhow::bail!("not a symlink: {path:?}")
3157 }
3158 }
3159
3160 async fn read_dir(
3161 &self,
3162 path: &Path,
3163 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
3164 self.simulate_random_delay().await;
3165 let path = normalize_path(path);
3166 let mut state = self.state.lock();
3167 state.read_dir_call_count += 1;
3168 let entry = state.entry(&path)?;
3169 let children = entry.dir_entries(&path)?;
3170 let paths = children
3171 .keys()
3172 .map(|file_name| Ok(path.join(file_name)))
3173 .collect::<Vec<_>>();
3174 Ok(Box::pin(futures::stream::iter(paths)))
3175 }
3176
    /// Subscribes to filesystem events under `path`. Returns a stream of
    /// event batches — filtered to the watcher's registered prefixes, with a
    /// simulated random delay per batch — plus the watcher handle used to
    /// register additional prefixes. The latency argument is ignored.
    async fn watch(
        &self,
        path: &Path,
        _: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    ) {
        self.simulate_random_delay().await;
        let (tx, rx) = smol::channel::unbounded();
        let path = path.to_path_buf();
        self.state.lock().event_txs.push((path.clone(), tx.clone()));
        let executor = self.executor.clone();
        let watcher = Arc::new(FakeWatcher {
            tx,
            original_path: path.to_owned(),
            fs_state: self.state.clone(),
            prefixes: Mutex::new(vec![path]),
        });
        (
            // Only forward batches containing at least one event under a
            // registered prefix; prefixes are re-read per batch so later
            // `Watcher::add` calls take effect.
            Box::pin(futures::StreamExt::filter(rx, {
                let watcher = watcher.clone();
                move |events| {
                    let result = events.iter().any(|evt_path| {
                        watcher
                            .prefixes
                            .lock()
                            .iter()
                            .any(|prefix| evt_path.path.starts_with(prefix))
                    });
                    let executor = executor.clone();
                    async move {
                        executor.simulate_random_delay().await;
                        result
                    }
                }
            })),
            watcher,
        )
    }
3217
    /// Opens a `FakeGitRepository` backed by this filesystem's git state for
    /// the repository at `abs_dot_git`; the system git binary is ignored.
    fn open_repo(
        &self,
        abs_dot_git: &Path,
        _system_git_binary: Option<&Path>,
    ) -> Result<Arc<dyn GitRepository>> {
        self.with_git_state_and_paths(
            abs_dot_git,
            false,
            |_, repository_dir_path, common_dir_path| {
                Arc::new(fake_git_repo::FakeGitRepository {
                    fs: self.this.upgrade().unwrap(),
                    executor: self.executor.clone(),
                    dot_git_path: abs_dot_git.to_path_buf(),
                    repository_dir_path: repository_dir_path.to_owned(),
                    common_dir_path: common_dir_path.to_owned(),
                    checkpoints: Arc::default(),
                    is_trusted: Arc::default(),
                }) as _
            },
        )
    }
3239
3240 async fn git_init(
3241 &self,
3242 abs_work_directory_path: &Path,
3243 _fallback_branch_name: String,
3244 ) -> Result<()> {
3245 self.create_dir(&abs_work_directory_path.join(".git")).await
3246 }
3247
3248 async fn git_clone(&self, _repo_url: &str, _abs_work_directory: &Path) -> Result<()> {
3249 anyhow::bail!("Git clone is not supported in fake Fs")
3250 }
3251
    /// Always true: this is the in-memory test implementation of `Fs`.
    fn is_fake(&self) -> bool {
        true
    }
3255
    /// The fake filesystem always behaves case-sensitively.
    async fn is_case_sensitive(&self) -> bool {
        true
    }
3259
3260 fn subscribe_to_jobs(&self) -> JobEventReceiver {
3261 let (sender, receiver) = futures::channel::mpsc::unbounded();
3262 self.state.lock().job_event_subscribers.lock().push(sender);
3263 receiver
3264 }
3265
    /// Moves a previously trashed entry back to its original location.
    /// Fails with `NotFound` when the entry is not in the trash, or with
    /// `Collision` when something now exists at the restore path.
    async fn restore(&self, trashed_entry: TrashedEntry) -> Result<PathBuf, TrashRestoreError> {
        let mut state = self.state.lock();

        let Some((trashed_entry, fake_entry)) = state
            .trash
            .iter()
            .find(|(entry, _)| *entry == trashed_entry)
            .cloned()
        else {
            return Err(TrashRestoreError::NotFound {
                path: PathBuf::from(trashed_entry.id),
            });
        };

        let path = trashed_entry
            .original_parent
            .join(trashed_entry.name.clone());

        // Only restore into a vacant slot; an occupied slot is a collision.
        let result = state.write_path(&path, |entry| match entry {
            btree_map::Entry::Vacant(entry) => {
                entry.insert(fake_entry);
                Ok(())
            }
            btree_map::Entry::Occupied(_) => {
                anyhow::bail!("Failed to restore {:?}", path);
            }
        });

        match result {
            Ok(_) => {
                // Success: drop the trash record and notify watchers.
                state.trash.retain(|(entry, _)| *entry != trashed_entry);
                state.emit_event([(path.clone(), Some(PathEventKind::Created))]);
                Ok(path)
            }
            Err(_) => {
                // For now we'll just assume that this failed because it was a
                // collision error, which I think that, for the time being, is
                // the only case where this could fail?
                Err(TrashRestoreError::Collision { path })
            }
        }
    }
3308
    #[cfg(feature = "test-support")]
    fn as_fake(&self) -> Arc<FakeFs> {
        // Upgrade the weak self-reference stored in `this`; this can only
        // panic if the FakeFs has already been dropped, which would be a bug
        // in the caller.
        self.this.upgrade().unwrap()
    }
3313}
3314
3315pub async fn copy_recursive<'a>(
3316 fs: &'a dyn Fs,
3317 source: &'a Path,
3318 target: &'a Path,
3319 options: CopyOptions,
3320) -> Result<()> {
3321 for (item, is_dir) in read_dir_items(fs, source).await? {
3322 let Ok(item_relative_path) = item.strip_prefix(source) else {
3323 continue;
3324 };
3325 let target_item = if item_relative_path == Path::new("") {
3326 target.to_path_buf()
3327 } else {
3328 target.join(item_relative_path)
3329 };
3330 if is_dir {
3331 if !options.overwrite && fs.metadata(&target_item).await.is_ok_and(|m| m.is_some()) {
3332 if options.ignore_if_exists {
3333 continue;
3334 } else {
3335 anyhow::bail!("{target_item:?} already exists");
3336 }
3337 }
3338 let _ = fs
3339 .remove_dir(
3340 &target_item,
3341 RemoveOptions {
3342 recursive: true,
3343 ignore_if_not_exists: true,
3344 },
3345 )
3346 .await;
3347 fs.create_dir(&target_item).await?;
3348 } else {
3349 fs.copy_file(&item, &target_item, options).await?;
3350 }
3351 }
3352 Ok(())
3353}
3354
3355/// Recursively reads all of the paths in the given directory.
3356///
3357/// Returns a vector of tuples of (path, is_dir).
3358pub async fn read_dir_items<'a>(fs: &'a dyn Fs, source: &'a Path) -> Result<Vec<(PathBuf, bool)>> {
3359 let mut items = Vec::new();
3360 read_recursive(fs, source, &mut items).await?;
3361 Ok(items)
3362}
3363
3364fn read_recursive<'a>(
3365 fs: &'a dyn Fs,
3366 source: &'a Path,
3367 output: &'a mut Vec<(PathBuf, bool)>,
3368) -> BoxFuture<'a, Result<()>> {
3369 use futures::future::FutureExt;
3370
3371 async move {
3372 let metadata = fs
3373 .metadata(source)
3374 .await?
3375 .with_context(|| format!("path does not exist: {source:?}"))?;
3376
3377 if metadata.is_dir {
3378 output.push((source.to_path_buf(), true));
3379 let mut children = fs.read_dir(source).await?;
3380 while let Some(child_path) = children.next().await {
3381 if let Ok(child_path) = child_path {
3382 read_recursive(fs, &child_path, output).await?;
3383 }
3384 }
3385 } else {
3386 output.push((source.to_path_buf(), false));
3387 }
3388 Ok(())
3389 }
3390 .boxed()
3391}
3392
3393// todo(windows)
3394// can we get file id not open the file twice?
3395// https://github.com/rust-lang/rust/issues/63010
#[cfg(target_os = "windows")]
/// Returns a 64-bit identifier for the file at `path`, assembled from the
/// high/low file-index halves reported by `GetFileInformationByHandle`.
///
/// # Errors
/// Fails if the file cannot be opened or the Win32 query fails.
async fn file_id(path: impl AsRef<Path>) -> Result<u64> {
    use std::os::windows::io::AsRawHandle;

    use smol::fs::windows::OpenOptionsExt;
    use windows::Win32::{
        Foundation::HANDLE,
        Storage::FileSystem::{
            BY_HANDLE_FILE_INFORMATION, FILE_FLAG_BACKUP_SEMANTICS, GetFileInformationByHandle,
        },
    };

    // FILE_FLAG_BACKUP_SEMANTICS allows opening directory handles as well as
    // regular files (per the Win32 CreateFile documentation).
    let file = smol::fs::OpenOptions::new()
        .read(true)
        .custom_flags(FILE_FLAG_BACKUP_SEMANTICS.0)
        .open(path)
        .await?;

    // Zero-initialized plain-data struct; fully filled in by the API on success.
    let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() };
    // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle
    // This function supports Windows XP+
    smol::unblock(move || {
        // SAFETY: `file` is moved into this closure, so the raw handle stays
        // valid for the duration of the call; `info` is a valid out-pointer.
        unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? };

        // Combine the two 32-bit index halves into a single u64 id.
        Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64))
    })
    .await
}
3424
#[cfg(target_os = "windows")]
/// Atomically replaces `replaced_file` with `replacement_file` via the Win32
/// `ReplaceFileW` API.
fn atomic_replace<P: AsRef<Path>>(
    replaced_file: P,
    replacement_file: P,
) -> windows::core::Result<()> {
    use windows::{
        Win32::Storage::FileSystem::{REPLACE_FILE_FLAGS, ReplaceFileW},
        core::HSTRING,
    };

    // If the file does not exist, create it.
    // (`ReplaceFileW` requires the destination to already exist; the result
    // is deliberately ignored since failure normally just means the file is
    // already there.)
    let _ = std::fs::File::create_new(replaced_file.as_ref());

    // SAFETY: both HSTRING temporaries are valid, NUL-terminated wide strings
    // that outlive the call; all optional parameters are passed as None.
    unsafe {
        ReplaceFileW(
            &HSTRING::from(replaced_file.as_ref().to_string_lossy().into_owned()),
            &HSTRING::from(replacement_file.as_ref().to_string_lossy().into_owned()),
            None,
            REPLACE_FILE_FLAGS::default(),
            None,
            None,
        )
    }
}