1#[cfg(target_os = "macos")]
2mod mac_watcher;
3
4#[cfg(not(target_os = "macos"))]
5pub mod fs_watcher;
6
7use parking_lot::Mutex;
8use std::sync::atomic::{AtomicUsize, Ordering};
9use std::time::Instant;
10
11use anyhow::{Context as _, Result, anyhow};
12#[cfg(any(target_os = "linux", target_os = "freebsd"))]
13use ashpd::desktop::trash;
14use futures::stream::iter;
15use gpui::App;
16use gpui::BackgroundExecutor;
17use gpui::Global;
18use gpui::ReadGlobal as _;
19use gpui::SharedString;
20use std::borrow::Cow;
21use util::command::new_smol_command;
22
23#[cfg(unix)]
24use std::os::fd::{AsFd, AsRawFd};
25
26#[cfg(unix)]
27use std::os::unix::fs::{FileTypeExt, MetadataExt};
28
29#[cfg(any(target_os = "macos", target_os = "freebsd"))]
30use std::mem::MaybeUninit;
31
32use async_tar::Archive;
33use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture};
34use git::repository::{GitRepository, RealGitRepository};
35use is_executable::IsExecutable;
36use rope::Rope;
37use serde::{Deserialize, Serialize};
38use smol::io::AsyncWriteExt;
39use std::{
40 io::{self, Write},
41 path::{Component, Path, PathBuf},
42 pin::Pin,
43 sync::Arc,
44 time::{Duration, SystemTime, UNIX_EPOCH},
45};
46use tempfile::TempDir;
47use text::LineEnding;
48
49#[cfg(any(test, feature = "test-support"))]
50mod fake_git_repo;
51#[cfg(any(test, feature = "test-support"))]
52use collections::{BTreeMap, btree_map};
53#[cfg(any(test, feature = "test-support"))]
54use fake_git_repo::FakeGitRepositoryState;
55#[cfg(any(test, feature = "test-support"))]
56use git::{
57 repository::{RepoPath, repo_path},
58 status::{FileStatus, StatusCode, TrackedStatus, UnmergedStatus},
59};
60
61#[cfg(any(test, feature = "test-support"))]
62use smol::io::AsyncReadExt;
63#[cfg(any(test, feature = "test-support"))]
64use std::ffi::OsStr;
65
66#[cfg(any(test, feature = "test-support"))]
67pub use fake_git_repo::{LOAD_HEAD_TEXT_TASK, LOAD_INDEX_TEXT_TASK};
68
69pub trait Watcher: Send + Sync {
70 fn add(&self, path: &Path) -> Result<()>;
71 fn remove(&self, path: &Path) -> Result<()>;
72}
73
74#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
75pub enum PathEventKind {
76 Removed,
77 Created,
78 Changed,
79}
80
81#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
82pub struct PathEvent {
83 pub path: PathBuf,
84 pub kind: Option<PathEventKind>,
85}
86
87impl From<PathEvent> for PathBuf {
88 fn from(event: PathEvent) -> Self {
89 event.path
90 }
91}
92
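/// Platform-agnostic interface to the file system, implemented by [`RealFs`] for real
/// disk access and by `FakeFs` in tests (behind the `test-support` feature).
///
/// A minimal usage sketch; the paths, error handling, and the `example` wrapper are
/// illustrative only:
///
/// ```ignore
/// async fn example(fs: Arc<dyn Fs>) -> Result<()> {
///     fs.create_dir(Path::new("/tmp/example")).await?;
///     fs.atomic_write(PathBuf::from("/tmp/example/a.txt"), "hello".to_string())
///         .await?;
///     let text = fs.load(Path::new("/tmp/example/a.txt")).await?;
///     assert_eq!(text, "hello");
///     Ok(())
/// }
/// ```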
93#[async_trait::async_trait]
94pub trait Fs: Send + Sync {
95 async fn create_dir(&self, path: &Path) -> Result<()>;
96 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()>;
97 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
98 async fn create_file_with(
99 &self,
100 path: &Path,
101 content: Pin<&mut (dyn AsyncRead + Send)>,
102 ) -> Result<()>;
103 async fn extract_tar_file(
104 &self,
105 path: &Path,
106 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
107 ) -> Result<()>;
108 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
109 async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;
110 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;
111 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
112 self.remove_dir(path, options).await
113 }
114 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;
115 async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
116 self.remove_file(path, options).await
117 }
118 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>>;
119 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>>;
120 async fn load(&self, path: &Path) -> Result<String> {
121 Ok(String::from_utf8(self.load_bytes(path).await?)?)
122 }
123 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>>;
124 async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
125 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()>;
126 async fn write(&self, path: &Path, content: &[u8]) -> Result<()>;
127 async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
128 async fn is_file(&self, path: &Path) -> bool;
129 async fn is_dir(&self, path: &Path) -> bool;
130 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
131 async fn read_link(&self, path: &Path) -> Result<PathBuf>;
132 async fn read_dir(
133 &self,
134 path: &Path,
135 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;
136
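    /// Watches `path` for changes, batching events over roughly `latency`, and returns
    /// the event stream together with a [`Watcher`] handle for adding or removing
    /// watched paths.
    ///
    /// A consumption sketch (assuming `fs: Arc<dyn Fs>` in an async context, with
    /// `futures::StreamExt` in scope):
    ///
    /// ```ignore
    /// let (mut events, _watcher) = fs
    ///     .watch(Path::new("/tmp/example"), Duration::from_millis(100))
    ///     .await;
    /// while let Some(batch) = events.next().await {
    ///     for event in batch {
    ///         log::info!("{:?}: {:?}", event.kind, event.path);
    ///     }
    /// }
    /// ```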
137 async fn watch(
138 &self,
139 path: &Path,
140 latency: Duration,
141 ) -> (
142 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
143 Arc<dyn Watcher>,
144 );
145
146 fn open_repo(
147 &self,
148 abs_dot_git: &Path,
149 system_git_binary_path: Option<&Path>,
150 ) -> Option<Arc<dyn GitRepository>>;
151 async fn git_init(&self, abs_work_directory: &Path, fallback_branch_name: String)
152 -> Result<()>;
153 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()>;
154 fn is_fake(&self) -> bool;
155 async fn is_case_sensitive(&self) -> Result<bool>;
156 fn subscribe_to_jobs(&self) -> JobEventReceiver;
157
158 #[cfg(any(test, feature = "test-support"))]
159 fn as_fake(&self) -> Arc<FakeFs> {
160 panic!("called as_fake on a real fs");
161 }
162}
163
164struct GlobalFs(Arc<dyn Fs>);
165
166impl Global for GlobalFs {}
167
168impl dyn Fs {
169 /// Returns the global [`Fs`].
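    ///
    /// A sketch of typical access, assuming the global was registered at startup via
    /// `set_global` (the surrounding code is illustrative):
    ///
    /// ```ignore
    /// let fs = <dyn Fs>::global(cx);
    /// let text = fs.load(Path::new("settings.json")).await?;
    /// ```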
170 pub fn global(cx: &App) -> Arc<Self> {
171 GlobalFs::global(cx).0.clone()
172 }
173
174 /// Sets the global [`Fs`].
175 pub fn set_global(fs: Arc<Self>, cx: &mut App) {
176 cx.set_global(GlobalFs(fs));
177 }
178}
179
180#[derive(Copy, Clone, Default)]
181pub struct CreateOptions {
182 pub overwrite: bool,
183 pub ignore_if_exists: bool,
184}
185
186#[derive(Copy, Clone, Default)]
187pub struct CopyOptions {
188 pub overwrite: bool,
189 pub ignore_if_exists: bool,
190}
191
192#[derive(Copy, Clone, Default)]
193pub struct RenameOptions {
194 pub overwrite: bool,
195 pub ignore_if_exists: bool,
196 /// Whether to create parent directories if they do not exist.
197 pub create_parents: bool,
198}
199
200#[derive(Copy, Clone, Default)]
201pub struct RemoveOptions {
202 pub recursive: bool,
203 pub ignore_if_not_exists: bool,
204}
205
206#[derive(Copy, Clone, Debug)]
207pub struct Metadata {
208 pub inode: u64,
209 pub mtime: MTime,
210 pub is_symlink: bool,
211 pub is_dir: bool,
212 pub len: u64,
213 pub is_fifo: bool,
214 pub is_executable: bool,
215}
216
217/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
218/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
219/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
220/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
221///
222/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
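///
/// A sketch of the intended persistence round-trip (values are illustrative):
///
/// ```ignore
/// let mtime = MTime::from_seconds_and_nanos(1_700_000_000, 500);
/// assert_eq!(
///     mtime.to_seconds_and_nanos_for_persistence(),
///     Some((1_700_000_000, 500))
/// );
/// ```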
223#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
224#[serde(transparent)]
225pub struct MTime(SystemTime);
226
227pub type JobId = usize;
228
229#[derive(Clone, Debug)]
230pub struct JobInfo {
231 pub start: Instant,
232 pub message: SharedString,
233 pub id: JobId,
234}
235
236#[derive(Debug, Clone)]
237pub enum JobEvent {
238 Started { info: JobInfo },
239 Completed { id: JobId },
240}
241
242pub type JobEventSender = futures::channel::mpsc::UnboundedSender<JobEvent>;
243pub type JobEventReceiver = futures::channel::mpsc::UnboundedReceiver<JobEvent>;
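// A sketch of how a consumer might observe job lifecycle events, assuming `fs` is an
// `Arc<dyn Fs>`, we are in an async context, and `futures::StreamExt` is in scope:
//
//     let mut jobs = fs.subscribe_to_jobs();
//     while let Some(event) = jobs.next().await {
//         match event {
//             JobEvent::Started { info } => log::info!("job {} started: {}", info.id, info.message),
//             JobEvent::Completed { id } => log::info!("job {id} finished"),
//         }
//     }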
244
245struct JobTracker {
246 id: JobId,
247 subscribers: Arc<Mutex<Vec<JobEventSender>>>,
248}
249
250impl JobTracker {
251 fn new(info: JobInfo, subscribers: Arc<Mutex<Vec<JobEventSender>>>) -> Self {
252 let id = info.id;
253 {
254 let mut subs = subscribers.lock();
255 subs.retain(|sender| {
256 sender
257 .unbounded_send(JobEvent::Started { info: info.clone() })
258 .is_ok()
259 });
260 }
261 Self { id, subscribers }
262 }
263}
264
265impl Drop for JobTracker {
266 fn drop(&mut self) {
267 let mut subs = self.subscribers.lock();
268 subs.retain(|sender| {
269 sender
270 .unbounded_send(JobEvent::Completed { id: self.id })
271 .is_ok()
272 });
273 }
274}
275
276impl MTime {
277 /// Conversion intended for persistence and testing.
278 pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
279 MTime(UNIX_EPOCH + Duration::new(secs, nanos))
280 }
281
282 /// Conversion intended for persistence.
283 pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
284 self.0
285 .duration_since(UNIX_EPOCH)
286 .ok()
287 .map(|duration| (duration.as_secs(), duration.subsec_nanos()))
288 }
289
290 /// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
291 /// "_for_user" is to discourage misuse - this method should not be used when making decisions
292 /// about file dirtiness.
293 pub fn timestamp_for_user(self) -> SystemTime {
294 self.0
295 }
296
297 /// Temporary method to split out the behavior changes from introduction of this newtype.
298 pub fn bad_is_greater_than(self, other: MTime) -> bool {
299 self.0 > other.0
300 }
301}
302
303impl From<proto::Timestamp> for MTime {
304 fn from(timestamp: proto::Timestamp) -> Self {
305 MTime(timestamp.into())
306 }
307}
308
309impl From<MTime> for proto::Timestamp {
310 fn from(mtime: MTime) -> Self {
311 mtime.0.into()
312 }
313}
314
315pub struct RealFs {
316 bundled_git_binary_path: Option<PathBuf>,
317 executor: BackgroundExecutor,
318 next_job_id: Arc<AtomicUsize>,
319 job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
320}
321
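/// A handle to an open file that can report the file's current path, even if the file
/// has been renamed or moved since it was opened.
///
/// A sketch of the intended use (paths are illustrative):
///
/// ```ignore
/// let handle = fs.open_handle(Path::new("/tmp/a.txt")).await?;
/// // ...another process may rename the file in the meantime...
/// let current = handle.current_path(&fs)?;
/// ```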
322pub trait FileHandle: Send + Sync + std::fmt::Debug {
323 fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf>;
324}
325
326impl FileHandle for std::fs::File {
327 #[cfg(target_os = "macos")]
328 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
329 use std::{
330 ffi::{CStr, OsStr},
331 os::unix::ffi::OsStrExt,
332 };
333
334 let fd = self.as_fd();
335 let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit();
336
337 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) };
338 anyhow::ensure!(result != -1, "fcntl returned -1");
339
340 // SAFETY: `fcntl` will initialize the path buffer.
341 let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) };
        anyhow::ensure!(!c_str.is_empty(), "Could not find a path for the file handle");
343 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
344 Ok(path)
345 }
346
347 #[cfg(target_os = "linux")]
348 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
349 let fd = self.as_fd();
350 let fd_path = format!("/proc/self/fd/{}", fd.as_raw_fd());
351 let new_path = std::fs::read_link(fd_path)?;
352 if new_path
353 .file_name()
354 .is_some_and(|f| f.to_string_lossy().ends_with(" (deleted)"))
355 {
356 anyhow::bail!("file was deleted")
357 };
358
359 Ok(new_path)
360 }
361
362 #[cfg(target_os = "freebsd")]
363 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
364 use std::{
365 ffi::{CStr, OsStr},
366 os::unix::ffi::OsStrExt,
367 };
368
369 let fd = self.as_fd();
        let mut kif = MaybeUninit::<libc::kinfo_file>::zeroed();
        // SAFETY: `kif` is zero-initialized, so writing a single field through the raw
        // pointer is sound; only `kf_structsize` needs to be set before the call.
        unsafe { (*kif.as_mut_ptr()).kf_structsize = libc::KINFO_FILE_SIZE };

        let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) };
        anyhow::ensure!(result != -1, "fcntl returned -1");

        // SAFETY: a successful `fcntl(F_KINFO)` call fully initializes `kif`.
        let kif = unsafe { kif.assume_init() };
        let c_str = unsafe { CStr::from_ptr(kif.kf_path.as_ptr()) };
        anyhow::ensure!(!c_str.is_empty(), "Could not find a path for the file handle");
379 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
380 Ok(path)
381 }
382
383 #[cfg(target_os = "windows")]
384 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
385 use std::ffi::OsString;
386 use std::os::windows::ffi::OsStringExt;
387 use std::os::windows::io::AsRawHandle;
388
389 use windows::Win32::Foundation::HANDLE;
390 use windows::Win32::Storage::FileSystem::{
391 FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW,
392 };
393
394 let handle = HANDLE(self.as_raw_handle() as _);
395
396 // Query required buffer size (in wide chars)
397 let required_len =
398 unsafe { GetFinalPathNameByHandleW(handle, &mut [], FILE_NAME_NORMALIZED) };
399 anyhow::ensure!(
400 required_len != 0,
401 "GetFinalPathNameByHandleW returned 0 length"
402 );
403
404 // Allocate buffer and retrieve the path
405 let mut buf: Vec<u16> = vec![0u16; required_len as usize + 1];
406 let written = unsafe { GetFinalPathNameByHandleW(handle, &mut buf, FILE_NAME_NORMALIZED) };
407 anyhow::ensure!(
408 written != 0,
409 "GetFinalPathNameByHandleW failed to write path"
410 );
411
412 let os_str: OsString = OsString::from_wide(&buf[..written as usize]);
        anyhow::ensure!(!os_str.is_empty(), "Could not find a path for the file handle");
414 Ok(PathBuf::from(os_str))
415 }
416}
417
418pub struct RealWatcher {}
419
420impl RealFs {
421 pub fn new(git_binary_path: Option<PathBuf>, executor: BackgroundExecutor) -> Self {
422 Self {
423 bundled_git_binary_path: git_binary_path,
424 executor,
425 next_job_id: Arc::new(AtomicUsize::new(0)),
426 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
427 }
428 }
429
430 #[cfg(target_os = "windows")]
431 fn canonicalize(path: &Path) -> Result<PathBuf> {
432 let mut strip_prefix = None;
433
434 let mut new_path = PathBuf::new();
435 for component in path.components() {
436 match component {
437 std::path::Component::Prefix(_) => {
438 let component = component.as_os_str();
439 let canonicalized = if component
440 .to_str()
441 .map(|e| e.ends_with("\\"))
442 .unwrap_or(false)
443 {
444 std::fs::canonicalize(component)
445 } else {
446 let mut component = component.to_os_string();
447 component.push("\\");
448 std::fs::canonicalize(component)
449 }?;
450
451 let mut strip = PathBuf::new();
452 for component in canonicalized.components() {
453 match component {
454 Component::Prefix(prefix_component) => {
455 match prefix_component.kind() {
456 std::path::Prefix::Verbatim(os_str) => {
457 strip.push(os_str);
458 }
459 std::path::Prefix::VerbatimUNC(host, share) => {
460 strip.push("\\\\");
461 strip.push(host);
462 strip.push(share);
463 }
464 std::path::Prefix::VerbatimDisk(disk) => {
465 strip.push(format!("{}:", disk as char));
466 }
467 _ => strip.push(component),
468 };
469 }
470 _ => strip.push(component),
471 }
472 }
473 strip_prefix = Some(strip);
474 new_path.push(component);
475 }
476 std::path::Component::RootDir => {
477 new_path.push(component);
478 }
479 std::path::Component::CurDir => {
480 if strip_prefix.is_none() {
481 // unrooted path
482 new_path.push(component);
483 }
484 }
485 std::path::Component::ParentDir => {
486 if strip_prefix.is_some() {
487 // rooted path
488 new_path.pop();
489 } else {
490 new_path.push(component);
491 }
492 }
493 std::path::Component::Normal(_) => {
494 if let Ok(link) = std::fs::read_link(new_path.join(component)) {
495 let link = match &strip_prefix {
496 Some(e) => link.strip_prefix(e).unwrap_or(&link),
497 None => &link,
498 };
499 new_path.extend(link);
500 } else {
501 new_path.push(component);
502 }
503 }
504 }
505 }
506
507 Ok(new_path)
508 }
509}
510
511#[async_trait::async_trait]
512impl Fs for RealFs {
513 async fn create_dir(&self, path: &Path) -> Result<()> {
514 Ok(smol::fs::create_dir_all(path).await?)
515 }
516
517 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
518 #[cfg(unix)]
519 smol::fs::unix::symlink(target, path).await?;
520
521 #[cfg(windows)]
522 if smol::fs::metadata(&target).await?.is_dir() {
523 let status = new_smol_command("cmd")
524 .args(["/C", "mklink", "/J"])
525 .args([path, target.as_path()])
526 .status()
527 .await?;
528
529 if !status.success() {
530 return Err(anyhow::anyhow!(
531 "Failed to create junction from {:?} to {:?}",
532 path,
533 target
534 ));
535 }
536 } else {
537 smol::fs::windows::symlink_file(target, path).await?
538 }
539
540 Ok(())
541 }
542
543 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
544 let mut open_options = smol::fs::OpenOptions::new();
545 open_options.write(true).create(true);
546 if options.overwrite {
547 open_options.truncate(true);
548 } else if !options.ignore_if_exists {
549 open_options.create_new(true);
550 }
551 open_options.open(path).await?;
552 Ok(())
553 }
554
555 async fn create_file_with(
556 &self,
557 path: &Path,
558 content: Pin<&mut (dyn AsyncRead + Send)>,
559 ) -> Result<()> {
560 let mut file = smol::fs::File::create(&path).await?;
561 futures::io::copy(content, &mut file).await?;
562 Ok(())
563 }
564
565 async fn extract_tar_file(
566 &self,
567 path: &Path,
568 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
569 ) -> Result<()> {
570 content.unpack(path).await?;
571 Ok(())
572 }
573
574 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
575 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
576 if options.ignore_if_exists {
577 return Ok(());
578 } else {
579 anyhow::bail!("{target:?} already exists");
580 }
581 }
582
583 smol::fs::copy(source, target).await?;
584 Ok(())
585 }
586
587 async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
588 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
589 if options.ignore_if_exists {
590 return Ok(());
591 } else {
592 anyhow::bail!("{target:?} already exists");
593 }
594 }
595
596 if options.create_parents {
597 if let Some(parent) = target.parent() {
598 self.create_dir(parent).await?;
599 }
600 }
601
602 smol::fs::rename(source, target).await?;
603 Ok(())
604 }
605
606 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
607 let result = if options.recursive {
608 smol::fs::remove_dir_all(path).await
609 } else {
610 smol::fs::remove_dir(path).await
611 };
612 match result {
613 Ok(()) => Ok(()),
614 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
615 Ok(())
616 }
617 Err(err) => Err(err)?,
618 }
619 }
620
621 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
622 #[cfg(windows)]
623 if let Ok(Some(metadata)) = self.metadata(path).await
624 && metadata.is_symlink
625 && metadata.is_dir
626 {
627 self.remove_dir(
628 path,
629 RemoveOptions {
630 recursive: false,
631 ignore_if_not_exists: true,
632 },
633 )
634 .await?;
635 return Ok(());
636 }
637
638 match smol::fs::remove_file(path).await {
639 Ok(()) => Ok(()),
640 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
641 Ok(())
642 }
643 Err(err) => Err(err)?,
644 }
645 }
646
647 #[cfg(target_os = "macos")]
648 async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
649 use cocoa::{
650 base::{id, nil},
651 foundation::{NSAutoreleasePool, NSString},
652 };
653 use objc::{class, msg_send, sel, sel_impl};
654
655 unsafe {
            /// Allow `NSString::alloc` use here because the string is immediately autoreleased.
657 #[allow(clippy::disallowed_methods)]
658 unsafe fn ns_string(string: &str) -> id {
659 unsafe { NSString::alloc(nil).init_str(string).autorelease() }
660 }
661
662 let url: id = msg_send![class!(NSURL), fileURLWithPath: ns_string(path.to_string_lossy().as_ref())];
663 let array: id = msg_send![class!(NSArray), arrayWithObject: url];
664 let workspace: id = msg_send![class!(NSWorkspace), sharedWorkspace];
665
666 let _: id = msg_send![workspace, recycleURLs: array completionHandler: nil];
667 }
668 Ok(())
669 }
670
671 #[cfg(any(target_os = "linux", target_os = "freebsd"))]
672 async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
673 if let Ok(Some(metadata)) = self.metadata(path).await
674 && metadata.is_symlink
675 {
676 // TODO: trash_file does not support trashing symlinks yet - https://github.com/bilelmoussaoui/ashpd/issues/255
677 return self.remove_file(path, RemoveOptions::default()).await;
678 }
679 let file = smol::fs::File::open(path).await?;
680 match trash::trash_file(&file.as_fd()).await {
681 Ok(_) => Ok(()),
682 Err(err) => {
683 log::error!("Failed to trash file: {}", err);
684 // Trashing files can fail if you don't have a trashing dbus service configured.
685 // In that case, delete the file directly instead.
686 return self.remove_file(path, RemoveOptions::default()).await;
687 }
688 }
689 }
690
691 #[cfg(target_os = "windows")]
692 async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
693 use util::paths::SanitizedPath;
694 use windows::{
695 Storage::{StorageDeleteOption, StorageFile},
696 core::HSTRING,
697 };
698 // todo(windows)
699 // When new version of `windows-rs` release, make this operation `async`
700 let path = path.canonicalize()?;
701 let path = SanitizedPath::new(&path);
702 let path_string = path.to_string();
703 let file = StorageFile::GetFileFromPathAsync(&HSTRING::from(path_string))?.get()?;
704 file.DeleteAsync(StorageDeleteOption::Default)?.get()?;
705 Ok(())
706 }
707
708 #[cfg(target_os = "macos")]
709 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
710 self.trash_file(path, options).await
711 }
712
713 #[cfg(any(target_os = "linux", target_os = "freebsd"))]
714 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
715 self.trash_file(path, options).await
716 }
717
718 #[cfg(target_os = "windows")]
719 async fn trash_dir(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
720 use util::paths::SanitizedPath;
721 use windows::{
722 Storage::{StorageDeleteOption, StorageFolder},
723 core::HSTRING,
724 };
725
726 // todo(windows)
727 // When new version of `windows-rs` release, make this operation `async`
728 let path = path.canonicalize()?;
729 let path = SanitizedPath::new(&path);
730 let path_string = path.to_string();
731 let folder = StorageFolder::GetFolderFromPathAsync(&HSTRING::from(path_string))?.get()?;
732 folder.DeleteAsync(StorageDeleteOption::Default)?.get()?;
733 Ok(())
734 }
735
736 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
737 Ok(Box::new(std::fs::File::open(path)?))
738 }
739
740 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
741 let mut options = std::fs::OpenOptions::new();
742 options.read(true);
743 #[cfg(windows)]
744 {
745 use std::os::windows::fs::OpenOptionsExt;
746 options.custom_flags(windows::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS.0);
747 }
748 Ok(Arc::new(options.open(path)?))
749 }
750
751 async fn load(&self, path: &Path) -> Result<String> {
752 let path = path.to_path_buf();
753 self.executor
754 .spawn(async move { Ok(std::fs::read_to_string(path)?) })
755 .await
756 }
757
758 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
759 let path = path.to_path_buf();
760 let bytes = self
761 .executor
762 .spawn(async move { std::fs::read(path) })
763 .await?;
764 Ok(bytes)
765 }
766
767 #[cfg(not(target_os = "windows"))]
768 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
769 smol::unblock(move || {
770 // Use the directory of the destination as temp dir to avoid
771 // invalid cross-device link error, and XDG_CACHE_DIR for fallback.
772 // See https://github.com/zed-industries/zed/pull/8437 for more details.
773 let mut tmp_file =
774 tempfile::NamedTempFile::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
775 tmp_file.write_all(data.as_bytes())?;
776 tmp_file.persist(path)?;
777 anyhow::Ok(())
778 })
779 .await?;
780
781 Ok(())
782 }
783
784 #[cfg(target_os = "windows")]
785 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
786 smol::unblock(move || {
787 // If temp dir is set to a different drive than the destination,
788 // we receive error:
789 //
790 // failed to persist temporary file:
791 // The system cannot move the file to a different disk drive. (os error 17)
792 //
793 // This is because `ReplaceFileW` does not support cross volume moves.
794 // See the remark section: "The backup file, replaced file, and replacement file must all reside on the same volume."
795 // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-replacefilew#remarks
796 //
797 // So we use the directory of the destination as a temp dir to avoid it.
798 // https://github.com/zed-industries/zed/issues/16571
799 let temp_dir = TempDir::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
800 let temp_file = {
801 let temp_file_path = temp_dir.path().join("temp_file");
802 let mut file = std::fs::File::create_new(&temp_file_path)?;
803 file.write_all(data.as_bytes())?;
804 temp_file_path
805 };
806 atomic_replace(path.as_path(), temp_file.as_path())?;
807 anyhow::Ok(())
808 })
809 .await?;
810 Ok(())
811 }
812
813 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
814 let buffer_size = text.summary().len.min(10 * 1024);
815 if let Some(path) = path.parent() {
816 self.create_dir(path).await?;
817 }
818 let file = smol::fs::File::create(path).await?;
819 let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
820 for chunk in text::chunks_with_line_ending(text, line_ending) {
821 writer.write_all(chunk.as_bytes()).await?;
822 }
823 writer.flush().await?;
824 Ok(())
825 }
826
827 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
828 if let Some(path) = path.parent() {
829 self.create_dir(path).await?;
830 }
831 let path = path.to_owned();
832 let contents = content.to_owned();
833 self.executor
834 .spawn(async move {
835 std::fs::write(path, contents)?;
836 Ok(())
837 })
838 .await
839 }
840
841 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
842 let path = path.to_owned();
843 self.executor
844 .spawn(async move {
845 #[cfg(target_os = "windows")]
846 let result = Self::canonicalize(&path);
847
848 #[cfg(not(target_os = "windows"))]
849 let result = std::fs::canonicalize(&path);
850
851 result.with_context(|| format!("canonicalizing {path:?}"))
852 })
853 .await
854 }
855
856 async fn is_file(&self, path: &Path) -> bool {
857 let path = path.to_owned();
858 self.executor
859 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) })
860 .await
861 }
862
863 async fn is_dir(&self, path: &Path) -> bool {
864 let path = path.to_owned();
865 self.executor
866 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) })
867 .await
868 }
869
870 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
871 let path_buf = path.to_owned();
872 let symlink_metadata = match self
873 .executor
874 .spawn(async move { std::fs::symlink_metadata(&path_buf) })
875 .await
876 {
877 Ok(metadata) => metadata,
878 Err(err) => {
879 return match err.kind() {
880 io::ErrorKind::NotFound | io::ErrorKind::NotADirectory => Ok(None),
881 _ => Err(anyhow::Error::new(err)),
882 };
883 }
884 };
885
886 let is_symlink = symlink_metadata.file_type().is_symlink();
887 let metadata = if is_symlink {
888 let path_buf = path.to_path_buf();
889 // Read target metadata, if the target exists
890 match self
891 .executor
892 .spawn(async move { std::fs::metadata(path_buf) })
893 .await
894 {
895 Ok(target_metadata) => target_metadata,
896 Err(err) => {
897 if err.kind() != io::ErrorKind::NotFound {
898 // TODO: Also FilesystemLoop when that's stable
899 log::warn!(
900 "Failed to read symlink target metadata for path {path:?}: {err}"
901 );
902 }
                    // For a broken or recursive symlink, fall back to the symlink's own
                    // metadata. (The same applies to a symlink into a directory we can't
                    // read, which is hard to distinguish from a broken link.)
906 symlink_metadata
907 }
908 }
909 } else {
910 symlink_metadata
911 };
912
913 #[cfg(unix)]
914 let inode = metadata.ino();
915
916 #[cfg(windows)]
917 let inode = file_id(path).await?;
918
919 #[cfg(windows)]
920 let is_fifo = false;
921
922 #[cfg(unix)]
923 let is_fifo = metadata.file_type().is_fifo();
924
925 let path_buf = path.to_path_buf();
926 let is_executable = self
927 .executor
928 .spawn(async move { path_buf.is_executable() })
929 .await;
930
931 Ok(Some(Metadata {
932 inode,
933 mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)),
934 len: metadata.len(),
935 is_symlink,
936 is_dir: metadata.file_type().is_dir(),
937 is_fifo,
938 is_executable,
939 }))
940 }
941
942 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
943 let path = path.to_owned();
944 let path = self
945 .executor
946 .spawn(async move { std::fs::read_link(&path) })
947 .await?;
948 Ok(path)
949 }
950
951 async fn read_dir(
952 &self,
953 path: &Path,
954 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
955 let path = path.to_owned();
956 let result = iter(
957 self.executor
958 .spawn(async move { std::fs::read_dir(path) })
959 .await?,
960 )
961 .map(|entry| match entry {
962 Ok(entry) => Ok(entry.path()),
963 Err(error) => Err(anyhow!("failed to read dir entry {error:?}")),
964 });
965 Ok(Box::pin(result))
966 }
967
968 #[cfg(target_os = "macos")]
969 async fn watch(
970 &self,
971 path: &Path,
972 latency: Duration,
973 ) -> (
974 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
975 Arc<dyn Watcher>,
976 ) {
977 use fsevent::StreamFlags;
978
979 let (events_tx, events_rx) = smol::channel::unbounded();
980 let handles = Arc::new(parking_lot::Mutex::new(collections::BTreeMap::default()));
981 let watcher = Arc::new(mac_watcher::MacWatcher::new(
982 events_tx,
983 Arc::downgrade(&handles),
984 latency,
985 ));
986 watcher.add(path).expect("handles can't be dropped");
987
988 (
989 Box::pin(
990 events_rx
991 .map(|events| {
992 events
993 .into_iter()
994 .map(|event| {
995 log::trace!("fs path event: {event:?}");
996 let kind = if event.flags.contains(StreamFlags::ITEM_REMOVED) {
997 Some(PathEventKind::Removed)
998 } else if event.flags.contains(StreamFlags::ITEM_CREATED) {
999 Some(PathEventKind::Created)
1000 } else if event.flags.contains(StreamFlags::ITEM_MODIFIED)
1001 | event.flags.contains(StreamFlags::ITEM_RENAMED)
1002 {
1003 Some(PathEventKind::Changed)
1004 } else {
1005 None
1006 };
1007 PathEvent {
1008 path: event.path,
1009 kind,
1010 }
1011 })
1012 .collect()
1013 })
1014 .chain(futures::stream::once(async move {
1015 drop(handles);
1016 vec![]
1017 })),
1018 ),
1019 watcher,
1020 )
1021 }
1022
1023 #[cfg(not(target_os = "macos"))]
1024 async fn watch(
1025 &self,
1026 path: &Path,
1027 latency: Duration,
1028 ) -> (
1029 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
1030 Arc<dyn Watcher>,
1031 ) {
1032 use util::{ResultExt as _, paths::SanitizedPath};
1033
1034 let (tx, rx) = smol::channel::unbounded();
1035 let pending_paths: Arc<Mutex<Vec<PathEvent>>> = Default::default();
1036 let watcher = Arc::new(fs_watcher::FsWatcher::new(tx, pending_paths.clone()));
1037
1038 // If the path doesn't exist yet (e.g. settings.json), watch the parent dir to learn when it's created.
1039 if let Err(e) = watcher.add(path)
1040 && let Some(parent) = path.parent()
1041 && let Err(parent_e) = watcher.add(parent)
1042 {
1043 log::warn!(
1044 "Failed to watch {} and its parent directory {}:\n{e}\n{parent_e}",
1045 path.display(),
1046 parent.display()
1047 );
1048 }
1049
1050 // Check if path is a symlink and follow the target parent
1051 if let Some(mut target) = self.read_link(path).await.ok() {
1052 log::trace!("watch symlink {path:?} -> {target:?}");
1053 // Check if symlink target is relative path, if so make it absolute
1054 if target.is_relative()
1055 && let Some(parent) = path.parent()
1056 {
1057 target = parent.join(target);
1058 if let Ok(canonical) = self.canonicalize(&target).await {
1059 target = SanitizedPath::new(&canonical).as_path().to_path_buf();
1060 }
1061 }
1062 watcher.add(&target).ok();
1063 if let Some(parent) = target.parent() {
1064 watcher.add(parent).log_err();
1065 }
1066 }
1067
1068 (
1069 Box::pin(rx.filter_map({
1070 let watcher = watcher.clone();
1071 move |_| {
1072 let _ = watcher.clone();
1073 let pending_paths = pending_paths.clone();
1074 async move {
1075 smol::Timer::after(latency).await;
1076 let paths = std::mem::take(&mut *pending_paths.lock());
1077 (!paths.is_empty()).then_some(paths)
1078 }
1079 }
1080 })),
1081 watcher,
1082 )
1083 }
1084
1085 fn open_repo(
1086 &self,
1087 dotgit_path: &Path,
1088 system_git_binary_path: Option<&Path>,
1089 ) -> Option<Arc<dyn GitRepository>> {
1090 Some(Arc::new(RealGitRepository::new(
1091 dotgit_path,
1092 self.bundled_git_binary_path.clone(),
1093 system_git_binary_path.map(|path| path.to_path_buf()),
1094 self.executor.clone(),
1095 )?))
1096 }
1097
1098 async fn git_init(
1099 &self,
1100 abs_work_directory_path: &Path,
1101 fallback_branch_name: String,
1102 ) -> Result<()> {
1103 let config = new_smol_command("git")
1104 .current_dir(abs_work_directory_path)
1105 .args(&["config", "--global", "--get", "init.defaultBranch"])
1106 .output()
1107 .await?;
1108
        let branch_name = if config.status.success() && !config.stdout.is_empty() {
            String::from_utf8_lossy(&config.stdout)
        } else {
            Cow::Borrowed(fallback_branch_name.as_str())
        };
1116
1117 new_smol_command("git")
1118 .current_dir(abs_work_directory_path)
1119 .args(&["init", "-b"])
1120 .arg(branch_name.trim())
1121 .output()
1122 .await?;
1123
1124 Ok(())
1125 }
1126
1127 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()> {
1128 let job_id = self.next_job_id.fetch_add(1, Ordering::SeqCst);
1129 let job_info = JobInfo {
1130 id: job_id,
1131 start: Instant::now(),
1132 message: SharedString::from(format!("Cloning {}", repo_url)),
1133 };
1134
1135 let _job_tracker = JobTracker::new(job_info, self.job_event_subscribers.clone());
1136
1137 let output = new_smol_command("git")
1138 .current_dir(abs_work_directory)
1139 .args(&["clone", repo_url])
1140 .output()
1141 .await?;
1142
1143 if !output.status.success() {
1144 anyhow::bail!(
1145 "git clone failed: {}",
1146 String::from_utf8_lossy(&output.stderr)
1147 );
1148 }
1149
1150 Ok(())
1151 }
1152
1153 fn is_fake(&self) -> bool {
1154 false
1155 }
1156
1157 fn subscribe_to_jobs(&self) -> JobEventReceiver {
1158 let (sender, receiver) = futures::channel::mpsc::unbounded();
1159 self.job_event_subscribers.lock().push(sender);
1160 receiver
1161 }
1162
1163 /// Checks whether the file system is case sensitive by attempting to create two files
1164 /// that have the same name except for the casing.
1165 ///
    /// It creates both files in a temporary directory that it removes at the end.
1167 async fn is_case_sensitive(&self) -> Result<bool> {
1168 let temp_dir = TempDir::new()?;
1169 let test_file_1 = temp_dir.path().join("case_sensitivity_test.tmp");
1170 let test_file_2 = temp_dir.path().join("CASE_SENSITIVITY_TEST.TMP");
1171
1172 let create_opts = CreateOptions {
1173 overwrite: false,
1174 ignore_if_exists: false,
1175 };
1176
1177 // Create file1
1178 self.create_file(&test_file_1, create_opts).await?;
1179
1180 // Now check whether it's possible to create file2
1181 let case_sensitive = match self.create_file(&test_file_2, create_opts).await {
1182 Ok(_) => Ok(true),
1183 Err(e) => {
1184 if let Some(io_error) = e.downcast_ref::<io::Error>() {
1185 if io_error.kind() == io::ErrorKind::AlreadyExists {
1186 Ok(false)
1187 } else {
1188 Err(e)
1189 }
1190 } else {
1191 Err(e)
1192 }
1193 }
1194 };
1195
1196 temp_dir.close()?;
1197 case_sensitive
1198 }
1199}
1200
1201#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
1202impl Watcher for RealWatcher {
1203 fn add(&self, _: &Path) -> Result<()> {
1204 Ok(())
1205 }
1206
1207 fn remove(&self, _: &Path) -> Result<()> {
1208 Ok(())
1209 }
1210}
1211
1212#[cfg(any(test, feature = "test-support"))]
1213pub struct FakeFs {
1214 this: std::sync::Weak<Self>,
1215 // Use an unfair lock to ensure tests are deterministic.
1216 state: Arc<Mutex<FakeFsState>>,
1217 executor: gpui::BackgroundExecutor,
1218}
1219
1220#[cfg(any(test, feature = "test-support"))]
1221struct FakeFsState {
1222 root: FakeFsEntry,
1223 next_inode: u64,
1224 next_mtime: SystemTime,
1225 git_event_tx: smol::channel::Sender<PathBuf>,
1226 event_txs: Vec<(PathBuf, smol::channel::Sender<Vec<PathEvent>>)>,
1227 events_paused: bool,
1228 buffered_events: Vec<PathEvent>,
1229 metadata_call_count: usize,
1230 read_dir_call_count: usize,
1231 path_write_counts: std::collections::HashMap<PathBuf, usize>,
1232 moves: std::collections::HashMap<u64, PathBuf>,
1233 job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
1234}
1235
1236#[cfg(any(test, feature = "test-support"))]
1237#[derive(Clone, Debug)]
1238enum FakeFsEntry {
1239 File {
1240 inode: u64,
1241 mtime: MTime,
1242 len: u64,
1243 content: Vec<u8>,
1244 // The path to the repository state directory, if this is a gitfile.
1245 git_dir_path: Option<PathBuf>,
1246 },
1247 Dir {
1248 inode: u64,
1249 mtime: MTime,
1250 len: u64,
1251 entries: BTreeMap<String, FakeFsEntry>,
1252 git_repo_state: Option<Arc<Mutex<FakeGitRepositoryState>>>,
1253 },
1254 Symlink {
1255 target: PathBuf,
1256 },
1257}
1258
1259#[cfg(any(test, feature = "test-support"))]
1260impl PartialEq for FakeFsEntry {
1261 fn eq(&self, other: &Self) -> bool {
1262 match (self, other) {
1263 (
1264 Self::File {
1265 inode: l_inode,
1266 mtime: l_mtime,
1267 len: l_len,
1268 content: l_content,
1269 git_dir_path: l_git_dir_path,
1270 },
1271 Self::File {
1272 inode: r_inode,
1273 mtime: r_mtime,
1274 len: r_len,
1275 content: r_content,
1276 git_dir_path: r_git_dir_path,
1277 },
1278 ) => {
1279 l_inode == r_inode
1280 && l_mtime == r_mtime
1281 && l_len == r_len
1282 && l_content == r_content
1283 && l_git_dir_path == r_git_dir_path
1284 }
1285 (
1286 Self::Dir {
1287 inode: l_inode,
1288 mtime: l_mtime,
1289 len: l_len,
1290 entries: l_entries,
1291 git_repo_state: l_git_repo_state,
1292 },
1293 Self::Dir {
1294 inode: r_inode,
1295 mtime: r_mtime,
1296 len: r_len,
1297 entries: r_entries,
1298 git_repo_state: r_git_repo_state,
1299 },
1300 ) => {
1301 let same_repo_state = match (l_git_repo_state.as_ref(), r_git_repo_state.as_ref()) {
1302 (Some(l), Some(r)) => Arc::ptr_eq(l, r),
1303 (None, None) => true,
1304 _ => false,
1305 };
1306 l_inode == r_inode
1307 && l_mtime == r_mtime
1308 && l_len == r_len
1309 && l_entries == r_entries
1310 && same_repo_state
1311 }
1312 (Self::Symlink { target: l_target }, Self::Symlink { target: r_target }) => {
1313 l_target == r_target
1314 }
1315 _ => false,
1316 }
1317 }
1318}
1319
1320#[cfg(any(test, feature = "test-support"))]
1321impl FakeFsState {
1322 fn get_and_increment_mtime(&mut self) -> MTime {
1323 let mtime = self.next_mtime;
1324 self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
1325 MTime(mtime)
1326 }
1327
1328 fn get_and_increment_inode(&mut self) -> u64 {
1329 let inode = self.next_inode;
1330 self.next_inode += 1;
1331 inode
1332 }
1333
1334 fn canonicalize(&self, target: &Path, follow_symlink: bool) -> Option<PathBuf> {
1335 let mut canonical_path = PathBuf::new();
1336 let mut path = target.to_path_buf();
1337 let mut entry_stack = Vec::new();
1338 'outer: loop {
1339 let mut path_components = path.components().peekable();
1340 let mut prefix = None;
1341 while let Some(component) = path_components.next() {
1342 match component {
1343 Component::Prefix(prefix_component) => prefix = Some(prefix_component),
1344 Component::RootDir => {
1345 entry_stack.clear();
1346 entry_stack.push(&self.root);
1347 canonical_path.clear();
1348 match prefix {
1349 Some(prefix_component) => {
1350 canonical_path = PathBuf::from(prefix_component.as_os_str());
1351 // Prefixes like `C:\\` are represented without their trailing slash, so we have to re-add it.
1352 canonical_path.push(std::path::MAIN_SEPARATOR_STR);
1353 }
1354 None => canonical_path = PathBuf::from(std::path::MAIN_SEPARATOR_STR),
1355 }
1356 }
1357 Component::CurDir => {}
1358 Component::ParentDir => {
1359 entry_stack.pop()?;
1360 canonical_path.pop();
1361 }
1362 Component::Normal(name) => {
1363 let current_entry = *entry_stack.last()?;
1364 if let FakeFsEntry::Dir { entries, .. } = current_entry {
1365 let entry = entries.get(name.to_str().unwrap())?;
1366 if (path_components.peek().is_some() || follow_symlink)
1367 && let FakeFsEntry::Symlink { target, .. } = entry
1368 {
1369 let mut target = target.clone();
1370 target.extend(path_components);
1371 path = target;
1372 continue 'outer;
1373 }
1374 entry_stack.push(entry);
1375 canonical_path = canonical_path.join(name);
1376 } else {
1377 return None;
1378 }
1379 }
1380 }
1381 }
1382 break;
1383 }
1384
1385 if entry_stack.is_empty() {
1386 None
1387 } else {
1388 Some(canonical_path)
1389 }
1390 }
1391
1392 fn try_entry(
1393 &mut self,
1394 target: &Path,
1395 follow_symlink: bool,
1396 ) -> Option<(&mut FakeFsEntry, PathBuf)> {
1397 let canonical_path = self.canonicalize(target, follow_symlink)?;
1398
1399 let mut components = canonical_path
1400 .components()
1401 .skip_while(|component| matches!(component, Component::Prefix(_)));
1402 let Some(Component::RootDir) = components.next() else {
1403 panic!(
1404 "the path {:?} was not canonicalized properly {:?}",
1405 target, canonical_path
1406 )
1407 };
1408
1409 let mut entry = &mut self.root;
1410 for component in components {
1411 match component {
1412 Component::Normal(name) => {
1413 if let FakeFsEntry::Dir { entries, .. } = entry {
1414 entry = entries.get_mut(name.to_str().unwrap())?;
1415 } else {
1416 return None;
1417 }
1418 }
1419 _ => {
1420 panic!(
1421 "the path {:?} was not canonicalized properly {:?}",
1422 target, canonical_path
1423 )
1424 }
1425 }
1426 }
1427
1428 Some((entry, canonical_path))
1429 }
1430
1431 fn entry(&mut self, target: &Path) -> Result<&mut FakeFsEntry> {
1432 Ok(self
1433 .try_entry(target, true)
1434 .ok_or_else(|| {
1435 anyhow!(io::Error::new(
1436 io::ErrorKind::NotFound,
1437 format!("not found: {target:?}")
1438 ))
1439 })?
1440 .0)
1441 }
1442
1443 fn write_path<Fn, T>(&mut self, path: &Path, callback: Fn) -> Result<T>
1444 where
1445 Fn: FnOnce(btree_map::Entry<String, FakeFsEntry>) -> Result<T>,
1446 {
1447 let path = normalize_path(path);
1448 let filename = path.file_name().context("cannot overwrite the root")?;
1449 let parent_path = path.parent().unwrap();
1450
1451 let parent = self.entry(parent_path)?;
1452 let new_entry = parent
1453 .dir_entries(parent_path)?
1454 .entry(filename.to_str().unwrap().into());
1455 callback(new_entry)
1456 }
1457
1458 fn emit_event<I, T>(&mut self, paths: I)
1459 where
1460 I: IntoIterator<Item = (T, Option<PathEventKind>)>,
1461 T: Into<PathBuf>,
1462 {
1463 self.buffered_events
1464 .extend(paths.into_iter().map(|(path, kind)| PathEvent {
1465 path: path.into(),
1466 kind,
1467 }));
1468
1469 if !self.events_paused {
1470 self.flush_events(self.buffered_events.len());
1471 }
1472 }
1473
1474 fn flush_events(&mut self, mut count: usize) {
1475 count = count.min(self.buffered_events.len());
1476 let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
1477 self.event_txs.retain(|(_, tx)| {
1478 let _ = tx.try_send(events.clone());
1479 !tx.is_closed()
1480 });
1481 }
1482}
1483
1484#[cfg(any(test, feature = "test-support"))]
1485pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
1486 std::sync::LazyLock::new(|| OsStr::new(".git"));
1487
1488#[cfg(any(test, feature = "test-support"))]
1489impl FakeFs {
1490 /// We need to use something large enough for Windows and Unix to consider this a new file.
1491 /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
1492 const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);
1493
1494 pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
1495 let (tx, rx) = smol::channel::bounded::<PathBuf>(10);
1496
1497 let this = Arc::new_cyclic(|this| Self {
1498 this: this.clone(),
1499 executor: executor.clone(),
1500 state: Arc::new(Mutex::new(FakeFsState {
1501 root: FakeFsEntry::Dir {
1502 inode: 0,
1503 mtime: MTime(UNIX_EPOCH),
1504 len: 0,
1505 entries: Default::default(),
1506 git_repo_state: None,
1507 },
1508 git_event_tx: tx,
1509 next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
1510 next_inode: 1,
1511 event_txs: Default::default(),
1512 buffered_events: Vec::new(),
1513 events_paused: false,
1514 read_dir_call_count: 0,
1515 metadata_call_count: 0,
1516 path_write_counts: Default::default(),
1517 moves: Default::default(),
1518 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
1519 })),
1520 });
1521
1522 executor.spawn({
1523 let this = this.clone();
1524 async move {
1525 while let Ok(git_event) = rx.recv().await {
1526 if let Some(mut state) = this.state.try_lock() {
1527 state.emit_event([(git_event, Some(PathEventKind::Changed))]);
1528 } else {
1529 panic!("Failed to lock file system state, this execution would have caused a test hang");
1530 }
1531 }
1532 }
1533 }).detach();
1534
1535 this
1536 }
1537
1538 pub fn set_next_mtime(&self, next_mtime: SystemTime) {
1539 let mut state = self.state.lock();
1540 state.next_mtime = next_mtime;
1541 }
1542
1543 pub fn get_and_increment_mtime(&self) -> MTime {
1544 let mut state = self.state.lock();
1545 state.get_and_increment_mtime()
1546 }
1547
1548 pub async fn touch_path(&self, path: impl AsRef<Path>) {
1549 let mut state = self.state.lock();
1550 let path = path.as_ref();
1551 let new_mtime = state.get_and_increment_mtime();
1552 let new_inode = state.get_and_increment_inode();
1553 state
1554 .write_path(path, move |entry| {
1555 match entry {
1556 btree_map::Entry::Vacant(e) => {
1557 e.insert(FakeFsEntry::File {
1558 inode: new_inode,
1559 mtime: new_mtime,
1560 content: Vec::new(),
1561 len: 0,
1562 git_dir_path: None,
1563 });
1564 }
1565 btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut() {
1566 FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
1567 FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
1568 FakeFsEntry::Symlink { .. } => {}
1569 },
1570 }
1571 Ok(())
1572 })
1573 .unwrap();
1574 state.emit_event([(path.to_path_buf(), Some(PathEventKind::Changed))]);
1575 }
1576
1577 pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
1578 self.write_file_internal(path, content, true).unwrap()
1579 }
1580
1581 pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
1582 let mut state = self.state.lock();
1583 let path = path.as_ref();
1584 let file = FakeFsEntry::Symlink { target };
1585 state
1586 .write_path(path.as_ref(), move |e| match e {
1587 btree_map::Entry::Vacant(e) => {
1588 e.insert(file);
1589 Ok(())
1590 }
1591 btree_map::Entry::Occupied(mut e) => {
1592 *e.get_mut() = file;
1593 Ok(())
1594 }
1595 })
1596 .unwrap();
1597 state.emit_event([(path, Some(PathEventKind::Created))]);
1598 }
1599
1600 fn write_file_internal(
1601 &self,
1602 path: impl AsRef<Path>,
1603 new_content: Vec<u8>,
1604 recreate_inode: bool,
1605 ) -> Result<()> {
1606 let mut state = self.state.lock();
1607 let path_buf = path.as_ref().to_path_buf();
1608 *state.path_write_counts.entry(path_buf).or_insert(0) += 1;
1609 let new_inode = state.get_and_increment_inode();
1610 let new_mtime = state.get_and_increment_mtime();
1611 let new_len = new_content.len() as u64;
1612 let mut kind = None;
1613 state.write_path(path.as_ref(), |entry| {
1614 match entry {
1615 btree_map::Entry::Vacant(e) => {
1616 kind = Some(PathEventKind::Created);
1617 e.insert(FakeFsEntry::File {
1618 inode: new_inode,
1619 mtime: new_mtime,
1620 len: new_len,
1621 content: new_content,
1622 git_dir_path: None,
1623 });
1624 }
1625 btree_map::Entry::Occupied(mut e) => {
1626 kind = Some(PathEventKind::Changed);
1627 if let FakeFsEntry::File {
1628 inode,
1629 mtime,
1630 len,
1631 content,
1632 ..
1633 } = e.get_mut()
1634 {
1635 *mtime = new_mtime;
1636 *content = new_content;
1637 *len = new_len;
1638 if recreate_inode {
1639 *inode = new_inode;
1640 }
1641 } else {
1642 anyhow::bail!("not a file")
1643 }
1644 }
1645 }
1646 Ok(())
1647 })?;
1648 state.emit_event([(path.as_ref(), kind)]);
1649 Ok(())
1650 }
1651
1652 pub fn read_file_sync(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1653 let path = path.as_ref();
1654 let path = normalize_path(path);
1655 let mut state = self.state.lock();
1656 let entry = state.entry(&path)?;
1657 entry.file_content(&path).cloned()
1658 }
1659
1660 async fn load_internal(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1661 let path = path.as_ref();
1662 let path = normalize_path(path);
1663 self.simulate_random_delay().await;
1664 let mut state = self.state.lock();
1665 let entry = state.entry(&path)?;
1666 entry.file_content(&path).cloned()
1667 }
1668
1669 pub fn pause_events(&self) {
1670 self.state.lock().events_paused = true;
1671 }
1672
1673 pub fn unpause_events_and_flush(&self) {
1674 self.state.lock().events_paused = false;
1675 self.flush_events(usize::MAX);
1676 }
1677
1678 pub fn buffered_event_count(&self) -> usize {
1679 self.state.lock().buffered_events.len()
1680 }
1681
1682 pub fn flush_events(&self, count: usize) {
1683 self.state.lock().flush_events(count);
1684 }
1685
1686 pub(crate) fn entry(&self, target: &Path) -> Result<FakeFsEntry> {
1687 self.state.lock().entry(target).cloned()
1688 }
1689
1690 pub(crate) fn insert_entry(&self, target: &Path, new_entry: FakeFsEntry) -> Result<()> {
1691 let mut state = self.state.lock();
1692 state.write_path(target, |entry| {
1693 match entry {
1694 btree_map::Entry::Vacant(vacant_entry) => {
1695 vacant_entry.insert(new_entry);
1696 }
1697 btree_map::Entry::Occupied(mut occupied_entry) => {
1698 occupied_entry.insert(new_entry);
1699 }
1700 }
1701 Ok(())
1702 })
1703 }
1704
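    /// Builds a directory tree from a JSON description: objects become directories,
    /// strings become file contents, and `null` becomes an empty directory.
    ///
    /// A sketch of typical test setup (paths and contents are illustrative):
    ///
    /// ```ignore
    /// fs.insert_tree(
    ///     "/root",
    ///     serde_json::json!({
    ///         "src": { "main.rs": "fn main() {}" },
    ///         "README.md": "# hello",
    ///         "empty-dir": null,
    ///     }),
    /// )
    /// .await;
    /// ```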
1705 #[must_use]
1706 pub fn insert_tree<'a>(
1707 &'a self,
1708 path: impl 'a + AsRef<Path> + Send,
1709 tree: serde_json::Value,
1710 ) -> futures::future::BoxFuture<'a, ()> {
1711 use futures::FutureExt as _;
1712 use serde_json::Value::*;
1713
1714 async move {
1715 let path = path.as_ref();
1716
1717 match tree {
1718 Object(map) => {
1719 self.create_dir(path).await.unwrap();
1720 for (name, contents) in map {
1721 let mut path = PathBuf::from(path);
1722 path.push(name);
1723 self.insert_tree(&path, contents).await;
1724 }
1725 }
1726 Null => {
1727 self.create_dir(path).await.unwrap();
1728 }
1729 String(contents) => {
1730 self.insert_file(&path, contents.into_bytes()).await;
1731 }
1732 _ => {
1733 panic!("JSON object must contain only objects, strings, or null");
1734 }
1735 }
1736 }
1737 .boxed()
1738 }
1739
1740 pub fn insert_tree_from_real_fs<'a>(
1741 &'a self,
1742 path: impl 'a + AsRef<Path> + Send,
1743 src_path: impl 'a + AsRef<Path> + Send,
1744 ) -> futures::future::BoxFuture<'a, ()> {
1745 use futures::FutureExt as _;
1746
1747 async move {
1748 let path = path.as_ref();
1749 if std::fs::metadata(&src_path).unwrap().is_file() {
1750 let contents = std::fs::read(src_path).unwrap();
1751 self.insert_file(path, contents).await;
1752 } else {
1753 self.create_dir(path).await.unwrap();
1754 for entry in std::fs::read_dir(&src_path).unwrap() {
1755 let entry = entry.unwrap();
1756 self.insert_tree_from_real_fs(path.join(entry.file_name()), entry.path())
1757 .await;
1758 }
1759 }
1760 }
1761 .boxed()
1762 }
1763
1764 pub fn with_git_state_and_paths<T, F>(
1765 &self,
1766 dot_git: &Path,
1767 emit_git_event: bool,
1768 f: F,
1769 ) -> Result<T>
1770 where
1771 F: FnOnce(&mut FakeGitRepositoryState, &Path, &Path) -> T,
1772 {
1773 let mut state = self.state.lock();
1774 let git_event_tx = state.git_event_tx.clone();
1775 let entry = state.entry(dot_git).context("open .git")?;
1776
1777 if let FakeFsEntry::Dir { git_repo_state, .. } = entry {
1778 let repo_state = git_repo_state.get_or_insert_with(|| {
1779 log::debug!("insert git state for {dot_git:?}");
1780 Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
1781 });
1782 let mut repo_state = repo_state.lock();
1783
1784 let result = f(&mut repo_state, dot_git, dot_git);
1785
1786 drop(repo_state);
1787 if emit_git_event {
1788 state.emit_event([(dot_git, Some(PathEventKind::Changed))]);
1789 }
1790
1791 Ok(result)
1792 } else if let FakeFsEntry::File {
1793 content,
1794 git_dir_path,
1795 ..
1796 } = &mut *entry
1797 {
1798 let path = match git_dir_path {
1799 Some(path) => path,
1800 None => {
1801 let path = std::str::from_utf8(content)
1802 .ok()
1803 .and_then(|content| content.strip_prefix("gitdir:"))
1804 .context("not a valid gitfile")?
1805 .trim();
1806 git_dir_path.insert(normalize_path(&dot_git.parent().unwrap().join(path)))
1807 }
1808 }
1809 .clone();
1810 let Some((git_dir_entry, canonical_path)) = state.try_entry(&path, true) else {
1811 anyhow::bail!("pointed-to git dir {path:?} not found")
1812 };
1813 let FakeFsEntry::Dir {
1814 git_repo_state,
1815 entries,
1816 ..
1817 } = git_dir_entry
1818 else {
1819 anyhow::bail!("gitfile points to a non-directory")
1820 };
1821 let common_dir = if let Some(child) = entries.get("commondir") {
1822 Path::new(
1823 std::str::from_utf8(child.file_content("commondir".as_ref())?)
1824 .context("commondir content")?,
1825 )
1826 .to_owned()
1827 } else {
1828 canonical_path.clone()
1829 };
1830 let repo_state = git_repo_state.get_or_insert_with(|| {
1831 Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
1832 });
1833 let mut repo_state = repo_state.lock();
1834
1835 let result = f(&mut repo_state, &canonical_path, &common_dir);
1836
1837 if emit_git_event {
1838 drop(repo_state);
1839 state.emit_event([(canonical_path, Some(PathEventKind::Changed))]);
1840 }
1841
1842 Ok(result)
1843 } else {
1844 anyhow::bail!("not a valid git repository");
1845 }
1846 }
1847
1848 pub fn with_git_state<T, F>(&self, dot_git: &Path, emit_git_event: bool, f: F) -> Result<T>
1849 where
1850 F: FnOnce(&mut FakeGitRepositoryState) -> T,
1851 {
1852 self.with_git_state_and_paths(dot_git, emit_git_event, |state, _, _| f(state))
1853 }
1854
1855 pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
1856 self.with_git_state(dot_git, true, |state| {
1857 let branch = branch.map(Into::into);
1858 state.branches.extend(branch.clone());
1859 state.current_branch_name = branch
1860 })
1861 .unwrap();
1862 }
1863
1864 pub fn set_remote_for_repo(
1865 &self,
1866 dot_git: &Path,
1867 name: impl Into<String>,
1868 url: impl Into<String>,
1869 ) {
1870 self.with_git_state(dot_git, true, |state| {
1871 state.remotes.insert(name.into(), url.into());
1872 })
1873 .unwrap();
1874 }
1875
1876 pub fn insert_branches(&self, dot_git: &Path, branches: &[&str]) {
1877 self.with_git_state(dot_git, true, |state| {
1878 if let Some(first) = branches.first()
1879 && state.current_branch_name.is_none()
1880 {
1881 state.current_branch_name = Some(first.to_string())
1882 }
1883 state
1884 .branches
1885 .extend(branches.iter().map(ToString::to_string));
1886 })
1887 .unwrap();
1888 }
1889
1890 pub fn set_unmerged_paths_for_repo(
1891 &self,
1892 dot_git: &Path,
1893 unmerged_state: &[(RepoPath, UnmergedStatus)],
1894 ) {
1895 self.with_git_state(dot_git, true, |state| {
1896 state.unmerged_paths.clear();
1897 state.unmerged_paths.extend(
1898 unmerged_state
1899 .iter()
1900 .map(|(path, content)| (path.clone(), *content)),
1901 );
1902 })
1903 .unwrap();
1904 }
1905
1906 pub fn set_index_for_repo(&self, dot_git: &Path, index_state: &[(&str, String)]) {
1907 self.with_git_state(dot_git, true, |state| {
1908 state.index_contents.clear();
1909 state.index_contents.extend(
1910 index_state
1911 .iter()
1912 .map(|(path, content)| (repo_path(path), content.clone())),
1913 );
1914 })
1915 .unwrap();
1916 }
1917
1918 pub fn set_head_for_repo(
1919 &self,
1920 dot_git: &Path,
1921 head_state: &[(&str, String)],
1922 sha: impl Into<String>,
1923 ) {
1924 self.with_git_state(dot_git, true, |state| {
1925 state.head_contents.clear();
1926 state.head_contents.extend(
1927 head_state
1928 .iter()
1929 .map(|(path, content)| (repo_path(path), content.clone())),
1930 );
1931 state.refs.insert("HEAD".into(), sha.into());
1932 })
1933 .unwrap();
1934 }
1935
1936 pub fn set_head_and_index_for_repo(&self, dot_git: &Path, contents_by_path: &[(&str, String)]) {
1937 self.with_git_state(dot_git, true, |state| {
1938 state.head_contents.clear();
1939 state.head_contents.extend(
1940 contents_by_path
1941 .iter()
1942 .map(|(path, contents)| (repo_path(path), contents.clone())),
1943 );
1944 state.index_contents = state.head_contents.clone();
1945 })
1946 .unwrap();
1947 }
1948
1949 pub fn set_merge_base_content_for_repo(
1950 &self,
1951 dot_git: &Path,
1952 contents_by_path: &[(&str, String)],
1953 ) {
1954 self.with_git_state(dot_git, true, |state| {
1955 use git::Oid;
1956
1957 state.merge_base_contents.clear();
1958 let oids = (1..)
1959 .map(|n| n.to_string())
1960 .map(|n| Oid::from_bytes(n.repeat(20).as_bytes()).unwrap());
1961 for ((path, content), oid) in contents_by_path.iter().zip(oids) {
1962 state.merge_base_contents.insert(repo_path(path), oid);
1963 state.oids.insert(oid, content.clone());
1964 }
1965 })
1966 .unwrap();
1967 }
1968
1969 pub fn set_blame_for_repo(&self, dot_git: &Path, blames: Vec<(RepoPath, git::blame::Blame)>) {
1970 self.with_git_state(dot_git, true, |state| {
1971 state.blames.clear();
1972 state.blames.extend(blames);
1973 })
1974 .unwrap();
1975 }
1976
    /// Puts the given git repository into a state that reflects the given file
    /// statuses, by mutating the head, index, and unmerged state.
1979 pub fn set_status_for_repo(&self, dot_git: &Path, statuses: &[(&str, FileStatus)]) {
1980 let workdir_path = dot_git.parent().unwrap();
1981 let workdir_contents = self.files_with_contents(workdir_path);
1982 self.with_git_state(dot_git, true, |state| {
1983 state.index_contents.clear();
1984 state.head_contents.clear();
1985 state.unmerged_paths.clear();
1986 for (path, content) in workdir_contents {
1987 use util::{paths::PathStyle, rel_path::RelPath};
1988
                let repo_path = RelPath::new(
                    path.strip_prefix(&workdir_path).unwrap(),
                    PathStyle::local(),
                )
                .unwrap();
1990 let repo_path = RepoPath::from_rel_path(&repo_path);
1991 let status = statuses
1992 .iter()
1993 .find_map(|(p, status)| (*p == repo_path.as_unix_str()).then_some(status));
1994 let mut content = String::from_utf8_lossy(&content).to_string();
1995
1996 let mut index_content = None;
1997 let mut head_content = None;
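                // Derive head and index contents that, together with the
                // current working-copy content, produce the requested status.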
1998 match status {
1999 None => {
2000 index_content = Some(content.clone());
2001 head_content = Some(content);
2002 }
2003 Some(FileStatus::Untracked | FileStatus::Ignored) => {}
2004 Some(FileStatus::Unmerged(unmerged_status)) => {
2005 state
2006 .unmerged_paths
2007 .insert(repo_path.clone(), *unmerged_status);
2008 content.push_str(" (unmerged)");
2009 index_content = Some(content.clone());
2010 head_content = Some(content);
2011 }
2012 Some(FileStatus::Tracked(TrackedStatus {
2013 index_status,
2014 worktree_status,
2015 })) => {
2016 match worktree_status {
2017 StatusCode::Modified => {
2018 let mut content = content.clone();
2019 content.push_str(" (modified in working copy)");
2020 index_content = Some(content);
2021 }
2022 StatusCode::TypeChanged | StatusCode::Unmodified => {
2023 index_content = Some(content.clone());
2024 }
2025 StatusCode::Added => {}
2026 StatusCode::Deleted | StatusCode::Renamed | StatusCode::Copied => {
2027 panic!("cannot create these statuses for an existing file");
2028 }
2029 };
2030 match index_status {
2031 StatusCode::Modified => {
2032 let mut content = index_content.clone().expect(
2033 "file cannot be both modified in index and created in working copy",
2034 );
2035 content.push_str(" (modified in index)");
2036 head_content = Some(content);
2037 }
2038 StatusCode::TypeChanged | StatusCode::Unmodified => {
                                head_content = Some(index_content.clone().expect(
                                    "file cannot be both unmodified in index and created in working copy",
                                ));
2040 }
2041 StatusCode::Added => {}
2042 StatusCode::Deleted => {
2043 head_content = Some("".into());
2044 }
2045 StatusCode::Renamed | StatusCode::Copied => {
2046 panic!("cannot create these statuses for an existing file");
2047 }
2048 };
2049 }
2050 };
2051
2052 if let Some(content) = index_content {
2053 state.index_contents.insert(repo_path.clone(), content);
2054 }
2055 if let Some(content) = head_content {
2056 state.head_contents.insert(repo_path.clone(), content);
2057 }
2058 }
        })
        .unwrap();
2060 }
2061
2062 pub fn set_error_message_for_index_write(&self, dot_git: &Path, message: Option<String>) {
2063 self.with_git_state(dot_git, true, |state| {
2064 state.simulated_index_write_error_message = message;
2065 })
2066 .unwrap();
2067 }
2068
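    /// Returns every path in the fake filesystem, in breadth-first order.
    /// Paths containing a `.git` component are skipped unless
    /// `include_dot_git` is true.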
2069 pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
2070 let mut result = Vec::new();
2071 let mut queue = collections::VecDeque::new();
2072 let state = &*self.state.lock();
2073 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2074 while let Some((path, entry)) = queue.pop_front() {
2075 if let FakeFsEntry::Dir { entries, .. } = entry {
2076 for (name, entry) in entries {
2077 queue.push_back((path.join(name), entry));
2078 }
2079 }
2080 if include_dot_git
2081 || !path
2082 .components()
2083 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2084 {
2085 result.push(path);
2086 }
2087 }
2088 result
2089 }
2090
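    /// Returns the paths of all directories in the fake filesystem, in
    /// breadth-first order. Paths containing a `.git` component are skipped
    /// unless `include_dot_git` is true.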
2091 pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
2092 let mut result = Vec::new();
2093 let mut queue = collections::VecDeque::new();
2094 let state = &*self.state.lock();
2095 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2096 while let Some((path, entry)) = queue.pop_front() {
2097 if let FakeFsEntry::Dir { entries, .. } = entry {
2098 for (name, entry) in entries {
2099 queue.push_back((path.join(name), entry));
2100 }
2101 if include_dot_git
2102 || !path
2103 .components()
2104 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2105 {
2106 result.push(path);
2107 }
2108 }
2109 }
2110 result
2111 }
2112
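    /// Returns the paths of all regular files in the fake filesystem, in
    /// breadth-first order. Symlinks are not followed or included.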
2113 pub fn files(&self) -> Vec<PathBuf> {
2114 let mut result = Vec::new();
2115 let mut queue = collections::VecDeque::new();
2116 let state = &*self.state.lock();
2117 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2118 while let Some((path, entry)) = queue.pop_front() {
2119 match entry {
2120 FakeFsEntry::File { .. } => result.push(path),
2121 FakeFsEntry::Dir { entries, .. } => {
2122 for (name, entry) in entries {
2123 queue.push_back((path.join(name), entry));
2124 }
2125 }
2126 FakeFsEntry::Symlink { .. } => {}
2127 }
2128 }
2129 result
2130 }
2131
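    /// Returns the path and raw contents of every file whose path starts with
    /// `prefix`.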
2132 pub fn files_with_contents(&self, prefix: &Path) -> Vec<(PathBuf, Vec<u8>)> {
2133 let mut result = Vec::new();
2134 let mut queue = collections::VecDeque::new();
2135 let state = &*self.state.lock();
2136 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2137 while let Some((path, entry)) = queue.pop_front() {
2138 match entry {
2139 FakeFsEntry::File { content, .. } => {
2140 if path.starts_with(prefix) {
2141 result.push((path, content.clone()));
2142 }
2143 }
2144 FakeFsEntry::Dir { entries, .. } => {
2145 for (name, entry) in entries {
2146 queue.push_back((path.join(name), entry));
2147 }
2148 }
2149 FakeFsEntry::Symlink { .. } => {}
2150 }
2151 }
2152 result
2153 }
2154
2155 /// How many `read_dir` calls have been issued.
2156 pub fn read_dir_call_count(&self) -> usize {
2157 self.state.lock().read_dir_call_count
2158 }
2159
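    /// Returns the paths that still have a live watcher attached, i.e. whose
    /// event channel has not been closed.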
2160 pub fn watched_paths(&self) -> Vec<PathBuf> {
2161 let state = self.state.lock();
2162 state
2163 .event_txs
2164 .iter()
            .filter(|(_, tx)| !tx.is_closed())
            .map(|(path, _)| path.clone())
2166 .collect()
2167 }
2168
2169 /// How many `metadata` calls have been issued.
2170 pub fn metadata_call_count(&self) -> usize {
2171 self.state.lock().metadata_call_count
2172 }
2173
2174 /// How many write operations have been issued for a specific path.
2175 pub fn write_count_for_path(&self, path: impl AsRef<Path>) -> usize {
2176 let path = path.as_ref().to_path_buf();
2177 self.state
2178 .lock()
2179 .path_write_counts
2180 .get(&path)
2181 .copied()
2182 .unwrap_or(0)
2183 }
2184
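    /// Emits a synthetic filesystem event for `path`, as if the corresponding
    /// change had happened on disk.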
2185 pub fn emit_fs_event(&self, path: impl Into<PathBuf>, event: Option<PathEventKind>) {
2186 self.state.lock().emit_event(std::iter::once((path, event)));
2187 }
2188
2189 fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
2190 self.executor.simulate_random_delay()
2191 }
2192}
2193
2194#[cfg(any(test, feature = "test-support"))]
2195impl FakeFsEntry {
2196 fn is_file(&self) -> bool {
2197 matches!(self, Self::File { .. })
2198 }
2199
2200 fn is_symlink(&self) -> bool {
2201 matches!(self, Self::Symlink { .. })
2202 }
2203
2204 fn file_content(&self, path: &Path) -> Result<&Vec<u8>> {
2205 if let Self::File { content, .. } = self {
2206 Ok(content)
2207 } else {
2208 anyhow::bail!("not a file: {path:?}");
2209 }
2210 }
2211
2212 fn dir_entries(&mut self, path: &Path) -> Result<&mut BTreeMap<String, FakeFsEntry>> {
2213 if let Self::Dir { entries, .. } = self {
2214 Ok(entries)
2215 } else {
2216 anyhow::bail!("not a directory: {path:?}");
2217 }
2218 }
2219}
2220
2221#[cfg(any(test, feature = "test-support"))]
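/// A test-only watcher that delivers events over a channel registered with the
/// [`FakeFs`] state, for the originally watched path and any prefixes added
/// later.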
2222struct FakeWatcher {
2223 tx: smol::channel::Sender<Vec<PathEvent>>,
2224 original_path: PathBuf,
2225 fs_state: Arc<Mutex<FakeFsState>>,
2226 prefixes: Mutex<Vec<PathBuf>>,
2227}
2228
2229#[cfg(any(test, feature = "test-support"))]
2230impl Watcher for FakeWatcher {
2231 fn add(&self, path: &Path) -> Result<()> {
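        // The originally watched root is already registered as a prefix, so
        // paths underneath it are covered and need no extra registration.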
2232 if path.starts_with(&self.original_path) {
2233 return Ok(());
2234 }
2235 self.fs_state
2236 .try_lock()
2237 .unwrap()
2238 .event_txs
2239 .push((path.to_owned(), self.tx.clone()));
2240 self.prefixes.lock().push(path.to_owned());
2241 Ok(())
2242 }
2243
2244 fn remove(&self, _: &Path) -> Result<()> {
2245 Ok(())
2246 }
2247}
2248
2249#[cfg(any(test, feature = "test-support"))]
2250#[derive(Debug)]
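/// A test-only file handle that identifies a file by its fake inode, so the
/// file's current path can be resolved even after it has been renamed.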
2251struct FakeHandle {
2252 inode: u64,
2253}
2254
2255#[cfg(any(test, feature = "test-support"))]
2256impl FileHandle for FakeHandle {
2257 fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf> {
2258 let fs = fs.as_fake();
2259 let mut state = fs.state.lock();
2260 let Some(target) = state.moves.get(&self.inode).cloned() else {
2261 anyhow::bail!("fake fd not moved")
2262 };
2263
2264 if state.try_entry(&target, false).is_some() {
2265 return Ok(target);
2266 }
2267 anyhow::bail!("fake fd target not found")
2268 }
2269}
2270
2271#[cfg(any(test, feature = "test-support"))]
2272#[async_trait::async_trait]
2273impl Fs for FakeFs {
2274 async fn create_dir(&self, path: &Path) -> Result<()> {
2275 self.simulate_random_delay().await;
2276
2277 let mut created_dirs = Vec::new();
2278 let mut cur_path = PathBuf::new();
2279 for component in path.components() {
2280 let should_skip = matches!(component, Component::Prefix(..) | Component::RootDir);
2281 cur_path.push(component);
2282 if should_skip {
2283 continue;
2284 }
2285 let mut state = self.state.lock();
2286
2287 let inode = state.get_and_increment_inode();
2288 let mtime = state.get_and_increment_mtime();
2289 state.write_path(&cur_path, |entry| {
2290 entry.or_insert_with(|| {
2291 created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
2292 FakeFsEntry::Dir {
2293 inode,
2294 mtime,
2295 len: 0,
2296 entries: Default::default(),
2297 git_repo_state: None,
2298 }
2299 });
2300 Ok(())
2301 })?
2302 }
2303
2304 self.state.lock().emit_event(created_dirs);
2305 Ok(())
2306 }
2307
2308 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
2309 self.simulate_random_delay().await;
2310 let mut state = self.state.lock();
2311 let inode = state.get_and_increment_inode();
2312 let mtime = state.get_and_increment_mtime();
2313 let file = FakeFsEntry::File {
2314 inode,
2315 mtime,
2316 len: 0,
2317 content: Vec::new(),
2318 git_dir_path: None,
2319 };
2320 let mut kind = Some(PathEventKind::Created);
2321 state.write_path(path, |entry| {
2322 match entry {
2323 btree_map::Entry::Occupied(mut e) => {
2324 if options.overwrite {
2325 kind = Some(PathEventKind::Changed);
2326 *e.get_mut() = file;
2327 } else if !options.ignore_if_exists {
2328 anyhow::bail!("path already exists: {path:?}");
2329 }
2330 }
2331 btree_map::Entry::Vacant(e) => {
2332 e.insert(file);
2333 }
2334 }
2335 Ok(())
2336 })?;
2337 state.emit_event([(path, kind)]);
2338 Ok(())
2339 }
2340
2341 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
2342 let mut state = self.state.lock();
2343 let file = FakeFsEntry::Symlink { target };
2344 state
2345 .write_path(path.as_ref(), move |e| match e {
2346 btree_map::Entry::Vacant(e) => {
2347 e.insert(file);
2348 Ok(())
2349 }
2350 btree_map::Entry::Occupied(mut e) => {
2351 *e.get_mut() = file;
2352 Ok(())
2353 }
2354 })
2355 .unwrap();
2356 state.emit_event([(path, Some(PathEventKind::Created))]);
2357
2358 Ok(())
2359 }
2360
2361 async fn create_file_with(
2362 &self,
2363 path: &Path,
2364 mut content: Pin<&mut (dyn AsyncRead + Send)>,
2365 ) -> Result<()> {
2366 let mut bytes = Vec::new();
2367 content.read_to_end(&mut bytes).await?;
2368 self.write_file_internal(path, bytes, true)?;
2369 Ok(())
2370 }
2371
2372 async fn extract_tar_file(
2373 &self,
2374 path: &Path,
2375 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
2376 ) -> Result<()> {
2377 let mut entries = content.entries()?;
2378 while let Some(entry) = entries.next().await {
2379 let mut entry = entry?;
2380 if entry.header().entry_type().is_file() {
2381 let path = path.join(entry.path()?.as_ref());
2382 let mut bytes = Vec::new();
2383 entry.read_to_end(&mut bytes).await?;
2384 self.create_dir(path.parent().unwrap()).await?;
2385 self.write_file_internal(&path, bytes, true)?;
2386 }
2387 }
2388 Ok(())
2389 }
2390
2391 async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
2392 self.simulate_random_delay().await;
2393
2394 let old_path = normalize_path(old_path);
2395 let new_path = normalize_path(new_path);
2396
2397 if options.create_parents {
2398 if let Some(parent) = new_path.parent() {
2399 self.create_dir(parent).await?;
2400 }
2401 }
2402
2403 let mut state = self.state.lock();
2404 let moved_entry = state.write_path(&old_path, |e| {
2405 if let btree_map::Entry::Occupied(e) = e {
2406 Ok(e.get().clone())
2407 } else {
2408 anyhow::bail!("path does not exist: {old_path:?}")
2409 }
2410 })?;
2411
2412 let inode = match moved_entry {
2413 FakeFsEntry::File { inode, .. } => inode,
2414 FakeFsEntry::Dir { inode, .. } => inode,
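            // Symlink entries have no inode in the fake filesystem, so use 0
            // as a placeholder.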
2415 _ => 0,
2416 };
2417
2418 state.moves.insert(inode, new_path.clone());
2419
2420 state.write_path(&new_path, |e| {
2421 match e {
2422 btree_map::Entry::Occupied(mut e) => {
2423 if options.overwrite {
2424 *e.get_mut() = moved_entry;
2425 } else if !options.ignore_if_exists {
2426 anyhow::bail!("path already exists: {new_path:?}");
2427 }
2428 }
2429 btree_map::Entry::Vacant(e) => {
2430 e.insert(moved_entry);
2431 }
2432 }
2433 Ok(())
2434 })?;
2435
2436 state
2437 .write_path(&old_path, |e| {
2438 if let btree_map::Entry::Occupied(e) = e {
2439 Ok(e.remove())
2440 } else {
2441 unreachable!()
2442 }
2443 })
2444 .unwrap();
2445
2446 state.emit_event([
2447 (old_path, Some(PathEventKind::Removed)),
2448 (new_path, Some(PathEventKind::Created)),
2449 ]);
2450 Ok(())
2451 }
2452
2453 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
2454 self.simulate_random_delay().await;
2455
2456 let source = normalize_path(source);
2457 let target = normalize_path(target);
2458 let mut state = self.state.lock();
2459 let mtime = state.get_and_increment_mtime();
2460 let inode = state.get_and_increment_inode();
2461 let source_entry = state.entry(&source)?;
2462 let content = source_entry.file_content(&source)?.clone();
2463 let mut kind = Some(PathEventKind::Created);
2464 state.write_path(&target, |e| match e {
2465 btree_map::Entry::Occupied(e) => {
2466 if options.overwrite {
2467 kind = Some(PathEventKind::Changed);
2468 Ok(Some(e.get().clone()))
2469 } else if !options.ignore_if_exists {
2470 anyhow::bail!("{target:?} already exists");
2471 } else {
2472 Ok(None)
2473 }
2474 }
2475 btree_map::Entry::Vacant(e) => Ok(Some(
2476 e.insert(FakeFsEntry::File {
2477 inode,
2478 mtime,
2479 len: content.len() as u64,
2480 content,
2481 git_dir_path: None,
2482 })
2483 .clone(),
2484 )),
2485 })?;
2486 state.emit_event([(target, kind)]);
2487 Ok(())
2488 }
2489
2490 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2491 self.simulate_random_delay().await;
2492
2493 let path = normalize_path(path);
2494 let parent_path = path.parent().context("cannot remove the root")?;
2495 let base_name = path.file_name().context("cannot remove the root")?;
2496
2497 let mut state = self.state.lock();
2498 let parent_entry = state.entry(parent_path)?;
2499 let entry = parent_entry
2500 .dir_entries(parent_path)?
2501 .entry(base_name.to_str().unwrap().into());
2502
2503 match entry {
2504 btree_map::Entry::Vacant(_) => {
2505 if !options.ignore_if_not_exists {
2506 anyhow::bail!("{path:?} does not exist");
2507 }
2508 }
2509 btree_map::Entry::Occupied(mut entry) => {
2510 {
2511 let children = entry.get_mut().dir_entries(&path)?;
2512 if !options.recursive && !children.is_empty() {
2513 anyhow::bail!("{path:?} is not empty");
2514 }
2515 }
2516 entry.remove();
2517 }
2518 }
2519 state.emit_event([(path, Some(PathEventKind::Removed))]);
2520 Ok(())
2521 }
2522
2523 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2524 self.simulate_random_delay().await;
2525
2526 let path = normalize_path(path);
2527 let parent_path = path.parent().context("cannot remove the root")?;
        let base_name = path.file_name().context("cannot remove the root")?;
2529 let mut state = self.state.lock();
2530 let parent_entry = state.entry(parent_path)?;
2531 let entry = parent_entry
2532 .dir_entries(parent_path)?
2533 .entry(base_name.to_str().unwrap().into());
2534 match entry {
2535 btree_map::Entry::Vacant(_) => {
2536 if !options.ignore_if_not_exists {
2537 anyhow::bail!("{path:?} does not exist");
2538 }
2539 }
2540 btree_map::Entry::Occupied(mut entry) => {
2541 entry.get_mut().file_content(&path)?;
2542 entry.remove();
2543 }
2544 }
2545 state.emit_event([(path, Some(PathEventKind::Removed))]);
2546 Ok(())
2547 }
2548
2549 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
2550 let bytes = self.load_internal(path).await?;
2551 Ok(Box::new(io::Cursor::new(bytes)))
2552 }
2553
2554 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
2555 self.simulate_random_delay().await;
2556 let mut state = self.state.lock();
2557 let inode = match state.entry(path)? {
2558 FakeFsEntry::File { inode, .. } => *inode,
2559 FakeFsEntry::Dir { inode, .. } => *inode,
2560 _ => unreachable!(),
2561 };
2562 Ok(Arc::new(FakeHandle { inode }))
2563 }
2564
2565 async fn load(&self, path: &Path) -> Result<String> {
2566 let content = self.load_internal(path).await?;
2567 Ok(String::from_utf8(content)?)
2568 }
2569
2570 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
2571 self.load_internal(path).await
2572 }
2573
2574 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
2575 self.simulate_random_delay().await;
2576 let path = normalize_path(path.as_path());
2577 if let Some(path) = path.parent() {
2578 self.create_dir(path).await?;
2579 }
2580 self.write_file_internal(path, data.into_bytes(), true)?;
2581 Ok(())
2582 }
2583
2584 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
2585 self.simulate_random_delay().await;
2586 let path = normalize_path(path);
2587 let content = text::chunks_with_line_ending(text, line_ending).collect::<String>();
2588 if let Some(path) = path.parent() {
2589 self.create_dir(path).await?;
2590 }
2591 self.write_file_internal(path, content.into_bytes(), false)?;
2592 Ok(())
2593 }
2594
2595 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
2596 self.simulate_random_delay().await;
2597 let path = normalize_path(path);
2598 if let Some(path) = path.parent() {
2599 self.create_dir(path).await?;
2600 }
2601 self.write_file_internal(path, content.to_vec(), false)?;
2602 Ok(())
2603 }
2604
2605 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
2606 let path = normalize_path(path);
2607 self.simulate_random_delay().await;
2608 let state = self.state.lock();
2609 let canonical_path = state
2610 .canonicalize(&path, true)
2611 .with_context(|| format!("path does not exist: {path:?}"))?;
2612 Ok(canonical_path)
2613 }
2614
2615 async fn is_file(&self, path: &Path) -> bool {
2616 let path = normalize_path(path);
2617 self.simulate_random_delay().await;
2618 let mut state = self.state.lock();
2619 if let Some((entry, _)) = state.try_entry(&path, true) {
2620 entry.is_file()
2621 } else {
2622 false
2623 }
2624 }
2625
2626 async fn is_dir(&self, path: &Path) -> bool {
2627 self.metadata(path)
2628 .await
2629 .is_ok_and(|metadata| metadata.is_some_and(|metadata| metadata.is_dir))
2630 }
2631
2632 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
2633 self.simulate_random_delay().await;
2634 let path = normalize_path(path);
2635 let mut state = self.state.lock();
2636 state.metadata_call_count += 1;
2637 if let Some((mut entry, _)) = state.try_entry(&path, false) {
2638 let is_symlink = entry.is_symlink();
2639 if is_symlink {
2640 if let Some(e) = state.try_entry(&path, true).map(|e| e.0) {
2641 entry = e;
2642 } else {
2643 return Ok(None);
2644 }
2645 }
2646
2647 Ok(Some(match &*entry {
2648 FakeFsEntry::File {
2649 inode, mtime, len, ..
2650 } => Metadata {
2651 inode: *inode,
2652 mtime: *mtime,
2653 len: *len,
2654 is_dir: false,
2655 is_symlink,
2656 is_fifo: false,
2657 is_executable: false,
2658 },
2659 FakeFsEntry::Dir {
2660 inode, mtime, len, ..
2661 } => Metadata {
2662 inode: *inode,
2663 mtime: *mtime,
2664 len: *len,
2665 is_dir: true,
2666 is_symlink,
2667 is_fifo: false,
2668 is_executable: false,
2669 },
2670 FakeFsEntry::Symlink { .. } => unreachable!(),
2671 }))
2672 } else {
2673 Ok(None)
2674 }
2675 }
2676
2677 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
2678 self.simulate_random_delay().await;
2679 let path = normalize_path(path);
2680 let mut state = self.state.lock();
2681 let (entry, _) = state
2682 .try_entry(&path, false)
2683 .with_context(|| format!("path does not exist: {path:?}"))?;
2684 if let FakeFsEntry::Symlink { target } = entry {
2685 Ok(target.clone())
2686 } else {
2687 anyhow::bail!("not a symlink: {path:?}")
2688 }
2689 }
2690
2691 async fn read_dir(
2692 &self,
2693 path: &Path,
2694 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
2695 self.simulate_random_delay().await;
2696 let path = normalize_path(path);
2697 let mut state = self.state.lock();
2698 state.read_dir_call_count += 1;
2699 let entry = state.entry(&path)?;
2700 let children = entry.dir_entries(&path)?;
2701 let paths = children
2702 .keys()
2703 .map(|file_name| Ok(path.join(file_name)))
2704 .collect::<Vec<_>>();
2705 Ok(Box::pin(futures::stream::iter(paths)))
2706 }
2707
2708 async fn watch(
2709 &self,
2710 path: &Path,
2711 _: Duration,
2712 ) -> (
2713 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
2714 Arc<dyn Watcher>,
2715 ) {
2716 self.simulate_random_delay().await;
2717 let (tx, rx) = smol::channel::unbounded();
2718 let path = path.to_path_buf();
2719 self.state.lock().event_txs.push((path.clone(), tx.clone()));
2720 let executor = self.executor.clone();
2721 let watcher = Arc::new(FakeWatcher {
2722 tx,
2723 original_path: path.to_owned(),
2724 fs_state: self.state.clone(),
2725 prefixes: Mutex::new(vec![path]),
2726 });
2727 (
2728 Box::pin(futures::StreamExt::filter(rx, {
2729 let watcher = watcher.clone();
2730 move |events| {
                    let result = events.iter().any(|event| {
                        watcher
                            .prefixes
                            .lock()
                            .iter()
                            .any(|prefix| event.path.starts_with(prefix))
                    });
2738 let executor = executor.clone();
2739 async move {
2740 executor.simulate_random_delay().await;
2741 result
2742 }
2743 }
2744 })),
2745 watcher,
2746 )
2747 }
2748
2749 fn open_repo(
2750 &self,
2751 abs_dot_git: &Path,
2752 _system_git_binary: Option<&Path>,
2753 ) -> Option<Arc<dyn GitRepository>> {
2754 use util::ResultExt as _;
2755
2756 self.with_git_state_and_paths(
2757 abs_dot_git,
2758 false,
2759 |_, repository_dir_path, common_dir_path| {
2760 Arc::new(fake_git_repo::FakeGitRepository {
2761 fs: self.this.upgrade().unwrap(),
2762 executor: self.executor.clone(),
2763 dot_git_path: abs_dot_git.to_path_buf(),
2764 repository_dir_path: repository_dir_path.to_owned(),
2765 common_dir_path: common_dir_path.to_owned(),
2766 checkpoints: Arc::default(),
2767 }) as _
2768 },
2769 )
2770 .log_err()
2771 }
2772
2773 async fn git_init(
2774 &self,
2775 abs_work_directory_path: &Path,
2776 _fallback_branch_name: String,
2777 ) -> Result<()> {
2778 self.create_dir(&abs_work_directory_path.join(".git")).await
2779 }
2780
2781 async fn git_clone(&self, _repo_url: &str, _abs_work_directory: &Path) -> Result<()> {
2782 anyhow::bail!("Git clone is not supported in fake Fs")
2783 }
2784
2785 fn is_fake(&self) -> bool {
2786 true
2787 }
2788
2789 async fn is_case_sensitive(&self) -> Result<bool> {
2790 Ok(true)
2791 }
2792
2793 fn subscribe_to_jobs(&self) -> JobEventReceiver {
2794 let (sender, receiver) = futures::channel::mpsc::unbounded();
2795 self.state.lock().job_event_subscribers.lock().push(sender);
2796 receiver
2797 }
2798
2799 #[cfg(any(test, feature = "test-support"))]
2800 fn as_fake(&self) -> Arc<FakeFs> {
2801 self.this.upgrade().unwrap()
2802 }
2803}
2804
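/// Lexically normalizes a path, dropping `.` components and resolving `..`
/// against the preceding component (e.g. `/a/b/../c` becomes `/a/c`), without
/// touching the filesystem or resolving symlinks.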
2805pub fn normalize_path(path: &Path) -> PathBuf {
2806 let mut components = path.components().peekable();
2807 let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() {
2808 components.next();
2809 PathBuf::from(c.as_os_str())
2810 } else {
2811 PathBuf::new()
2812 };
2813
2814 for component in components {
2815 match component {
2816 Component::Prefix(..) => unreachable!(),
2817 Component::RootDir => {
2818 ret.push(component.as_os_str());
2819 }
2820 Component::CurDir => {}
2821 Component::ParentDir => {
2822 ret.pop();
2823 }
2824 Component::Normal(c) => {
2825 ret.push(c);
2826 }
2827 }
2828 }
2829 ret
2830}
2831
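/// Recursively copies `source` (a file or a directory tree) into `target`.
///
/// Directories that already exist at the destination are handled according to
/// `options`: the copy fails by default, skips them when `ignore_if_exists` is
/// set, and replaces them when `overwrite` is set. Files are copied with
/// [`Fs::copy_file`] using the same options.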
2832pub async fn copy_recursive<'a>(
2833 fs: &'a dyn Fs,
2834 source: &'a Path,
2835 target: &'a Path,
2836 options: CopyOptions,
2837) -> Result<()> {
2838 for (item, is_dir) in read_dir_items(fs, source).await? {
2839 let Ok(item_relative_path) = item.strip_prefix(source) else {
2840 continue;
2841 };
2842 let target_item = if item_relative_path == Path::new("") {
2843 target.to_path_buf()
2844 } else {
2845 target.join(item_relative_path)
2846 };
2847 if is_dir {
2848 if !options.overwrite && fs.metadata(&target_item).await.is_ok_and(|m| m.is_some()) {
2849 if options.ignore_if_exists {
2850 continue;
2851 } else {
2852 anyhow::bail!("{target_item:?} already exists");
2853 }
2854 }
2855 let _ = fs
2856 .remove_dir(
2857 &target_item,
2858 RemoveOptions {
2859 recursive: true,
2860 ignore_if_not_exists: true,
2861 },
2862 )
2863 .await;
2864 fs.create_dir(&target_item).await?;
2865 } else {
2866 fs.copy_file(&item, &target_item, options).await?;
2867 }
2868 }
2869 Ok(())
2870}
2871
/// Recursively collects all of the paths under `source`, including `source`
/// itself.
///
/// Returns a vector of `(path, is_dir)` tuples.
2875pub async fn read_dir_items<'a>(fs: &'a dyn Fs, source: &'a Path) -> Result<Vec<(PathBuf, bool)>> {
2876 let mut items = Vec::new();
2877 read_recursive(fs, source, &mut items).await?;
2878 Ok(items)
2879}
2880
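/// Depth-first helper for [`read_dir_items`] that appends `(path, is_dir)`
/// entries to `output`.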
2881fn read_recursive<'a>(
2882 fs: &'a dyn Fs,
2883 source: &'a Path,
2884 output: &'a mut Vec<(PathBuf, bool)>,
2885) -> BoxFuture<'a, Result<()>> {
2886 use futures::future::FutureExt;
2887
2888 async move {
2889 let metadata = fs
2890 .metadata(source)
2891 .await?
2892 .with_context(|| format!("path does not exist: {source:?}"))?;
2893
2894 if metadata.is_dir {
2895 output.push((source.to_path_buf(), true));
2896 let mut children = fs.read_dir(source).await?;
2897 while let Some(child_path) = children.next().await {
2898 if let Ok(child_path) = child_path {
2899 read_recursive(fs, &child_path, output).await?;
2900 }
2901 }
2902 } else {
2903 output.push((source.to_path_buf(), false));
2904 }
2905 Ok(())
2906 }
2907 .boxed()
2908}
2909
2910// todo(windows)
// Can we get the file id without opening the file twice?
2912// https://github.com/rust-lang/rust/issues/63010
2913#[cfg(target_os = "windows")]
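/// Returns the Windows file index for `path`, combining `nFileIndexHigh` and
/// `nFileIndexLow` from `GetFileInformationByHandle` into a single `u64` that
/// identifies the file on its volume.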
2914async fn file_id(path: impl AsRef<Path>) -> Result<u64> {
2915 use std::os::windows::io::AsRawHandle;
2916
2917 use smol::fs::windows::OpenOptionsExt;
2918 use windows::Win32::{
2919 Foundation::HANDLE,
2920 Storage::FileSystem::{
2921 BY_HANDLE_FILE_INFORMATION, FILE_FLAG_BACKUP_SEMANTICS, GetFileInformationByHandle,
2922 },
2923 };
2924
2925 let file = smol::fs::OpenOptions::new()
2926 .read(true)
2927 .custom_flags(FILE_FLAG_BACKUP_SEMANTICS.0)
2928 .open(path)
2929 .await?;
2930
2931 let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() };
2932 // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle
2933 // This function supports Windows XP+
2934 smol::unblock(move || {
2935 unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? };
2936
2937 Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64))
2938 })
2939 .await
2940}
2941
2942#[cfg(target_os = "windows")]
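/// Atomically swaps `replaced_file` with `replacement_file` using
/// `ReplaceFileW`. The destination is created first if it does not exist,
/// since `ReplaceFileW` requires it to be present.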
2943fn atomic_replace<P: AsRef<Path>>(
2944 replaced_file: P,
2945 replacement_file: P,
2946) -> windows::core::Result<()> {
2947 use windows::{
2948 Win32::Storage::FileSystem::{REPLACE_FILE_FLAGS, ReplaceFileW},
2949 core::HSTRING,
2950 };
2951
2952 // If the file does not exist, create it.
2953 let _ = std::fs::File::create_new(replaced_file.as_ref());
2954
2955 unsafe {
2956 ReplaceFileW(
2957 &HSTRING::from(replaced_file.as_ref().to_string_lossy().into_owned()),
2958 &HSTRING::from(replacement_file.as_ref().to_string_lossy().into_owned()),
2959 None,
2960 REPLACE_FILE_FLAGS::default(),
2961 None,
2962 None,
2963 )
2964 }
2965}
2966
2967#[cfg(test)]
2968mod tests {
2969 use super::*;
2970 use gpui::BackgroundExecutor;
2971 use serde_json::json;
2972 use util::path;
2973
2974 #[gpui::test]
2975 async fn test_fake_fs(executor: BackgroundExecutor) {
2976 let fs = FakeFs::new(executor.clone());
2977 fs.insert_tree(
2978 path!("/root"),
2979 json!({
2980 "dir1": {
2981 "a": "A",
2982 "b": "B"
2983 },
2984 "dir2": {
2985 "c": "C",
2986 "dir3": {
2987 "d": "D"
2988 }
2989 }
2990 }),
2991 )
2992 .await;
2993
2994 assert_eq!(
2995 fs.files(),
2996 vec![
2997 PathBuf::from(path!("/root/dir1/a")),
2998 PathBuf::from(path!("/root/dir1/b")),
2999 PathBuf::from(path!("/root/dir2/c")),
3000 PathBuf::from(path!("/root/dir2/dir3/d")),
3001 ]
3002 );
3003
3004 fs.create_symlink(path!("/root/dir2/link-to-dir3").as_ref(), "./dir3".into())
3005 .await
3006 .unwrap();
3007
3008 assert_eq!(
3009 fs.canonicalize(path!("/root/dir2/link-to-dir3").as_ref())
3010 .await
3011 .unwrap(),
3012 PathBuf::from(path!("/root/dir2/dir3")),
3013 );
3014 assert_eq!(
3015 fs.canonicalize(path!("/root/dir2/link-to-dir3/d").as_ref())
3016 .await
3017 .unwrap(),
3018 PathBuf::from(path!("/root/dir2/dir3/d")),
3019 );
3020 assert_eq!(
3021 fs.load(path!("/root/dir2/link-to-dir3/d").as_ref())
3022 .await
3023 .unwrap(),
3024 "D",
3025 );
3026 }
3027
3028 #[gpui::test]
3029 async fn test_copy_recursive_with_single_file(executor: BackgroundExecutor) {
3030 let fs = FakeFs::new(executor.clone());
3031 fs.insert_tree(
3032 path!("/outer"),
3033 json!({
3034 "a": "A",
3035 "b": "B",
3036 "inner": {}
3037 }),
3038 )
3039 .await;
3040
3041 assert_eq!(
3042 fs.files(),
3043 vec![
3044 PathBuf::from(path!("/outer/a")),
3045 PathBuf::from(path!("/outer/b")),
3046 ]
3047 );
3048
3049 let source = Path::new(path!("/outer/a"));
3050 let target = Path::new(path!("/outer/a copy"));
3051 copy_recursive(fs.as_ref(), source, target, Default::default())
3052 .await
3053 .unwrap();
3054
3055 assert_eq!(
3056 fs.files(),
3057 vec![
3058 PathBuf::from(path!("/outer/a")),
3059 PathBuf::from(path!("/outer/a copy")),
3060 PathBuf::from(path!("/outer/b")),
3061 ]
3062 );
3063
3064 let source = Path::new(path!("/outer/a"));
3065 let target = Path::new(path!("/outer/inner/a copy"));
3066 copy_recursive(fs.as_ref(), source, target, Default::default())
3067 .await
3068 .unwrap();
3069
3070 assert_eq!(
3071 fs.files(),
3072 vec![
3073 PathBuf::from(path!("/outer/a")),
3074 PathBuf::from(path!("/outer/a copy")),
3075 PathBuf::from(path!("/outer/b")),
3076 PathBuf::from(path!("/outer/inner/a copy")),
3077 ]
3078 );
3079 }
3080
3081 #[gpui::test]
3082 async fn test_copy_recursive_with_single_dir(executor: BackgroundExecutor) {
3083 let fs = FakeFs::new(executor.clone());
3084 fs.insert_tree(
3085 path!("/outer"),
3086 json!({
3087 "a": "A",
3088 "empty": {},
3089 "non-empty": {
3090 "b": "B",
3091 }
3092 }),
3093 )
3094 .await;
3095
3096 assert_eq!(
3097 fs.files(),
3098 vec![
3099 PathBuf::from(path!("/outer/a")),
3100 PathBuf::from(path!("/outer/non-empty/b")),
3101 ]
3102 );
3103 assert_eq!(
3104 fs.directories(false),
3105 vec![
3106 PathBuf::from(path!("/")),
3107 PathBuf::from(path!("/outer")),
3108 PathBuf::from(path!("/outer/empty")),
3109 PathBuf::from(path!("/outer/non-empty")),
3110 ]
3111 );
3112
3113 let source = Path::new(path!("/outer/empty"));
3114 let target = Path::new(path!("/outer/empty copy"));
3115 copy_recursive(fs.as_ref(), source, target, Default::default())
3116 .await
3117 .unwrap();
3118
3119 assert_eq!(
3120 fs.files(),
3121 vec![
3122 PathBuf::from(path!("/outer/a")),
3123 PathBuf::from(path!("/outer/non-empty/b")),
3124 ]
3125 );
3126 assert_eq!(
3127 fs.directories(false),
3128 vec![
3129 PathBuf::from(path!("/")),
3130 PathBuf::from(path!("/outer")),
3131 PathBuf::from(path!("/outer/empty")),
3132 PathBuf::from(path!("/outer/empty copy")),
3133 PathBuf::from(path!("/outer/non-empty")),
3134 ]
3135 );
3136
3137 let source = Path::new(path!("/outer/non-empty"));
3138 let target = Path::new(path!("/outer/non-empty copy"));
3139 copy_recursive(fs.as_ref(), source, target, Default::default())
3140 .await
3141 .unwrap();
3142
3143 assert_eq!(
3144 fs.files(),
3145 vec![
3146 PathBuf::from(path!("/outer/a")),
3147 PathBuf::from(path!("/outer/non-empty/b")),
3148 PathBuf::from(path!("/outer/non-empty copy/b")),
3149 ]
3150 );
3151 assert_eq!(
3152 fs.directories(false),
3153 vec![
3154 PathBuf::from(path!("/")),
3155 PathBuf::from(path!("/outer")),
3156 PathBuf::from(path!("/outer/empty")),
3157 PathBuf::from(path!("/outer/empty copy")),
3158 PathBuf::from(path!("/outer/non-empty")),
3159 PathBuf::from(path!("/outer/non-empty copy")),
3160 ]
3161 );
3162 }
3163
3164 #[gpui::test]
3165 async fn test_copy_recursive(executor: BackgroundExecutor) {
3166 let fs = FakeFs::new(executor.clone());
3167 fs.insert_tree(
3168 path!("/outer"),
3169 json!({
3170 "inner1": {
3171 "a": "A",
3172 "b": "B",
3173 "inner3": {
3174 "d": "D",
3175 },
3176 "inner4": {}
3177 },
3178 "inner2": {
3179 "c": "C",
3180 }
3181 }),
3182 )
3183 .await;
3184
3185 assert_eq!(
3186 fs.files(),
3187 vec![
3188 PathBuf::from(path!("/outer/inner1/a")),
3189 PathBuf::from(path!("/outer/inner1/b")),
3190 PathBuf::from(path!("/outer/inner2/c")),
3191 PathBuf::from(path!("/outer/inner1/inner3/d")),
3192 ]
3193 );
3194 assert_eq!(
3195 fs.directories(false),
3196 vec![
3197 PathBuf::from(path!("/")),
3198 PathBuf::from(path!("/outer")),
3199 PathBuf::from(path!("/outer/inner1")),
3200 PathBuf::from(path!("/outer/inner2")),
3201 PathBuf::from(path!("/outer/inner1/inner3")),
3202 PathBuf::from(path!("/outer/inner1/inner4")),
3203 ]
3204 );
3205
3206 let source = Path::new(path!("/outer"));
3207 let target = Path::new(path!("/outer/inner1/outer"));
3208 copy_recursive(fs.as_ref(), source, target, Default::default())
3209 .await
3210 .unwrap();
3211
3212 assert_eq!(
3213 fs.files(),
3214 vec![
3215 PathBuf::from(path!("/outer/inner1/a")),
3216 PathBuf::from(path!("/outer/inner1/b")),
3217 PathBuf::from(path!("/outer/inner2/c")),
3218 PathBuf::from(path!("/outer/inner1/inner3/d")),
3219 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3220 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3221 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3222 PathBuf::from(path!("/outer/inner1/outer/inner1/inner3/d")),
3223 ]
3224 );
3225 assert_eq!(
3226 fs.directories(false),
3227 vec![
3228 PathBuf::from(path!("/")),
3229 PathBuf::from(path!("/outer")),
3230 PathBuf::from(path!("/outer/inner1")),
3231 PathBuf::from(path!("/outer/inner2")),
3232 PathBuf::from(path!("/outer/inner1/inner3")),
3233 PathBuf::from(path!("/outer/inner1/inner4")),
3234 PathBuf::from(path!("/outer/inner1/outer")),
3235 PathBuf::from(path!("/outer/inner1/outer/inner1")),
3236 PathBuf::from(path!("/outer/inner1/outer/inner2")),
3237 PathBuf::from(path!("/outer/inner1/outer/inner1/inner3")),
3238 PathBuf::from(path!("/outer/inner1/outer/inner1/inner4")),
3239 ]
3240 );
3241 }
3242
3243 #[gpui::test]
3244 async fn test_copy_recursive_with_overwriting(executor: BackgroundExecutor) {
3245 let fs = FakeFs::new(executor.clone());
3246 fs.insert_tree(
3247 path!("/outer"),
3248 json!({
3249 "inner1": {
3250 "a": "A",
3251 "b": "B",
3252 "outer": {
3253 "inner1": {
3254 "a": "B"
3255 }
3256 }
3257 },
3258 "inner2": {
3259 "c": "C",
3260 }
3261 }),
3262 )
3263 .await;
3264
3265 assert_eq!(
3266 fs.files(),
3267 vec![
3268 PathBuf::from(path!("/outer/inner1/a")),
3269 PathBuf::from(path!("/outer/inner1/b")),
3270 PathBuf::from(path!("/outer/inner2/c")),
3271 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3272 ]
3273 );
3274 assert_eq!(
3275 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3276 .await
3277 .unwrap(),
3278 "B",
3279 );
3280
3281 let source = Path::new(path!("/outer"));
3282 let target = Path::new(path!("/outer/inner1/outer"));
3283 copy_recursive(
3284 fs.as_ref(),
3285 source,
3286 target,
3287 CopyOptions {
3288 overwrite: true,
3289 ..Default::default()
3290 },
3291 )
3292 .await
3293 .unwrap();
3294
3295 assert_eq!(
3296 fs.files(),
3297 vec![
3298 PathBuf::from(path!("/outer/inner1/a")),
3299 PathBuf::from(path!("/outer/inner1/b")),
3300 PathBuf::from(path!("/outer/inner2/c")),
3301 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3302 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3303 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3304 PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
3305 ]
3306 );
3307 assert_eq!(
3308 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3309 .await
3310 .unwrap(),
3311 "A"
3312 );
3313 }
3314
3315 #[gpui::test]
3316 async fn test_copy_recursive_with_ignoring(executor: BackgroundExecutor) {
3317 let fs = FakeFs::new(executor.clone());
3318 fs.insert_tree(
3319 path!("/outer"),
3320 json!({
3321 "inner1": {
3322 "a": "A",
3323 "b": "B",
3324 "outer": {
3325 "inner1": {
3326 "a": "B"
3327 }
3328 }
3329 },
3330 "inner2": {
3331 "c": "C",
3332 }
3333 }),
3334 )
3335 .await;
3336
3337 assert_eq!(
3338 fs.files(),
3339 vec![
3340 PathBuf::from(path!("/outer/inner1/a")),
3341 PathBuf::from(path!("/outer/inner1/b")),
3342 PathBuf::from(path!("/outer/inner2/c")),
3343 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3344 ]
3345 );
3346 assert_eq!(
3347 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3348 .await
3349 .unwrap(),
3350 "B",
3351 );
3352
3353 let source = Path::new(path!("/outer"));
3354 let target = Path::new(path!("/outer/inner1/outer"));
3355 copy_recursive(
3356 fs.as_ref(),
3357 source,
3358 target,
3359 CopyOptions {
3360 ignore_if_exists: true,
3361 ..Default::default()
3362 },
3363 )
3364 .await
3365 .unwrap();
3366
3367 assert_eq!(
3368 fs.files(),
3369 vec![
3370 PathBuf::from(path!("/outer/inner1/a")),
3371 PathBuf::from(path!("/outer/inner1/b")),
3372 PathBuf::from(path!("/outer/inner2/c")),
3373 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3374 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3375 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3376 PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
3377 ]
3378 );
3379 assert_eq!(
3380 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3381 .await
3382 .unwrap(),
3383 "B"
3384 );
3385 }
3386
3387 #[gpui::test]
3388 async fn test_realfs_atomic_write(executor: BackgroundExecutor) {
3389 // With the file handle still open, the file should be replaced
3390 // https://github.com/zed-industries/zed/issues/30054
3391 let fs = RealFs {
3392 bundled_git_binary_path: None,
3393 executor,
3394 next_job_id: Arc::new(AtomicUsize::new(0)),
3395 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3396 };
3397 let temp_dir = TempDir::new().unwrap();
3398 let file_to_be_replaced = temp_dir.path().join("file.txt");
3399 let mut file = std::fs::File::create_new(&file_to_be_replaced).unwrap();
3400 file.write_all(b"Hello").unwrap();
3401 // drop(file); // We still hold the file handle here
3402 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3403 assert_eq!(content, "Hello");
3404 smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "World".into())).unwrap();
3405 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3406 assert_eq!(content, "World");
3407 }
3408
3409 #[gpui::test]
3410 async fn test_realfs_atomic_write_non_existing_file(executor: BackgroundExecutor) {
3411 let fs = RealFs {
3412 bundled_git_binary_path: None,
3413 executor,
3414 next_job_id: Arc::new(AtomicUsize::new(0)),
3415 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3416 };
3417 let temp_dir = TempDir::new().unwrap();
3418 let file_to_be_replaced = temp_dir.path().join("file.txt");
3419 smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "Hello".into())).unwrap();
3420 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3421 assert_eq!(content, "Hello");
3422 }
3423
3424 #[gpui::test]
3425 #[cfg(target_os = "windows")]
3426 async fn test_realfs_canonicalize(executor: BackgroundExecutor) {
3427 use util::paths::SanitizedPath;
3428
3429 let fs = RealFs {
3430 bundled_git_binary_path: None,
3431 executor,
3432 next_job_id: Arc::new(AtomicUsize::new(0)),
3433 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3434 };
3435 let temp_dir = TempDir::new().unwrap();
3436 let file = temp_dir.path().join("test (1).txt");
3437 let file = SanitizedPath::new(&file);
3438 std::fs::write(&file, "test").unwrap();
3439
3440 let canonicalized = fs.canonicalize(file.as_path()).await;
3441 assert!(canonicalized.is_ok());
3442 }
3443
3444 #[gpui::test]
3445 async fn test_rename(executor: BackgroundExecutor) {
3446 let fs = FakeFs::new(executor.clone());
3447 fs.insert_tree(
3448 path!("/root"),
3449 json!({
3450 "src": {
3451 "file_a.txt": "content a",
3452 "file_b.txt": "content b"
3453 }
3454 }),
3455 )
3456 .await;
3457
3458 fs.rename(
3459 Path::new(path!("/root/src/file_a.txt")),
3460 Path::new(path!("/root/src/new/renamed_a.txt")),
3461 RenameOptions {
3462 create_parents: true,
3463 ..Default::default()
3464 },
3465 )
3466 .await
3467 .unwrap();
3468
        // Assert that `file_a.txt` was renamed and moved to a directory that
        // did not exist before.
3471 assert_eq!(
3472 fs.files(),
3473 vec![
3474 PathBuf::from(path!("/root/src/file_b.txt")),
3475 PathBuf::from(path!("/root/src/new/renamed_a.txt")),
3476 ]
3477 );
3478
3479 let result = fs
3480 .rename(
3481 Path::new(path!("/root/src/file_b.txt")),
3482 Path::new(path!("/root/src/old/renamed_b.txt")),
3483 RenameOptions {
3484 create_parents: false,
3485 ..Default::default()
3486 },
3487 )
3488 .await;
3489
        // Assert that `file_b.txt` was neither renamed nor moved, since
        // `create_parents` was set to `false` and the target directory did not
        // exist.
3493 assert!(result.is_err());
3494 assert_eq!(
3495 fs.files(),
3496 vec![
3497 PathBuf::from(path!("/root/src/file_b.txt")),
3498 PathBuf::from(path!("/root/src/new/renamed_a.txt")),
3499 ]
3500 );
3501 }
3502
3503 #[gpui::test]
3504 #[cfg(unix)]
3505 async fn test_realfs_broken_symlink_metadata(executor: BackgroundExecutor) {
3506 let tempdir = TempDir::new().unwrap();
3507 let path = tempdir.path();
3508 let fs = RealFs {
3509 bundled_git_binary_path: None,
3510 executor,
3511 next_job_id: Arc::new(AtomicUsize::new(0)),
3512 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3513 };
3514 let symlink_path = path.join("symlink");
3515 smol::block_on(fs.create_symlink(&symlink_path, PathBuf::from("file_a.txt"))).unwrap();
3516 let metadata = fs
3517 .metadata(&symlink_path)
3518 .await
3519 .expect("metadata call succeeds")
3520 .expect("metadata returned");
3521 assert!(metadata.is_symlink);
3522 assert!(!metadata.is_dir);
3523 assert!(!metadata.is_fifo);
3524 assert!(!metadata.is_executable);
        // We intentionally don't assert `len` or `mtime` for symlinks here.
3526 }
3527
3528 #[gpui::test]
3529 #[cfg(unix)]
3530 async fn test_realfs_symlink_loop_metadata(executor: BackgroundExecutor) {
3531 let tempdir = TempDir::new().unwrap();
3532 let path = tempdir.path();
3533 let fs = RealFs {
3534 bundled_git_binary_path: None,
3535 executor,
3536 next_job_id: Arc::new(AtomicUsize::new(0)),
3537 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3538 };
3539 let symlink_path = path.join("symlink");
3540 smol::block_on(fs.create_symlink(&symlink_path, PathBuf::from("symlink"))).unwrap();
3541 let metadata = fs
3542 .metadata(&symlink_path)
3543 .await
3544 .expect("metadata call succeeds")
3545 .expect("metadata returned");
3546 assert!(metadata.is_symlink);
3547 assert!(!metadata.is_dir);
3548 assert!(!metadata.is_fifo);
3549 assert!(!metadata.is_executable);
        // We intentionally don't assert `len` or `mtime` for symlinks here.
3551 }
3552}