1#[cfg(target_os = "macos")]
2mod mac_watcher;
3
4#[cfg(not(target_os = "macos"))]
5pub mod fs_watcher;
6
7use anyhow::{Context as _, Result, anyhow};
8#[cfg(any(target_os = "linux", target_os = "freebsd"))]
9use ashpd::desktop::trash;
10use futures::stream::iter;
11use gpui::App;
12use gpui::BackgroundExecutor;
13use gpui::Global;
14use gpui::ReadGlobal as _;
15use std::borrow::Cow;
16use util::command::new_smol_command;
17
18#[cfg(unix)]
19use std::os::fd::{AsFd, AsRawFd};
20
21#[cfg(unix)]
22use std::os::unix::fs::{FileTypeExt, MetadataExt};
23
24#[cfg(any(target_os = "macos", target_os = "freebsd"))]
25use std::mem::MaybeUninit;
26
27use async_tar::Archive;
28use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture};
29use git::repository::{GitRepository, RealGitRepository};
30use rope::Rope;
31use serde::{Deserialize, Serialize};
32use smol::io::AsyncWriteExt;
33use std::{
34 io::{self, Write},
35 path::{Component, Path, PathBuf},
36 pin::Pin,
37 sync::Arc,
38 time::{Duration, SystemTime, UNIX_EPOCH},
39};
40use tempfile::TempDir;
41use text::LineEnding;
42
43#[cfg(any(test, feature = "test-support"))]
44mod fake_git_repo;
45#[cfg(any(test, feature = "test-support"))]
46use collections::{BTreeMap, btree_map};
47#[cfg(any(test, feature = "test-support"))]
48use fake_git_repo::FakeGitRepositoryState;
49#[cfg(any(test, feature = "test-support"))]
50use git::{
51 repository::{RepoPath, repo_path},
52 status::{FileStatus, StatusCode, TrackedStatus, UnmergedStatus},
53};
54#[cfg(any(test, feature = "test-support"))]
55use parking_lot::Mutex;
56#[cfg(any(test, feature = "test-support"))]
57use smol::io::AsyncReadExt;
58#[cfg(any(test, feature = "test-support"))]
59use std::ffi::OsStr;
60
61use encodings::{Encoding, EncodingOptions};
62#[cfg(any(test, feature = "test-support"))]
63pub use fake_git_repo::{LOAD_HEAD_TEXT_TASK, LOAD_INDEX_TEXT_TASK};
64
/// A handle for registering and unregistering paths with a filesystem watcher.
pub trait Watcher: Send + Sync {
    /// Begins watching the given path.
    fn add(&self, path: &Path) -> Result<()>;
    /// Stops watching the given path.
    fn remove(&self, path: &Path) -> Result<()>;
}
69
/// The coarse kind of change reported for a path by a filesystem watcher.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum PathEventKind {
    Removed,
    Created,
    Changed,
}
76
/// A single filesystem change event: the affected path, and the kind of
/// change if it could be determined (`None` when the backend didn't say).
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct PathEvent {
    pub path: PathBuf,
    pub kind: Option<PathEventKind>,
}
82
83impl From<PathEvent> for PathBuf {
84 fn from(event: PathEvent) -> Self {
85 event.path
86 }
87}
88
/// Abstraction over filesystem operations, implemented by the disk-backed
/// [`RealFs`] and the in-memory [`FakeFs`] used in tests.
#[async_trait::async_trait]
pub trait Fs: Send + Sync {
    /// Creates a directory, including any missing parents.
    async fn create_dir(&self, path: &Path) -> Result<()>;
    /// Creates a symlink at `path` pointing at `target`.
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()>;
    /// Creates an empty file according to `options`.
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
    /// Creates a file at `path`, streaming its contents from `content`.
    async fn create_file_with(
        &self,
        path: &Path,
        content: Pin<&mut (dyn AsyncRead + Send)>,
    ) -> Result<()>;
    /// Unpacks a tar archive beneath `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()>;
    /// Copies a file, honoring `overwrite`/`ignore_if_exists`.
    async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
    /// Renames a file or directory, honoring `overwrite`/`ignore_if_exists`.
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;
    /// Removes a directory, optionally recursively.
    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;
    /// Moves a directory to the system trash. The default implementation
    /// falls back to permanent removal.
    async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.remove_dir(path, options).await
    }
    /// Removes a single file.
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;
    /// Moves a file to the system trash. The default implementation falls
    /// back to permanent removal.
    async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.remove_file(path, options).await
    }
    /// Opens a handle that can later report the path it currently refers to.
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>>;
    /// Opens the file for blocking, synchronous reads.
    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>>;
    /// Loads the file's contents as UTF-8 text.
    async fn load(&self, path: &Path) -> Result<String> {
        Ok(String::from_utf8(self.load_bytes(path).await?)?)
    }

    /// Loads the file's contents, decoding per `options`, and reports the
    /// encoding that was used.
    async fn load_with_encoding(
        &self,
        path: &Path,
        options: &EncodingOptions,
    ) -> Result<(Encoding, String)> {
        let bytes = self.load_bytes(path).await?;
        options.process(bytes)
    }

    /// Loads the file's raw bytes.
    async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>>;
    /// Writes `text` to `path` atomically (temp file + rename).
    async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
    /// Saves a rope to disk with the given line endings and encoding.
    async fn save(
        &self,
        path: &Path,
        text: &Rope,
        line_ending: LineEnding,
        encoding: Encoding,
    ) -> Result<()>;
    /// Writes raw bytes to `path`.
    async fn write(&self, path: &Path, content: &[u8]) -> Result<()>;
    /// Canonicalizes the path, resolving symlinks.
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
    /// Whether the path exists and is a regular file (symlinks followed).
    async fn is_file(&self, path: &Path) -> bool;
    /// Whether the path exists and is a directory (symlinks followed).
    async fn is_dir(&self, path: &Path) -> bool;
    /// Returns metadata for the path, or `Ok(None)` if it does not exist.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
    /// Reads a symlink's target.
    async fn read_link(&self, path: &Path) -> Result<PathBuf>;
    /// Streams the entries of a directory.
    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;

    /// Watches a path for changes, batching events with the given latency.
    /// Returns the event stream plus a [`Watcher`] for adding more paths.
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    );

    /// Opens the git repository whose `.git` directory is at `abs_dot_git`.
    fn open_repo(
        &self,
        abs_dot_git: &Path,
        system_git_binary_path: Option<&Path>,
    ) -> Option<Arc<dyn GitRepository>>;
    /// Initializes a git repository, using `fallback_branch_name` when no
    /// default branch is configured.
    async fn git_init(&self, abs_work_directory: &Path, fallback_branch_name: String)
    -> Result<()>;
    /// Clones `repo_url` into `abs_work_directory`.
    async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()>;
    /// Whether this is the fake, in-memory test implementation.
    fn is_fake(&self) -> bool;
    /// Whether the underlying filesystem treats paths case-sensitively.
    async fn is_case_sensitive(&self) -> Result<bool>;

    /// Downcasts to the fake implementation; panics on a real filesystem.
    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> Arc<FakeFs> {
        panic!("called as_fake on a real fs");
    }
}
174
/// Newtype that stores the app-wide [`Fs`] in GPUI's global registry.
struct GlobalFs(Arc<dyn Fs>);

impl Global for GlobalFs {}
178
179impl dyn Fs {
180 /// Returns the global [`Fs`].
181 pub fn global(cx: &App) -> Arc<Self> {
182 GlobalFs::global(cx).0.clone()
183 }
184
185 /// Sets the global [`Fs`].
186 pub fn set_global(fs: Arc<Self>, cx: &mut App) {
187 cx.set_global(GlobalFs(fs));
188 }
189}
190
/// Options for [`Fs::create_file`].
#[derive(Copy, Clone, Default)]
pub struct CreateOptions {
    /// Truncate the file if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed silently if the file already exists.
    pub ignore_if_exists: bool,
}
196
/// Options for [`Fs::copy_file`].
#[derive(Copy, Clone, Default)]
pub struct CopyOptions {
    /// Replace the target if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed silently if the target already exists.
    pub ignore_if_exists: bool,
}
202
/// Options for [`Fs::rename`].
#[derive(Copy, Clone, Default)]
pub struct RenameOptions {
    /// Replace the target if it already exists.
    pub overwrite: bool,
    /// When not overwriting, succeed silently if the target already exists.
    pub ignore_if_exists: bool,
}
208
/// Options for [`Fs::remove_dir`] and [`Fs::remove_file`].
#[derive(Copy, Clone, Default)]
pub struct RemoveOptions {
    /// For directories, remove contents recursively.
    pub recursive: bool,
    /// Succeed silently if the path does not exist.
    pub ignore_if_not_exists: bool,
}
214
/// A snapshot of a file's metadata, as returned by [`Fs::metadata`].
#[derive(Copy, Clone, Debug)]
pub struct Metadata {
    /// Inode number (on Windows, a file id stands in for it).
    pub inode: u64,
    /// Modification time.
    pub mtime: MTime,
    /// Whether the path itself is a symlink; the rest of the metadata may
    /// describe the link's target when it exists.
    pub is_symlink: bool,
    pub is_dir: bool,
    /// Size in bytes.
    pub len: u64,
    /// Whether the file is a FIFO (always `false` on Windows).
    pub is_fifo: bool,
}
224
/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
///
/// Serializes transparently as the wrapped [`SystemTime`].
///
/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(transparent)]
pub struct MTime(SystemTime);
234
235impl MTime {
236 /// Conversion intended for persistence and testing.
237 pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
238 MTime(UNIX_EPOCH + Duration::new(secs, nanos))
239 }
240
241 /// Conversion intended for persistence.
242 pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
243 self.0
244 .duration_since(UNIX_EPOCH)
245 .ok()
246 .map(|duration| (duration.as_secs(), duration.subsec_nanos()))
247 }
248
249 /// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
250 /// "_for_user" is to discourage misuse - this method should not be used when making decisions
251 /// about file dirtiness.
252 pub fn timestamp_for_user(self) -> SystemTime {
253 self.0
254 }
255
256 /// Temporary method to split out the behavior changes from introduction of this newtype.
257 pub fn bad_is_greater_than(self, other: MTime) -> bool {
258 self.0 > other.0
259 }
260}
261
/// Converts an RPC timestamp into an `MTime` via `SystemTime`.
impl From<proto::Timestamp> for MTime {
    fn from(timestamp: proto::Timestamp) -> Self {
        MTime(timestamp.into())
    }
}
267
/// Converts an `MTime` into an RPC timestamp via `SystemTime`.
impl From<MTime> for proto::Timestamp {
    fn from(mtime: MTime) -> Self {
        mtime.0.into()
    }
}
273
/// The real, disk-backed implementation of [`Fs`].
pub struct RealFs {
    // Optional path to a git binary shipped with the app; passed through to
    // repositories opened via `open_repo`.
    bundled_git_binary_path: Option<PathBuf>,
    // Executor used to run blocking std::fs calls off the main thread.
    executor: BackgroundExecutor,
}
278
/// An open file handle that can report the path it currently refers to,
/// even if the file has been renamed since it was opened.
pub trait FileHandle: Send + Sync + std::fmt::Debug {
    fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf>;
}
282
283impl FileHandle for std::fs::File {
284 #[cfg(target_os = "macos")]
285 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
286 use std::{
287 ffi::{CStr, OsStr},
288 os::unix::ffi::OsStrExt,
289 };
290
291 let fd = self.as_fd();
292 let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit();
293
294 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) };
295 if result == -1 {
296 anyhow::bail!("fcntl returned -1".to_string());
297 }
298
299 // SAFETY: `fcntl` will initialize the path buffer.
300 let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) };
301 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
302 Ok(path)
303 }
304
305 #[cfg(target_os = "linux")]
306 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
307 let fd = self.as_fd();
308 let fd_path = format!("/proc/self/fd/{}", fd.as_raw_fd());
309 let new_path = std::fs::read_link(fd_path)?;
310 if new_path
311 .file_name()
312 .is_some_and(|f| f.to_string_lossy().ends_with(" (deleted)"))
313 {
314 anyhow::bail!("file was deleted")
315 };
316
317 Ok(new_path)
318 }
319
320 #[cfg(target_os = "freebsd")]
321 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
322 use std::{
323 ffi::{CStr, OsStr},
324 os::unix::ffi::OsStrExt,
325 };
326
327 let fd = self.as_fd();
328 let mut kif = MaybeUninit::<libc::kinfo_file>::uninit();
329 kif.kf_structsize = libc::KINFO_FILE_SIZE;
330
331 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) };
332 if result == -1 {
333 anyhow::bail!("fcntl returned -1".to_string());
334 }
335
336 // SAFETY: `fcntl` will initialize the kif.
337 let c_str = unsafe { CStr::from_ptr(kif.assume_init().kf_path.as_ptr()) };
338 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
339 Ok(path)
340 }
341
342 #[cfg(target_os = "windows")]
343 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
344 use std::ffi::OsString;
345 use std::os::windows::ffi::OsStringExt;
346 use std::os::windows::io::AsRawHandle;
347
348 use windows::Win32::Foundation::HANDLE;
349 use windows::Win32::Storage::FileSystem::{
350 FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW,
351 };
352
353 let handle = HANDLE(self.as_raw_handle() as _);
354
355 // Query required buffer size (in wide chars)
356 let required_len =
357 unsafe { GetFinalPathNameByHandleW(handle, &mut [], FILE_NAME_NORMALIZED) };
358 if required_len == 0 {
359 anyhow::bail!("GetFinalPathNameByHandleW returned 0 length");
360 }
361
362 // Allocate buffer and retrieve the path
363 let mut buf: Vec<u16> = vec![0u16; required_len as usize + 1];
364 let written = unsafe { GetFinalPathNameByHandleW(handle, &mut buf, FILE_NAME_NORMALIZED) };
365 if written == 0 {
366 anyhow::bail!("GetFinalPathNameByHandleW failed to write path");
367 }
368
369 let os_str: OsString = OsString::from_wide(&buf[..written as usize]);
370 Ok(PathBuf::from(os_str))
371 }
372}
373
374pub struct RealWatcher {}
375
376impl RealFs {
377 pub fn new(git_binary_path: Option<PathBuf>, executor: BackgroundExecutor) -> Self {
378 Self {
379 bundled_git_binary_path: git_binary_path,
380 executor,
381 }
382 }
383}
384
385#[async_trait::async_trait]
386impl Fs for RealFs {
387 async fn create_dir(&self, path: &Path) -> Result<()> {
388 Ok(smol::fs::create_dir_all(path).await?)
389 }
390
    /// Creates a symlink at `path` pointing at `target`. On Windows,
    /// directories are linked via an NTFS junction (`mklink /J`) and files via
    /// a file symlink.
    async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
        #[cfg(unix)]
        smol::fs::unix::symlink(target, path).await?;

        #[cfg(windows)]
        if smol::fs::metadata(&target).await?.is_dir() {
            let status = smol::process::Command::new("cmd")
                .args(["/C", "mklink", "/J"])
                .args([path, target.as_path()])
                .status()
                .await?;

            if !status.success() {
                return Err(anyhow::anyhow!(
                    "Failed to create junction from {:?} to {:?}",
                    path,
                    target
                ));
            }
        } else {
            smol::fs::windows::symlink_file(target, path).await?
        }

        Ok(())
    }
416
417 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
418 let mut open_options = smol::fs::OpenOptions::new();
419 open_options.write(true).create(true);
420 if options.overwrite {
421 open_options.truncate(true);
422 } else if !options.ignore_if_exists {
423 open_options.create_new(true);
424 }
425 open_options.open(path).await?;
426 Ok(())
427 }
428
429 async fn create_file_with(
430 &self,
431 path: &Path,
432 content: Pin<&mut (dyn AsyncRead + Send)>,
433 ) -> Result<()> {
434 let mut file = smol::fs::File::create(&path).await?;
435 futures::io::copy(content, &mut file).await?;
436 Ok(())
437 }
438
    /// Unpacks the archive's entries into the directory at `path`.
    async fn extract_tar_file(
        &self,
        path: &Path,
        content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
    ) -> Result<()> {
        content.unpack(path).await?;
        Ok(())
    }
447
448 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
449 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
450 if options.ignore_if_exists {
451 return Ok(());
452 } else {
453 anyhow::bail!("{target:?} already exists");
454 }
455 }
456
457 smol::fs::copy(source, target).await?;
458 Ok(())
459 }
460
461 async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
462 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
463 if options.ignore_if_exists {
464 return Ok(());
465 } else {
466 anyhow::bail!("{target:?} already exists");
467 }
468 }
469
470 smol::fs::rename(source, target).await?;
471 Ok(())
472 }
473
474 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
475 let result = if options.recursive {
476 smol::fs::remove_dir_all(path).await
477 } else {
478 smol::fs::remove_dir(path).await
479 };
480 match result {
481 Ok(()) => Ok(()),
482 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
483 Ok(())
484 }
485 Err(err) => Err(err)?,
486 }
487 }
488
    /// Removes the file at `path`. A missing file is only an error unless
    /// `ignore_if_not_exists` is set.
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        // On Windows, a symlink to a directory must be removed as a directory
        // rather than as a file.
        #[cfg(windows)]
        if let Ok(Some(metadata)) = self.metadata(path).await
            && metadata.is_symlink
            && metadata.is_dir
        {
            self.remove_dir(
                path,
                RemoveOptions {
                    recursive: false,
                    ignore_if_not_exists: true,
                },
            )
            .await?;
            return Ok(());
        }

        match smol::fs::remove_file(path).await {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
                Ok(())
            }
            Err(err) => Err(err)?,
        }
    }
514
    /// Moves the file to the macOS Trash via `NSWorkspace recycleURLs:`.
    #[cfg(target_os = "macos")]
    async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
        use cocoa::{
            base::{id, nil},
            foundation::{NSAutoreleasePool, NSString},
        };
        use objc::{class, msg_send, sel, sel_impl};

        unsafe {
            // Builds an autoreleased NSString from a Rust &str.
            unsafe fn ns_string(string: &str) -> id {
                unsafe { NSString::alloc(nil).init_str(string).autorelease() }
            }

            let url: id = msg_send![class!(NSURL), fileURLWithPath: ns_string(path.to_string_lossy().as_ref())];
            let array: id = msg_send![class!(NSArray), arrayWithObject: url];
            let workspace: id = msg_send![class!(NSWorkspace), sharedWorkspace];

            // Fire-and-forget: the nil completion handler means failures are
            // not reported back to the caller.
            let _: id = msg_send![workspace, recycleURLs: array completionHandler: nil];
        }
        Ok(())
    }
536
    /// Moves the file to the trash via the XDG desktop portal, deleting it
    /// outright when trashing is unavailable.
    #[cfg(any(target_os = "linux", target_os = "freebsd"))]
    async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
        if let Ok(Some(metadata)) = self.metadata(path).await
            && metadata.is_symlink
        {
            // TODO: trash_file does not support trashing symlinks yet - https://github.com/bilelmoussaoui/ashpd/issues/255
            return self.remove_file(path, RemoveOptions::default()).await;
        }
        // The portal takes an open file descriptor rather than a path.
        let file = smol::fs::File::open(path).await?;
        match trash::trash_file(&file.as_fd()).await {
            Ok(_) => Ok(()),
            Err(err) => {
                log::error!("Failed to trash file: {}", err);
                // Trashing files can fail if you don't have a trashing dbus service configured.
                // In that case, delete the file directly instead.
                return self.remove_file(path, RemoveOptions::default()).await;
            }
        }
    }
556
    /// Moves the file to the Windows Recycle Bin via the WinRT Storage API.
    #[cfg(target_os = "windows")]
    async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
        use util::paths::SanitizedPath;
        use windows::{
            Storage::{StorageDeleteOption, StorageFile},
            core::HSTRING,
        };
        // todo(windows)
        // When new version of `windows-rs` release, make this operation `async`
        let path = path.canonicalize()?;
        let path = SanitizedPath::new(&path);
        let path_string = path.to_string();
        let file = StorageFile::GetFileFromPathAsync(&HSTRING::from(path_string))?.get()?;
        // StorageDeleteOption::Default sends the item to the Recycle Bin.
        file.DeleteAsync(StorageDeleteOption::Default)?.get()?;
        Ok(())
    }
573
    /// On macOS the trash API handles directories and files uniformly.
    #[cfg(target_os = "macos")]
    async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.trash_file(path, options).await
    }
578
    /// The trash portal handles directories the same way as files.
    #[cfg(any(target_os = "linux", target_os = "freebsd"))]
    async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.trash_file(path, options).await
    }
583
    /// Moves the directory to the Windows Recycle Bin via the WinRT Storage API.
    #[cfg(target_os = "windows")]
    async fn trash_dir(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
        use util::paths::SanitizedPath;
        use windows::{
            Storage::{StorageDeleteOption, StorageFolder},
            core::HSTRING,
        };

        // todo(windows)
        // When new version of `windows-rs` release, make this operation `async`
        let path = path.canonicalize()?;
        let path = SanitizedPath::new(&path);
        let path_string = path.to_string();
        let folder = StorageFolder::GetFolderFromPathAsync(&HSTRING::from(path_string))?.get()?;
        // StorageDeleteOption::Default sends the item to the Recycle Bin.
        folder.DeleteAsync(StorageDeleteOption::Default)?.get()?;
        Ok(())
    }
601
602 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
603 Ok(Box::new(std::fs::File::open(path)?))
604 }
605
    /// Opens a read-only handle to `path` that can later report its current path.
    async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
        let mut options = std::fs::OpenOptions::new();
        options.read(true);
        #[cfg(windows)]
        {
            use std::os::windows::fs::OpenOptionsExt;
            // FILE_FLAG_BACKUP_SEMANTICS is required on Windows to obtain a
            // handle to a directory.
            options.custom_flags(windows::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS.0);
        }
        Ok(Arc::new(options.open(path)?))
    }
616
617 async fn load(&self, path: &Path) -> Result<String> {
618 let path = path.to_path_buf();
619 let text = smol::unblock(|| std::fs::read_to_string(path)).await?;
620 Ok(text)
621 }
622
    /// Reads the file's raw bytes on the background executor.
    async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
        let path = path.to_path_buf();
        let bytes = self
            .executor
            .spawn(async move { std::fs::read(path) })
            .await?;
        Ok(bytes)
    }
631
    /// Atomically writes `data` to `path`: write a temp file next to the
    /// destination, then rename it into place.
    #[cfg(not(target_os = "windows"))]
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        smol::unblock(move || {
            // Use the directory of the destination as temp dir to avoid
            // invalid cross-device link error, and XDG_CACHE_DIR for fallback.
            // See https://github.com/zed-industries/zed/pull/8437 for more details.
            let mut tmp_file =
                tempfile::NamedTempFile::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
            tmp_file.write_all(data.as_bytes())?;
            tmp_file.persist(path)?;
            anyhow::Ok(())
        })
        .await?;

        Ok(())
    }
648
    /// Atomically writes `data` to `path` on Windows, using a same-volume temp
    /// file plus `ReplaceFileW`-based replacement.
    #[cfg(target_os = "windows")]
    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        smol::unblock(move || {
            // If temp dir is set to a different drive than the destination,
            // we receive error:
            //
            // failed to persist temporary file:
            // The system cannot move the file to a different disk drive. (os error 17)
            //
            // This is because `ReplaceFileW` does not support cross volume moves.
            // See the remark section: "The backup file, replaced file, and replacement file must all reside on the same volume."
            // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-replacefilew#remarks
            //
            // So we use the directory of the destination as a temp dir to avoid it.
            // https://github.com/zed-industries/zed/issues/16571
            let temp_dir = TempDir::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
            let temp_file = {
                let temp_file_path = temp_dir.path().join("temp_file");
                let mut file = std::fs::File::create_new(&temp_file_path)?;
                file.write_all(data.as_bytes())?;
                temp_file_path
            };
            // `atomic_replace` is defined elsewhere in this crate.
            atomic_replace(path.as_path(), temp_file.as_path())?;
            anyhow::Ok(())
        })
        .await?;
        Ok(())
    }
677
    /// Saves a rope to `path` with the requested line endings and encoding,
    /// creating parent directories as needed.
    async fn save(
        &self,
        path: &Path,
        text: &Rope,
        line_ending: LineEnding,
        encoding: Encoding,
    ) -> Result<()> {
        // Cap the write buffer at 10 KiB; smaller texts use their exact size.
        let buffer_size = text.summary().len.min(10 * 1024);
        if let Some(path) = path.parent() {
            self.create_dir(path).await?;
        }
        let file = smol::fs::File::create(path).await?;
        let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);

        // Emit a byte-order mark first when the target encoding defines one.
        if let Some(bom) = encoding.bom() {
            writer.write_all(bom).await?;
        }

        // Encode the rope chunk-by-chunk with the requested line endings.
        for chunk in chunks(text, line_ending) {
            writer.write_all(&encoding.encode_chunk(chunk)?).await?
        }

        writer.flush().await?;
        Ok(())
    }
703
704 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
705 if let Some(path) = path.parent() {
706 self.create_dir(path).await?;
707 }
708 let path = path.to_owned();
709 let contents = content.to_owned();
710 self.executor
711 .spawn(async move {
712 std::fs::write(path, contents)?;
713 Ok(())
714 })
715 .await
716 }
717
    /// Canonicalizes `path` on the background executor, attaching the path to
    /// any error for context.
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
        let path = path.to_owned();
        self.executor
            .spawn(async move {
                std::fs::canonicalize(&path).with_context(|| format!("canonicalizing {path:?}"))
            })
            .await
    }
726
727 async fn is_file(&self, path: &Path) -> bool {
728 let path = path.to_owned();
729 self.executor
730 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) })
731 .await
732 }
733
734 async fn is_dir(&self, path: &Path) -> bool {
735 let path = path.to_owned();
736 self.executor
737 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) })
738 .await
739 }
740
741 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
742 let path_buf = path.to_owned();
743 let symlink_metadata = match self
744 .executor
745 .spawn(async move { std::fs::symlink_metadata(&path_buf) })
746 .await
747 {
748 Ok(metadata) => metadata,
749 Err(err) => {
750 return match (err.kind(), err.raw_os_error()) {
751 (io::ErrorKind::NotFound, _) => Ok(None),
752 (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
753 _ => Err(anyhow::Error::new(err)),
754 };
755 }
756 };
757
758 let is_symlink = symlink_metadata.file_type().is_symlink();
759 let metadata = if is_symlink {
760 let path_buf = path.to_path_buf();
761 let path_exists = self
762 .executor
763 .spawn(async move {
764 path_buf
765 .try_exists()
766 .with_context(|| format!("checking existence for path {path_buf:?}"))
767 })
768 .await?;
769 if path_exists {
770 let path_buf = path.to_path_buf();
771 self.executor
772 .spawn(async move { std::fs::metadata(path_buf) })
773 .await
774 .with_context(|| "accessing symlink for path {path}")?
775 } else {
776 symlink_metadata
777 }
778 } else {
779 symlink_metadata
780 };
781
782 #[cfg(unix)]
783 let inode = metadata.ino();
784
785 #[cfg(windows)]
786 let inode = file_id(path).await?;
787
788 #[cfg(windows)]
789 let is_fifo = false;
790
791 #[cfg(unix)]
792 let is_fifo = metadata.file_type().is_fifo();
793
794 Ok(Some(Metadata {
795 inode,
796 mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)),
797 len: metadata.len(),
798 is_symlink,
799 is_dir: metadata.file_type().is_dir(),
800 is_fifo,
801 }))
802 }
803
    /// Reads the target of the symlink at `path` on the background executor.
    async fn read_link(&self, path: &Path) -> Result<PathBuf> {
        let path = path.to_owned();
        let path = self
            .executor
            .spawn(async move { std::fs::read_link(&path) })
            .await?;
        Ok(path)
    }
812
813 async fn read_dir(
814 &self,
815 path: &Path,
816 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
817 let path = path.to_owned();
818 let result = iter(
819 self.executor
820 .spawn(async move { std::fs::read_dir(path) })
821 .await?,
822 )
823 .map(|entry| match entry {
824 Ok(entry) => Ok(entry.path()),
825 Err(error) => Err(anyhow!("failed to read dir entry {error:?}")),
826 });
827 Ok(Box::pin(result))
828 }
829
    /// Watches `path` via macOS FSEvents, mapping stream flags onto
    /// [`PathEventKind`] values.
    #[cfg(target_os = "macos")]
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    ) {
        use fsevent::StreamFlags;

        let (events_tx, events_rx) = smol::channel::unbounded();
        // The handle map is shared weakly with the watcher; dropping the
        // returned stream drops `handles` and tears the watches down.
        let handles = Arc::new(parking_lot::Mutex::new(collections::BTreeMap::default()));
        let watcher = Arc::new(mac_watcher::MacWatcher::new(
            events_tx,
            Arc::downgrade(&handles),
            latency,
        ));
        watcher.add(path).expect("handles can't be dropped");

        (
            Box::pin(
                events_rx
                    .map(|events| {
                        events
                            .into_iter()
                            .map(|event| {
                                log::trace!("fs path event: {event:?}");
                                // Map FSEvents flags to the coarser event kind;
                                // renames are reported as Changed.
                                let kind = if event.flags.contains(StreamFlags::ITEM_REMOVED) {
                                    Some(PathEventKind::Removed)
                                } else if event.flags.contains(StreamFlags::ITEM_CREATED) {
                                    Some(PathEventKind::Created)
                                } else if event.flags.contains(StreamFlags::ITEM_MODIFIED)
                                    | event.flags.contains(StreamFlags::ITEM_RENAMED)
                                {
                                    Some(PathEventKind::Changed)
                                } else {
                                    None
                                };
                                PathEvent {
                                    path: event.path,
                                    kind,
                                }
                            })
                            .collect()
                    })
                    // Keep `handles` alive for as long as the stream is polled.
                    .chain(futures::stream::once(async move {
                        drop(handles);
                        vec![]
                    })),
            ),
            watcher,
        )
    }
884
    /// Watches `path` via the generic notify-based watcher, debouncing events
    /// by `latency` and following symlinks so the target is watched too.
    #[cfg(not(target_os = "macos"))]
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    ) {
        use parking_lot::Mutex;
        use util::{ResultExt as _, paths::SanitizedPath};

        let (tx, rx) = smol::channel::unbounded();
        let pending_paths: Arc<Mutex<Vec<PathEvent>>> = Default::default();
        let watcher = Arc::new(fs_watcher::FsWatcher::new(tx, pending_paths.clone()));

        // If the path doesn't exist yet (e.g. settings.json), watch the parent dir to learn when it's created.
        if let Err(e) = watcher.add(path)
            && let Some(parent) = path.parent()
            && let Err(parent_e) = watcher.add(parent)
        {
            log::warn!(
                "Failed to watch {} and its parent directory {}:\n{e}\n{parent_e}",
                path.display(),
                parent.display()
            );
        }

        // Check if path is a symlink and follow the target parent
        if let Some(mut target) = self.read_link(path).await.ok() {
            log::trace!("watch symlink {path:?} -> {target:?}");
            // Check if symlink target is relative path, if so make it absolute
            if target.is_relative()
                && let Some(parent) = path.parent()
            {
                target = parent.join(target);
                if let Ok(canonical) = self.canonicalize(&target).await {
                    target = SanitizedPath::new(&canonical).as_path().to_path_buf();
                }
            }
            watcher.add(&target).ok();
            if let Some(parent) = target.parent() {
                watcher.add(parent).log_err();
            }
        }

        (
            // Debounce: after each notification, wait `latency`, then drain
            // everything accumulated so far; empty batches are dropped.
            Box::pin(rx.filter_map({
                let watcher = watcher.clone();
                move |_| {
                    // Keep the watcher alive as long as the stream is polled.
                    let _ = watcher.clone();
                    let pending_paths = pending_paths.clone();
                    async move {
                        smol::Timer::after(latency).await;
                        let paths = std::mem::take(&mut *pending_paths.lock());
                        (!paths.is_empty()).then_some(paths)
                    }
                }
            })),
            watcher,
        )
    }
947
    /// Opens the git repository at `dotgit_path`, preferring the bundled git
    /// binary and falling back to the system one. Returns `None` when
    /// `RealGitRepository::new` fails.
    fn open_repo(
        &self,
        dotgit_path: &Path,
        system_git_binary_path: Option<&Path>,
    ) -> Option<Arc<dyn GitRepository>> {
        Some(Arc::new(RealGitRepository::new(
            dotgit_path,
            self.bundled_git_binary_path.clone(),
            system_git_binary_path.map(|path| path.to_path_buf()),
            self.executor.clone(),
        )?))
    }
960
961 async fn git_init(
962 &self,
963 abs_work_directory_path: &Path,
964 fallback_branch_name: String,
965 ) -> Result<()> {
966 let config = new_smol_command("git")
967 .current_dir(abs_work_directory_path)
968 .args(&["config", "--global", "--get", "init.defaultBranch"])
969 .output()
970 .await?;
971
972 let branch_name;
973
974 if config.status.success() && !config.stdout.is_empty() {
975 branch_name = String::from_utf8_lossy(&config.stdout);
976 } else {
977 branch_name = Cow::Borrowed(fallback_branch_name.as_str());
978 }
979
980 new_smol_command("git")
981 .current_dir(abs_work_directory_path)
982 .args(&["init", "-b"])
983 .arg(branch_name.trim())
984 .output()
985 .await?;
986
987 Ok(())
988 }
989
990 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()> {
991 let output = new_smol_command("git")
992 .current_dir(abs_work_directory)
993 .args(&["clone", repo_url])
994 .output()
995 .await?;
996
997 if !output.status.success() {
998 anyhow::bail!(
999 "git clone failed: {}",
1000 String::from_utf8_lossy(&output.stderr)
1001 );
1002 }
1003
1004 Ok(())
1005 }
1006
    /// This is the real, disk-backed implementation.
    fn is_fake(&self) -> bool {
        false
    }
1010
1011 /// Checks whether the file system is case sensitive by attempting to create two files
1012 /// that have the same name except for the casing.
1013 ///
1014 /// It creates both files in a temporary directory it removes at the end.
1015 async fn is_case_sensitive(&self) -> Result<bool> {
1016 let temp_dir = TempDir::new()?;
1017 let test_file_1 = temp_dir.path().join("case_sensitivity_test.tmp");
1018 let test_file_2 = temp_dir.path().join("CASE_SENSITIVITY_TEST.TMP");
1019
1020 let create_opts = CreateOptions {
1021 overwrite: false,
1022 ignore_if_exists: false,
1023 };
1024
1025 // Create file1
1026 self.create_file(&test_file_1, create_opts).await?;
1027
1028 // Now check whether it's possible to create file2
1029 let case_sensitive = match self.create_file(&test_file_2, create_opts).await {
1030 Ok(_) => Ok(true),
1031 Err(e) => {
1032 if let Some(io_error) = e.downcast_ref::<io::Error>() {
1033 if io_error.kind() == io::ErrorKind::AlreadyExists {
1034 Ok(false)
1035 } else {
1036 Err(e)
1037 }
1038 } else {
1039 Err(e)
1040 }
1041 }
1042 };
1043
1044 temp_dir.close()?;
1045 case_sensitive
1046 }
1047}
1048
// No-op watcher registration on macOS/Windows. NOTE(review): the `watch`
// implementations in this file construct other watcher types; confirm where
// `RealWatcher` is used on these platforms.
#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
impl Watcher for RealWatcher {
    fn add(&self, _: &Path) -> Result<()> {
        Ok(())
    }

    fn remove(&self, _: &Path) -> Result<()> {
        Ok(())
    }
}
1059
/// An in-memory [`Fs`] implementation for tests.
#[cfg(any(test, feature = "test-support"))]
pub struct FakeFs {
    // Weak self-reference; presumably used to hand out `Arc<FakeFs>` from
    // methods that only have `&self` — confirm at the construction site.
    this: std::sync::Weak<Self>,
    // Use an unfair lock to ensure tests are deterministic.
    state: Arc<Mutex<FakeFsState>>,
    executor: gpui::BackgroundExecutor,
}
1067
/// Mutable state behind [`FakeFs`]'s lock.
#[cfg(any(test, feature = "test-support"))]
struct FakeFsState {
    // Root directory entry of the in-memory tree.
    root: FakeFsEntry,
    // Counters used to assign fresh inodes and mtimes to new entries.
    next_inode: u64,
    next_mtime: SystemTime,
    // Channel for signaling git-related changes (receiver not visible here).
    git_event_tx: smol::channel::Sender<PathBuf>,
    // Event channels registered by watchers, paired with the watched path.
    event_txs: Vec<(PathBuf, smol::channel::Sender<Vec<PathEvent>>)>,
    // When true, events accumulate in `buffered_events` instead of being sent.
    events_paused: bool,
    buffered_events: Vec<PathEvent>,
    // Instrumentation counters for tests.
    metadata_call_count: usize,
    read_dir_call_count: usize,
    path_write_counts: std::collections::HashMap<PathBuf, usize>,
    // Tracks moved entries by inode.
    moves: std::collections::HashMap<u64, PathBuf>,
}
1082
/// A single node in the fake filesystem tree.
#[cfg(any(test, feature = "test-support"))]
#[derive(Clone, Debug)]
enum FakeFsEntry {
    /// A regular file whose contents live in memory.
    File {
        inode: u64,
        mtime: MTime,
        len: u64,
        content: Vec<u8>,
        // The path to the repository state directory, if this is a gitfile.
        git_dir_path: Option<PathBuf>,
    },
    /// A directory mapping child names to entries.
    Dir {
        inode: u64,
        mtime: MTime,
        len: u64,
        entries: BTreeMap<String, FakeFsEntry>,
        // Fake git repository state, populated when this directory acts as a
        // `.git` dir (see `FakeFs::with_git_state_and_paths`).
        git_repo_state: Option<Arc<Mutex<FakeGitRepositoryState>>>,
    },
    /// A symbolic link to another (possibly relative) path.
    Symlink {
        target: PathBuf,
    },
}
1105
#[cfg(any(test, feature = "test-support"))]
impl PartialEq for FakeFsEntry {
    /// Structural equality, except that `git_repo_state` is compared by
    /// identity (`Arc::ptr_eq`) rather than by value.
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (
                Self::File {
                    inode: a_inode,
                    mtime: a_mtime,
                    len: a_len,
                    content: a_content,
                    git_dir_path: a_git_dir_path,
                },
                Self::File {
                    inode: b_inode,
                    mtime: b_mtime,
                    len: b_len,
                    content: b_content,
                    git_dir_path: b_git_dir_path,
                },
            ) => {
                (a_inode, a_mtime, a_len, a_content, a_git_dir_path)
                    == (b_inode, b_mtime, b_len, b_content, b_git_dir_path)
            }
            (
                Self::Dir {
                    inode: a_inode,
                    mtime: a_mtime,
                    len: a_len,
                    entries: a_entries,
                    git_repo_state: a_repo,
                },
                Self::Dir {
                    inode: b_inode,
                    mtime: b_mtime,
                    len: b_len,
                    entries: b_entries,
                    git_repo_state: b_repo,
                },
            ) => {
                // Two directories share "the same" repo state only if they
                // point at the very same allocation.
                let same_repo_state = match (a_repo, b_repo) {
                    (Some(a), Some(b)) => Arc::ptr_eq(a, b),
                    (None, None) => true,
                    _ => false,
                };
                same_repo_state
                    && (a_inode, a_mtime, a_len) == (b_inode, b_mtime, b_len)
                    && a_entries == b_entries
            }
            (Self::Symlink { target: a_target }, Self::Symlink { target: b_target }) => {
                a_target == b_target
            }
            _ => false,
        }
    }
}
1166
#[cfg(any(test, feature = "test-support"))]
impl FakeFsState {
    /// Returns the next modification timestamp, advancing the fake clock.
    fn get_and_increment_mtime(&mut self) -> MTime {
        let mtime = self.next_mtime;
        self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
        MTime(mtime)
    }

    /// Allocates a fresh inode number.
    fn get_and_increment_inode(&mut self) -> u64 {
        let inode = self.next_inode;
        self.next_inode += 1;
        inode
    }

    /// Resolves `target` against the fake tree, expanding `.`, `..`, and
    /// symlinks, and returns the canonical path.
    ///
    /// Intermediate symlinks are always followed; a symlink in the final
    /// component is only followed when `follow_symlink` is true. Returns
    /// `None` if any component does not exist.
    fn canonicalize(&self, target: &Path, follow_symlink: bool) -> Option<PathBuf> {
        let mut canonical_path = PathBuf::new();
        let mut path = target.to_path_buf();
        // Entries visited so far, mirroring the components of `canonical_path`.
        let mut entry_stack = Vec::new();
        'outer: loop {
            let mut path_components = path.components().peekable();
            let mut prefix = None;
            while let Some(component) = path_components.next() {
                match component {
                    Component::Prefix(prefix_component) => prefix = Some(prefix_component),
                    Component::RootDir => {
                        // Restart resolution from the root (also handles
                        // absolute symlink targets).
                        entry_stack.clear();
                        entry_stack.push(&self.root);
                        canonical_path.clear();
                        match prefix {
                            Some(prefix_component) => {
                                canonical_path = PathBuf::from(prefix_component.as_os_str());
                                // Prefixes like `C:\\` are represented without their trailing slash, so we have to re-add it.
                                canonical_path.push(std::path::MAIN_SEPARATOR_STR);
                            }
                            None => canonical_path = PathBuf::from(std::path::MAIN_SEPARATOR_STR),
                        }
                    }
                    Component::CurDir => {}
                    Component::ParentDir => {
                        // `..` above the root is an error (returns None).
                        entry_stack.pop()?;
                        canonical_path.pop();
                    }
                    Component::Normal(name) => {
                        let current_entry = *entry_stack.last()?;
                        if let FakeFsEntry::Dir { entries, .. } = current_entry {
                            let entry = entries.get(name.to_str().unwrap())?;
                            if (path_components.peek().is_some() || follow_symlink)
                                && let FakeFsEntry::Symlink { target, .. } = entry
                            {
                                // Splice the symlink target in front of the
                                // remaining components and start over.
                                let mut target = target.clone();
                                target.extend(path_components);
                                path = target;
                                continue 'outer;
                            }
                            entry_stack.push(entry);
                            canonical_path = canonical_path.join(name);
                        } else {
                            return None;
                        }
                    }
                }
            }
            break;
        }

        if entry_stack.is_empty() {
            None
        } else {
            Some(canonical_path)
        }
    }

    /// Canonicalizes `target`, then walks the tree again mutably and returns
    /// the entry along with its canonical path. `None` if the path (or any
    /// component) does not exist.
    fn try_entry(
        &mut self,
        target: &Path,
        follow_symlink: bool,
    ) -> Option<(&mut FakeFsEntry, PathBuf)> {
        let canonical_path = self.canonicalize(target, follow_symlink)?;

        let mut components = canonical_path
            .components()
            .skip_while(|component| matches!(component, Component::Prefix(_)));
        let Some(Component::RootDir) = components.next() else {
            panic!(
                "the path {:?} was not canonicalized properly {:?}",
                target, canonical_path
            )
        };

        let mut entry = &mut self.root;
        for component in components {
            match component {
                Component::Normal(name) => {
                    if let FakeFsEntry::Dir { entries, .. } = entry {
                        entry = entries.get_mut(name.to_str().unwrap())?;
                    } else {
                        return None;
                    }
                }
                _ => {
                    // Canonical paths contain only a root plus normal components.
                    panic!(
                        "the path {:?} was not canonicalized properly {:?}",
                        target, canonical_path
                    )
                }
            }
        }

        Some((entry, canonical_path))
    }

    /// Like `try_entry` (following symlinks), but converts a miss into an
    /// `io::ErrorKind::NotFound` error.
    fn entry(&mut self, target: &Path) -> Result<&mut FakeFsEntry> {
        Ok(self
            .try_entry(target, true)
            .ok_or_else(|| {
                anyhow!(io::Error::new(
                    io::ErrorKind::NotFound,
                    format!("not found: {target:?}")
                ))
            })?
            .0)
    }

    /// Runs `callback` with the (vacant or occupied) map entry for `path`'s
    /// final component inside its parent directory. Fails if the parent does
    /// not exist or is not a directory.
    fn write_path<Fn, T>(&mut self, path: &Path, callback: Fn) -> Result<T>
    where
        Fn: FnOnce(btree_map::Entry<String, FakeFsEntry>) -> Result<T>,
    {
        let path = normalize_path(path);
        let filename = path.file_name().context("cannot overwrite the root")?;
        let parent_path = path.parent().unwrap();

        let parent = self.entry(parent_path)?;
        let new_entry = parent
            .dir_entries(parent_path)?
            .entry(filename.to_str().unwrap().into());
        callback(new_entry)
    }

    /// Buffers path events and, unless events are paused, immediately flushes
    /// them to all watchers.
    fn emit_event<I, T>(&mut self, paths: I)
    where
        I: IntoIterator<Item = (T, Option<PathEventKind>)>,
        T: Into<PathBuf>,
    {
        self.buffered_events
            .extend(paths.into_iter().map(|(path, kind)| PathEvent {
                path: path.into(),
                kind,
            }));

        if !self.events_paused {
            self.flush_events(self.buffered_events.len());
        }
    }

    /// Delivers up to `count` buffered events to every live watcher channel,
    /// dropping channels whose receivers have gone away.
    fn flush_events(&mut self, mut count: usize) {
        count = count.min(self.buffered_events.len());
        let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
        self.event_txs.retain(|(_, tx)| {
            let _ = tx.try_send(events.clone());
            !tx.is_closed()
        });
    }
}
1330
/// The `.git` directory name as an `OsStr`, for cheap path-component comparisons.
#[cfg(any(test, feature = "test-support"))]
pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
    std::sync::LazyLock::new(|| OsStr::new(".git"));
1334
#[cfg(any(test, feature = "test-support"))]
impl FakeFs {
    /// We need to use something large enough for Windows and Unix to consider this a new file.
    /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
    const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);

    /// Creates an empty fake filesystem.
    ///
    /// Also spawns a background task that forwards git-state change
    /// notifications (sent on the channel handed to each repository state)
    /// back out as ordinary `Changed` path events.
    pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
        let (tx, rx) = smol::channel::bounded::<PathBuf>(10);

        let this = Arc::new_cyclic(|this| Self {
            this: this.clone(),
            executor: executor.clone(),
            state: Arc::new(Mutex::new(FakeFsState {
                root: FakeFsEntry::Dir {
                    inode: 0,
                    mtime: MTime(UNIX_EPOCH),
                    len: 0,
                    entries: Default::default(),
                    git_repo_state: None,
                },
                git_event_tx: tx,
                next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
                next_inode: 1,
                event_txs: Default::default(),
                buffered_events: Vec::new(),
                events_paused: false,
                read_dir_call_count: 0,
                metadata_call_count: 0,
                path_write_counts: Default::default(),
                moves: Default::default(),
            })),
        });

        executor.spawn({
            let this = this.clone();
            async move {
                while let Ok(git_event) = rx.recv().await {
                    // `try_lock` rather than `lock`: blocking here while a test
                    // holds the state lock would deadlock the executor.
                    if let Some(mut state) = this.state.try_lock() {
                        state.emit_event([(git_event, Some(PathEventKind::Changed))]);
                    } else {
                        panic!("Failed to lock file system state, this execution would have caused a test hang");
                    }
                }
            }
        }).detach();

        this
    }

    /// Overrides the timestamp that will be assigned to the next write.
    pub fn set_next_mtime(&self, next_mtime: SystemTime) {
        let mut state = self.state.lock();
        state.next_mtime = next_mtime;
    }

    /// Returns the next mtime, advancing the fake clock.
    pub fn get_and_increment_mtime(&self) -> MTime {
        let mut state = self.state.lock();
        state.get_and_increment_mtime()
    }

    /// Updates the mtime of the entry at `path` (creating an empty file if
    /// nothing exists there) and emits a `Changed` event. Panics if the
    /// parent directory is missing.
    pub async fn touch_path(&self, path: impl AsRef<Path>) {
        let mut state = self.state.lock();
        let path = path.as_ref();
        let new_mtime = state.get_and_increment_mtime();
        let new_inode = state.get_and_increment_inode();
        state
            .write_path(path, move |entry| {
                match entry {
                    btree_map::Entry::Vacant(e) => {
                        e.insert(FakeFsEntry::File {
                            inode: new_inode,
                            mtime: new_mtime,
                            content: Vec::new(),
                            len: 0,
                            git_dir_path: None,
                        });
                    }
                    btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut() {
                        FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
                        FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
                        // Symlinks carry no mtime in this fake.
                        FakeFsEntry::Symlink { .. } => {}
                    },
                }
                Ok(())
            })
            .unwrap();
        state.emit_event([(path.to_path_buf(), Some(PathEventKind::Changed))]);
    }

    /// Creates or overwrites the file at `path` with `content`.
    /// Panics if the parent directory is missing.
    pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
        self.write_file_internal(path, content, true).unwrap()
    }

    /// Creates or replaces a symlink at `path` pointing to `target`.
    /// Panics if the parent directory is missing.
    pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
        let mut state = self.state.lock();
        let path = path.as_ref();
        let file = FakeFsEntry::Symlink { target };
        state
            .write_path(path.as_ref(), move |e| match e {
                btree_map::Entry::Vacant(e) => {
                    e.insert(file);
                    Ok(())
                }
                btree_map::Entry::Occupied(mut e) => {
                    *e.get_mut() = file;
                    Ok(())
                }
            })
            .unwrap();
        state.emit_event([(path, Some(PathEventKind::Created))]);
    }

    /// Core write primitive: bumps the per-path write counter, assigns a
    /// fresh mtime (and a fresh inode when `recreate_inode` is true), and
    /// emits a `Created`/`Changed` event as appropriate.
    fn write_file_internal(
        &self,
        path: impl AsRef<Path>,
        new_content: Vec<u8>,
        recreate_inode: bool,
    ) -> Result<()> {
        let mut state = self.state.lock();
        let path_buf = path.as_ref().to_path_buf();
        *state.path_write_counts.entry(path_buf).or_insert(0) += 1;
        let new_inode = state.get_and_increment_inode();
        let new_mtime = state.get_and_increment_mtime();
        let new_len = new_content.len() as u64;
        let mut kind = None;
        state.write_path(path.as_ref(), |entry| {
            match entry {
                btree_map::Entry::Vacant(e) => {
                    kind = Some(PathEventKind::Created);
                    e.insert(FakeFsEntry::File {
                        inode: new_inode,
                        mtime: new_mtime,
                        len: new_len,
                        content: new_content,
                        git_dir_path: None,
                    });
                }
                btree_map::Entry::Occupied(mut e) => {
                    kind = Some(PathEventKind::Changed);
                    if let FakeFsEntry::File {
                        inode,
                        mtime,
                        len,
                        content,
                        ..
                    } = e.get_mut()
                    {
                        *mtime = new_mtime;
                        *content = new_content;
                        *len = new_len;
                        if recreate_inode {
                            *inode = new_inode;
                        }
                    } else {
                        anyhow::bail!("not a file")
                    }
                }
            }
            Ok(())
        })?;
        state.emit_event([(path.as_ref(), kind)]);
        Ok(())
    }

    /// Reads a file's contents synchronously (no simulated delay).
    pub fn read_file_sync(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
        let path = path.as_ref();
        let path = normalize_path(path);
        let mut state = self.state.lock();
        let entry = state.entry(&path)?;
        entry.file_content(&path).cloned()
    }

    /// Reads a file's contents, yielding a simulated random delay first.
    async fn load_internal(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
        let path = path.as_ref();
        let path = normalize_path(path);
        self.simulate_random_delay().await;
        let mut state = self.state.lock();
        let entry = state.entry(&path)?;
        entry.file_content(&path).cloned()
    }

    /// Stops delivering events to watchers; they buffer until flushed.
    pub fn pause_events(&self) {
        self.state.lock().events_paused = true;
    }

    /// Resumes event delivery and flushes everything buffered so far.
    pub fn unpause_events_and_flush(&self) {
        self.state.lock().events_paused = false;
        self.flush_events(usize::MAX);
    }

    /// Number of events currently buffered (only grows while paused).
    pub fn buffered_event_count(&self) -> usize {
        self.state.lock().buffered_events.len()
    }

    /// Delivers up to `count` buffered events to watchers.
    pub fn flush_events(&self, count: usize) {
        self.state.lock().flush_events(count);
    }

    /// Returns a clone of the entry at `target`, following symlinks.
    pub(crate) fn entry(&self, target: &Path) -> Result<FakeFsEntry> {
        self.state.lock().entry(target).cloned()
    }

    /// Inserts (or replaces) `new_entry` at `target` without emitting events.
    pub(crate) fn insert_entry(&self, target: &Path, new_entry: FakeFsEntry) -> Result<()> {
        let mut state = self.state.lock();
        state.write_path(target, |entry| {
            match entry {
                btree_map::Entry::Vacant(vacant_entry) => {
                    vacant_entry.insert(new_entry);
                }
                btree_map::Entry::Occupied(mut occupied_entry) => {
                    occupied_entry.insert(new_entry);
                }
            }
            Ok(())
        })
    }

    /// Builds a directory tree at `path` from a JSON description:
    /// objects become directories, strings become file contents, and `null`
    /// becomes an empty directory. Panics on any other JSON value.
    #[must_use]
    pub fn insert_tree<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        tree: serde_json::Value,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;
        use serde_json::Value::*;

        async move {
            let path = path.as_ref();

            match tree {
                Object(map) => {
                    self.create_dir(path).await.unwrap();
                    for (name, contents) in map {
                        let mut path = PathBuf::from(path);
                        path.push(name);
                        self.insert_tree(&path, contents).await;
                    }
                }
                Null => {
                    self.create_dir(path).await.unwrap();
                }
                String(contents) => {
                    self.insert_file(&path, contents.into_bytes()).await;
                }
                _ => {
                    panic!("JSON object must contain only objects, strings, or null");
                }
            }
        }
        .boxed()
    }

    /// Recursively copies the file or directory at `src_path` on the real
    /// filesystem into this fake filesystem at `path`. Panics on I/O errors.
    pub fn insert_tree_from_real_fs<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        src_path: impl 'a + AsRef<Path> + Send,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;

        async move {
            let path = path.as_ref();
            if std::fs::metadata(&src_path).unwrap().is_file() {
                let contents = std::fs::read(src_path).unwrap();
                self.insert_file(path, contents).await;
            } else {
                self.create_dir(path).await.unwrap();
                for entry in std::fs::read_dir(&src_path).unwrap() {
                    let entry = entry.unwrap();
                    self.insert_tree_from_real_fs(path.join(entry.file_name()), entry.path())
                        .await;
                }
            }
        }
        .boxed()
    }

    /// Runs `f` against the fake git state for the repository at `dot_git`,
    /// creating the state on first use.
    ///
    /// `dot_git` may be either a `.git` directory, or a gitfile whose
    /// `gitdir:` pointer names the real state directory (as used by
    /// worktrees/submodules). The callback receives the state plus the
    /// repository's state dir and common dir paths. When `emit_git_event` is
    /// true, a `Changed` event is emitted for the state directory afterwards.
    pub fn with_git_state_and_paths<T, F>(
        &self,
        dot_git: &Path,
        emit_git_event: bool,
        f: F,
    ) -> Result<T>
    where
        F: FnOnce(&mut FakeGitRepositoryState, &Path, &Path) -> T,
    {
        let mut state = self.state.lock();
        let git_event_tx = state.git_event_tx.clone();
        let entry = state.entry(dot_git).context("open .git")?;

        if let FakeFsEntry::Dir { git_repo_state, .. } = entry {
            let repo_state = git_repo_state.get_or_insert_with(|| {
                log::debug!("insert git state for {dot_git:?}");
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            let result = f(&mut repo_state, dot_git, dot_git);

            drop(repo_state);
            if emit_git_event {
                state.emit_event([(dot_git, Some(PathEventKind::Changed))]);
            }

            Ok(result)
        } else if let FakeFsEntry::File {
            content,
            git_dir_path,
            ..
        } = &mut *entry
        {
            // Gitfile case: parse (and cache) the `gitdir:` pointer.
            let path = match git_dir_path {
                Some(path) => path,
                None => {
                    let path = std::str::from_utf8(content)
                        .ok()
                        .and_then(|content| content.strip_prefix("gitdir:"))
                        .context("not a valid gitfile")?
                        .trim();
                    git_dir_path.insert(normalize_path(&dot_git.parent().unwrap().join(path)))
                }
            }
            .clone();
            let Some((git_dir_entry, canonical_path)) = state.try_entry(&path, true) else {
                anyhow::bail!("pointed-to git dir {path:?} not found")
            };
            let FakeFsEntry::Dir {
                git_repo_state,
                entries,
                ..
            } = git_dir_entry
            else {
                anyhow::bail!("gitfile points to a non-directory")
            };
            // A `commondir` file (as written for worktrees) overrides the
            // common dir; otherwise it is the state dir itself.
            let common_dir = if let Some(child) = entries.get("commondir") {
                Path::new(
                    std::str::from_utf8(child.file_content("commondir".as_ref())?)
                        .context("commondir content")?,
                )
                .to_owned()
            } else {
                canonical_path.clone()
            };
            let repo_state = git_repo_state.get_or_insert_with(|| {
                Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
            });
            let mut repo_state = repo_state.lock();

            let result = f(&mut repo_state, &canonical_path, &common_dir);

            if emit_git_event {
                drop(repo_state);
                state.emit_event([(canonical_path, Some(PathEventKind::Changed))]);
            }

            Ok(result)
        } else {
            anyhow::bail!("not a valid git repository");
        }
    }

    /// Like `with_git_state_and_paths`, discarding the path arguments.
    pub fn with_git_state<T, F>(&self, dot_git: &Path, emit_git_event: bool, f: F) -> Result<T>
    where
        F: FnOnce(&mut FakeGitRepositoryState) -> T,
    {
        self.with_git_state_and_paths(dot_git, emit_git_event, |state, _, _| f(state))
    }

    /// Sets (or clears, with `None`) the current branch, adding it to the
    /// branch list when present.
    pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
        self.with_git_state(dot_git, true, |state| {
            let branch = branch.map(Into::into);
            state.branches.extend(branch.clone());
            state.current_branch_name = branch
        })
        .unwrap();
    }

    /// Adds branches to the repo; the first becomes current if none was set.
    pub fn insert_branches(&self, dot_git: &Path, branches: &[&str]) {
        self.with_git_state(dot_git, true, |state| {
            if let Some(first) = branches.first()
                && state.current_branch_name.is_none()
            {
                state.current_branch_name = Some(first.to_string())
            }
            state
                .branches
                .extend(branches.iter().map(ToString::to_string));
        })
        .unwrap();
    }

    /// Replaces the repository's set of unmerged (conflicted) paths.
    pub fn set_unmerged_paths_for_repo(
        &self,
        dot_git: &Path,
        unmerged_state: &[(RepoPath, UnmergedStatus)],
    ) {
        self.with_git_state(dot_git, true, |state| {
            state.unmerged_paths.clear();
            state.unmerged_paths.extend(
                unmerged_state
                    .iter()
                    .map(|(path, content)| (path.clone(), *content)),
            );
        })
        .unwrap();
    }

    /// Replaces the repository's index contents.
    pub fn set_index_for_repo(&self, dot_git: &Path, index_state: &[(&str, String)]) {
        self.with_git_state(dot_git, true, |state| {
            state.index_contents.clear();
            state.index_contents.extend(
                index_state
                    .iter()
                    .map(|(path, content)| (repo_path(path), content.clone())),
            );
        })
        .unwrap();
    }

    /// Replaces the repository's HEAD contents and records `sha` as the
    /// `HEAD` ref.
    pub fn set_head_for_repo(
        &self,
        dot_git: &Path,
        head_state: &[(&str, String)],
        sha: impl Into<String>,
    ) {
        self.with_git_state(dot_git, true, |state| {
            state.head_contents.clear();
            state.head_contents.extend(
                head_state
                    .iter()
                    .map(|(path, content)| (repo_path(path), content.clone())),
            );
            state.refs.insert("HEAD".into(), sha.into());
        })
        .unwrap();
    }

    /// Sets HEAD and index to identical contents (a clean working state).
    pub fn set_head_and_index_for_repo(&self, dot_git: &Path, contents_by_path: &[(&str, String)]) {
        self.with_git_state(dot_git, true, |state| {
            state.head_contents.clear();
            state.head_contents.extend(
                contents_by_path
                    .iter()
                    .map(|(path, contents)| (repo_path(path), contents.clone())),
            );
            state.index_contents = state.head_contents.clone();
        })
        .unwrap();
    }

    /// Replaces the merge-base contents, minting a deterministic fake OID
    /// for each path.
    pub fn set_merge_base_content_for_repo(
        &self,
        dot_git: &Path,
        contents_by_path: &[(&str, String)],
    ) {
        self.with_git_state(dot_git, true, |state| {
            use git::Oid;

            state.merge_base_contents.clear();
            // OIDs "111...", "222...", etc. — stable across runs.
            let oids = (1..)
                .map(|n| n.to_string())
                .map(|n| Oid::from_bytes(n.repeat(20).as_bytes()).unwrap());
            for ((path, content), oid) in contents_by_path.iter().zip(oids) {
                state.merge_base_contents.insert(repo_path(path), oid);
                state.oids.insert(oid, content.clone());
            }
        })
        .unwrap();
    }

    /// Replaces the repository's blame data.
    pub fn set_blame_for_repo(&self, dot_git: &Path, blames: Vec<(RepoPath, git::blame::Blame)>) {
        self.with_git_state(dot_git, true, |state| {
            state.blames.clear();
            state.blames.extend(blames);
        })
        .unwrap();
    }

    /// Put the given git repository into a state with the given status,
    /// by mutating the head, index, and unmerged state.
    ///
    /// Working-copy files are taken as ground truth; head/index contents are
    /// synthesized (with marker suffixes) so that diffing reproduces the
    /// requested statuses. Panics for statuses that cannot be represented
    /// for a file that exists in the working copy (e.g. `Deleted` in the
    /// worktree).
    pub fn set_status_for_repo(&self, dot_git: &Path, statuses: &[(&str, FileStatus)]) {
        let workdir_path = dot_git.parent().unwrap();
        let workdir_contents = self.files_with_contents(workdir_path);
        self.with_git_state(dot_git, true, |state| {
            state.index_contents.clear();
            state.head_contents.clear();
            state.unmerged_paths.clear();
            for (path, content) in workdir_contents {
                use util::{paths::PathStyle, rel_path::RelPath};

                let repo_path: RepoPath = RelPath::new(path.strip_prefix(&workdir_path).unwrap(), PathStyle::local()).unwrap().into();
                let status = statuses
                    .iter()
                    .find_map(|(p, status)| (*p == repo_path.as_unix_str()).then_some(status));
                let mut content = String::from_utf8_lossy(&content).to_string();

                let mut index_content = None;
                let mut head_content = None;
                match status {
                    None => {
                        // No requested status: file is clean everywhere.
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    Some(FileStatus::Untracked | FileStatus::Ignored) => {}
                    Some(FileStatus::Unmerged(unmerged_status)) => {
                        state
                            .unmerged_paths
                            .insert(repo_path.clone(), *unmerged_status);
                        content.push_str(" (unmerged)");
                        index_content = Some(content.clone());
                        head_content = Some(content);
                    }
                    Some(FileStatus::Tracked(TrackedStatus {
                        index_status,
                        worktree_status,
                    })) => {
                        match worktree_status {
                            StatusCode::Modified => {
                                let mut content = content.clone();
                                content.push_str(" (modified in working copy)");
                                index_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                index_content = Some(content.clone());
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted | StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                        match index_status {
                            StatusCode::Modified => {
                                let mut content = index_content.clone().expect(
                                    "file cannot be both modified in index and created in working copy",
                                );
                                content.push_str(" (modified in index)");
                                head_content = Some(content);
                            }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                head_content = Some(index_content.clone().expect("file cannot be both unmodified in index and created in working copy"));
                            }
                            StatusCode::Added => {}
                            StatusCode::Deleted => {
                                head_content = Some("".into());
                            }
                            StatusCode::Renamed | StatusCode::Copied => {
                                panic!("cannot create these statuses for an existing file");
                            }
                        };
                    }
                };

                if let Some(content) = index_content {
                    state.index_contents.insert(repo_path.clone(), content);
                }
                if let Some(content) = head_content {
                    state.head_contents.insert(repo_path.clone(), content);
                }
            }
        }).unwrap();
    }

    /// Makes subsequent index writes fail with `message` (or succeed again
    /// when `None`).
    pub fn set_error_message_for_index_write(&self, dot_git: &Path, message: Option<String>) {
        self.with_git_state(dot_git, true, |state| {
            state.simulated_index_write_error_message = message;
        })
        .unwrap();
    }

    /// All paths in the tree (breadth-first), optionally excluding anything
    /// under a `.git` component.
    pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
        let mut result = Vec::new();
        let mut queue = collections::VecDeque::new();
        let state = &*self.state.lock();
        queue.push_back((PathBuf::from(util::path!("/")), &state.root));
        while let Some((path, entry)) = queue.pop_front() {
            if let FakeFsEntry::Dir { entries, .. } = entry {
                for (name, entry) in entries {
                    queue.push_back((path.join(name), entry));
                }
            }
            if include_dot_git
                || !path
                    .components()
                    .any(|component| component.as_os_str() == *FS_DOT_GIT)
            {
                result.push(path);
            }
        }
        result
    }

    /// All directory paths in the tree (breadth-first), optionally excluding
    /// `.git` directories.
    pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
        let mut result = Vec::new();
        let mut queue = collections::VecDeque::new();
        let state = &*self.state.lock();
        queue.push_back((PathBuf::from(util::path!("/")), &state.root));
        while let Some((path, entry)) = queue.pop_front() {
            if let FakeFsEntry::Dir { entries, .. } = entry {
                for (name, entry) in entries {
                    queue.push_back((path.join(name), entry));
                }
                if include_dot_git
                    || !path
                        .components()
                        .any(|component| component.as_os_str() == *FS_DOT_GIT)
                {
                    result.push(path);
                }
            }
        }
        result
    }

    /// All regular-file paths in the tree (breadth-first); symlinks are not
    /// followed.
    pub fn files(&self) -> Vec<PathBuf> {
        let mut result = Vec::new();
        let mut queue = collections::VecDeque::new();
        let state = &*self.state.lock();
        queue.push_back((PathBuf::from(util::path!("/")), &state.root));
        while let Some((path, entry)) = queue.pop_front() {
            match entry {
                FakeFsEntry::File { .. } => result.push(path),
                FakeFsEntry::Dir { entries, .. } => {
                    for (name, entry) in entries {
                        queue.push_back((path.join(name), entry));
                    }
                }
                FakeFsEntry::Symlink { .. } => {}
            }
        }
        result
    }

    /// All regular files under `prefix`, with their contents.
    pub fn files_with_contents(&self, prefix: &Path) -> Vec<(PathBuf, Vec<u8>)> {
        let mut result = Vec::new();
        let mut queue = collections::VecDeque::new();
        let state = &*self.state.lock();
        queue.push_back((PathBuf::from(util::path!("/")), &state.root));
        while let Some((path, entry)) = queue.pop_front() {
            match entry {
                FakeFsEntry::File { content, .. } => {
                    if path.starts_with(prefix) {
                        result.push((path, content.clone()));
                    }
                }
                FakeFsEntry::Dir { entries, .. } => {
                    for (name, entry) in entries {
                        queue.push_back((path.join(name), entry));
                    }
                }
                FakeFsEntry::Symlink { .. } => {}
            }
        }
        result
    }

    /// How many `read_dir` calls have been issued.
    pub fn read_dir_call_count(&self) -> usize {
        self.state.lock().read_dir_call_count
    }

    /// Paths with at least one live watcher subscription.
    pub fn watched_paths(&self) -> Vec<PathBuf> {
        let state = self.state.lock();
        state
            .event_txs
            .iter()
            .filter_map(|(path, tx)| Some(path.clone()).filter(|_| !tx.is_closed()))
            .collect()
    }

    /// How many `metadata` calls have been issued.
    pub fn metadata_call_count(&self) -> usize {
        self.state.lock().metadata_call_count
    }

    /// How many write operations have been issued for a specific path.
    pub fn write_count_for_path(&self, path: impl AsRef<Path>) -> usize {
        let path = path.as_ref().to_path_buf();
        self.state
            .lock()
            .path_write_counts
            .get(&path)
            .copied()
            .unwrap_or(0)
    }

    /// Injects a synthetic filesystem event for `path`.
    pub fn emit_fs_event(&self, path: impl Into<PathBuf>, event: Option<PathEventKind>) {
        self.state.lock().emit_event(std::iter::once((path, event)));
    }

    /// Yields for a random, test-scheduler-controlled amount of time.
    fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
        self.executor.simulate_random_delay()
    }
}
2026
#[cfg(any(test, feature = "test-support"))]
impl FakeFsEntry {
    /// Whether this entry is a regular file.
    fn is_file(&self) -> bool {
        match self {
            Self::File { .. } => true,
            _ => false,
        }
    }

    /// Whether this entry is a symlink.
    fn is_symlink(&self) -> bool {
        match self {
            Self::Symlink { .. } => true,
            _ => false,
        }
    }

    /// Borrows the file's bytes; errors if the entry is not a file.
    fn file_content(&self, path: &Path) -> Result<&Vec<u8>> {
        match self {
            Self::File { content, .. } => Ok(content),
            _ => anyhow::bail!("not a file: {path:?}"),
        }
    }

    /// Mutably borrows a directory's children; errors if not a directory.
    fn dir_entries(&mut self, path: &Path) -> Result<&mut BTreeMap<String, FakeFsEntry>> {
        match self {
            Self::Dir { entries, .. } => Ok(entries),
            _ => anyhow::bail!("not a directory: {path:?}"),
        }
    }
}
2053
/// A watcher over the fake filesystem, delivering events through a channel.
#[cfg(any(test, feature = "test-support"))]
struct FakeWatcher {
    // Channel on which event batches are delivered to the subscriber.
    tx: smol::channel::Sender<Vec<PathEvent>>,
    // The path this watcher was originally created for.
    original_path: PathBuf,
    // Shared filesystem state, used to register additional subscriptions.
    fs_state: Arc<Mutex<FakeFsState>>,
    // Extra path prefixes registered later via `Watcher::add`.
    prefixes: Mutex<Vec<PathBuf>>,
}
2061
#[cfg(any(test, feature = "test-support"))]
impl Watcher for FakeWatcher {
    /// Subscribes this watcher to an additional path prefix.
    fn add(&self, path: &Path) -> Result<()> {
        // Paths under the originally-watched root are already covered.
        if path.starts_with(&self.original_path) {
            return Ok(());
        }
        let watched = path.to_owned();
        self.fs_state
            .try_lock()
            .unwrap()
            .event_txs
            .push((watched.clone(), self.tx.clone()));
        self.prefixes.lock().push(watched);
        Ok(())
    }

    /// Removal is not tracked by the fake watcher.
    fn remove(&self, _: &Path) -> Result<()> {
        Ok(())
    }
}
2081
/// A fake open-file handle that remembers the file's inode so its current
/// path can still be resolved after the file is renamed.
#[cfg(any(test, feature = "test-support"))]
#[derive(Debug)]
struct FakeHandle {
    inode: u64,
}
2087
#[cfg(any(test, feature = "test-support"))]
impl FileHandle for FakeHandle {
    /// Looks up where the file behind this handle lives now, via the
    /// rename record kept in `FakeFsState::moves`.
    fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf> {
        let fake = fs.as_fake();
        let mut state = fake.state.lock();
        let target = match state.moves.get(&self.inode) {
            Some(target) => target.clone(),
            None => anyhow::bail!("fake fd not moved"),
        };

        // The destination must still exist (and is not followed as a symlink).
        if state.try_entry(&target, false).is_none() {
            anyhow::bail!("fake fd target not found")
        }
        Ok(target)
    }
}
2103
2104#[cfg(any(test, feature = "test-support"))]
2105#[async_trait::async_trait]
2106impl Fs for FakeFs {
    /// Creates the directory at `path` and all missing ancestors
    /// (`mkdir -p` semantics), emitting a `Created` event for each directory
    /// actually created.
    async fn create_dir(&self, path: &Path) -> Result<()> {
        self.simulate_random_delay().await;

        let mut created_dirs = Vec::new();
        let mut cur_path = PathBuf::new();
        for component in path.components() {
            // Prefix (`C:`) and root components are not entries in the tree.
            let should_skip = matches!(component, Component::Prefix(..) | Component::RootDir);
            cur_path.push(component);
            if should_skip {
                continue;
            }
            // The lock is re-acquired per component; note the inode/mtime
            // counters advance even when the directory already exists.
            let mut state = self.state.lock();

            let inode = state.get_and_increment_inode();
            let mtime = state.get_and_increment_mtime();
            state.write_path(&cur_path, |entry| {
                entry.or_insert_with(|| {
                    created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
                    FakeFsEntry::Dir {
                        inode,
                        mtime,
                        len: 0,
                        entries: Default::default(),
                        git_repo_state: None,
                    }
                });
                Ok(())
            })?
        }

        // Emit all creation events in one batch, outside the per-component loop.
        self.state.lock().emit_event(created_dirs);
        Ok(())
    }
2140
2141 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
2142 self.simulate_random_delay().await;
2143 let mut state = self.state.lock();
2144 let inode = state.get_and_increment_inode();
2145 let mtime = state.get_and_increment_mtime();
2146 let file = FakeFsEntry::File {
2147 inode,
2148 mtime,
2149 len: 0,
2150 content: Vec::new(),
2151 git_dir_path: None,
2152 };
2153 let mut kind = Some(PathEventKind::Created);
2154 state.write_path(path, |entry| {
2155 match entry {
2156 btree_map::Entry::Occupied(mut e) => {
2157 if options.overwrite {
2158 kind = Some(PathEventKind::Changed);
2159 *e.get_mut() = file;
2160 } else if !options.ignore_if_exists {
2161 anyhow::bail!("path already exists: {path:?}");
2162 }
2163 }
2164 btree_map::Entry::Vacant(e) => {
2165 e.insert(file);
2166 }
2167 }
2168 Ok(())
2169 })?;
2170 state.emit_event([(path, kind)]);
2171 Ok(())
2172 }
2173
2174 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
2175 let mut state = self.state.lock();
2176 let file = FakeFsEntry::Symlink { target };
2177 state
2178 .write_path(path.as_ref(), move |e| match e {
2179 btree_map::Entry::Vacant(e) => {
2180 e.insert(file);
2181 Ok(())
2182 }
2183 btree_map::Entry::Occupied(mut e) => {
2184 *e.get_mut() = file;
2185 Ok(())
2186 }
2187 })
2188 .unwrap();
2189 state.emit_event([(path, Some(PathEventKind::Created))]);
2190
2191 Ok(())
2192 }
2193
2194 async fn create_file_with(
2195 &self,
2196 path: &Path,
2197 mut content: Pin<&mut (dyn AsyncRead + Send)>,
2198 ) -> Result<()> {
2199 let mut bytes = Vec::new();
2200 content.read_to_end(&mut bytes).await?;
2201 self.write_file_internal(path, bytes, true)?;
2202 Ok(())
2203 }
2204
2205 async fn extract_tar_file(
2206 &self,
2207 path: &Path,
2208 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
2209 ) -> Result<()> {
2210 let mut entries = content.entries()?;
2211 while let Some(entry) = entries.next().await {
2212 let mut entry = entry?;
2213 if entry.header().entry_type().is_file() {
2214 let path = path.join(entry.path()?.as_ref());
2215 let mut bytes = Vec::new();
2216 entry.read_to_end(&mut bytes).await?;
2217 self.create_dir(path.parent().unwrap()).await?;
2218 self.write_file_internal(&path, bytes, true)?;
2219 }
2220 }
2221 Ok(())
2222 }
2223
    /// Moves `old_path` to `new_path`, honoring `RenameOptions`.
    ///
    /// Emits a Removed event for the old path and a Created event for the new
    /// one, and records the move in `state.moves` keyed by inode.
    async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let old_path = normalize_path(old_path);
        let new_path = normalize_path(new_path);

        let mut state = self.state.lock();
        // Snapshot the entry being moved; fails if it does not exist.
        let moved_entry = state.write_path(&old_path, |e| {
            if let btree_map::Entry::Occupied(e) = e {
                Ok(e.get().clone())
            } else {
                anyhow::bail!("path does not exist: {old_path:?}")
            }
        })?;

        // Symlinks fall through to 0 — they carry no inode in this model.
        let inode = match moved_entry {
            FakeFsEntry::File { inode, .. } => inode,
            FakeFsEntry::Dir { inode, .. } => inode,
            _ => 0,
        };

        // NOTE(review): presumably consumed by event handling to correlate the
        // remove/create pair below into a single move — confirm at the reader.
        state.moves.insert(inode, new_path.clone());

        state.write_path(&new_path, |e| {
            match e {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        *e.get_mut() = moved_entry;
                    } else if !options.ignore_if_exists {
                        anyhow::bail!("path already exists: {new_path:?}");
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(moved_entry);
                }
            }
            Ok(())
        })?;

        // The old entry's existence was verified above, so removal cannot miss.
        state
            .write_path(&old_path, |e| {
                if let btree_map::Entry::Occupied(e) = e {
                    Ok(e.remove())
                } else {
                    unreachable!()
                }
            })
            .unwrap();

        state.emit_event([
            (old_path, Some(PathEventKind::Removed)),
            (new_path, Some(PathEventKind::Created)),
        ]);
        Ok(())
    }
2279
2280 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
2281 self.simulate_random_delay().await;
2282
2283 let source = normalize_path(source);
2284 let target = normalize_path(target);
2285 let mut state = self.state.lock();
2286 let mtime = state.get_and_increment_mtime();
2287 let inode = state.get_and_increment_inode();
2288 let source_entry = state.entry(&source)?;
2289 let content = source_entry.file_content(&source)?.clone();
2290 let mut kind = Some(PathEventKind::Created);
2291 state.write_path(&target, |e| match e {
2292 btree_map::Entry::Occupied(e) => {
2293 if options.overwrite {
2294 kind = Some(PathEventKind::Changed);
2295 Ok(Some(e.get().clone()))
2296 } else if !options.ignore_if_exists {
2297 anyhow::bail!("{target:?} already exists");
2298 } else {
2299 Ok(None)
2300 }
2301 }
2302 btree_map::Entry::Vacant(e) => Ok(Some(
2303 e.insert(FakeFsEntry::File {
2304 inode,
2305 mtime,
2306 len: content.len() as u64,
2307 content,
2308 git_dir_path: None,
2309 })
2310 .clone(),
2311 )),
2312 })?;
2313 state.emit_event([(target, kind)]);
2314 Ok(())
2315 }
2316
2317 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2318 self.simulate_random_delay().await;
2319
2320 let path = normalize_path(path);
2321 let parent_path = path.parent().context("cannot remove the root")?;
2322 let base_name = path.file_name().context("cannot remove the root")?;
2323
2324 let mut state = self.state.lock();
2325 let parent_entry = state.entry(parent_path)?;
2326 let entry = parent_entry
2327 .dir_entries(parent_path)?
2328 .entry(base_name.to_str().unwrap().into());
2329
2330 match entry {
2331 btree_map::Entry::Vacant(_) => {
2332 if !options.ignore_if_not_exists {
2333 anyhow::bail!("{path:?} does not exist");
2334 }
2335 }
2336 btree_map::Entry::Occupied(mut entry) => {
2337 {
2338 let children = entry.get_mut().dir_entries(&path)?;
2339 if !options.recursive && !children.is_empty() {
2340 anyhow::bail!("{path:?} is not empty");
2341 }
2342 }
2343 entry.remove();
2344 }
2345 }
2346 state.emit_event([(path, Some(PathEventKind::Removed))]);
2347 Ok(())
2348 }
2349
2350 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2351 self.simulate_random_delay().await;
2352
2353 let path = normalize_path(path);
2354 let parent_path = path.parent().context("cannot remove the root")?;
2355 let base_name = path.file_name().unwrap();
2356 let mut state = self.state.lock();
2357 let parent_entry = state.entry(parent_path)?;
2358 let entry = parent_entry
2359 .dir_entries(parent_path)?
2360 .entry(base_name.to_str().unwrap().into());
2361 match entry {
2362 btree_map::Entry::Vacant(_) => {
2363 if !options.ignore_if_not_exists {
2364 anyhow::bail!("{path:?} does not exist");
2365 }
2366 }
2367 btree_map::Entry::Occupied(mut entry) => {
2368 entry.get_mut().file_content(&path)?;
2369 entry.remove();
2370 }
2371 }
2372 state.emit_event([(path, Some(PathEventKind::Removed))]);
2373 Ok(())
2374 }
2375
2376 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
2377 let bytes = self.load_internal(path).await?;
2378 Ok(Box::new(io::Cursor::new(bytes)))
2379 }
2380
2381 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
2382 self.simulate_random_delay().await;
2383 let mut state = self.state.lock();
2384 let inode = match state.entry(path)? {
2385 FakeFsEntry::File { inode, .. } => *inode,
2386 FakeFsEntry::Dir { inode, .. } => *inode,
2387 _ => unreachable!(),
2388 };
2389 Ok(Arc::new(FakeHandle { inode }))
2390 }
2391
2392 async fn load(&self, path: &Path) -> Result<String> {
2393 let content = self.load_internal(path).await?;
2394 Ok(String::from_utf8(content)?)
2395 }
2396
    /// Loads the raw bytes of the file at `path`.
    async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
        self.load_internal(path).await
    }
2400
2401 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
2402 self.simulate_random_delay().await;
2403 let path = normalize_path(path.as_path());
2404 if let Some(path) = path.parent() {
2405 self.create_dir(path).await?;
2406 }
2407 self.write_file_internal(path, data.into_bytes(), true)?;
2408 Ok(())
2409 }
2410
2411 async fn save(
2412 &self,
2413 path: &Path,
2414 text: &Rope,
2415 line_ending: LineEnding,
2416 encoding: Encoding,
2417 ) -> Result<()> {
2418 self.simulate_random_delay().await;
2419 let path = normalize_path(path);
2420 let content = chunks(text, line_ending).collect::<String>();
2421 if let Some(path) = path.parent() {
2422 self.create_dir(path).await?;
2423 }
2424 let mut bytes = Vec::new();
2425 if let Some(bom) = encoding.bom() {
2426 bytes.extend_from_slice(bom);
2427 }
2428 bytes.extend_from_slice(&encoding.encode_chunk(&content)?);
2429 self.write_file_internal(path, bytes, false)?;
2430 Ok(())
2431 }
2432
2433 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
2434 self.simulate_random_delay().await;
2435 let path = normalize_path(path);
2436 if let Some(path) = path.parent() {
2437 self.create_dir(path).await?;
2438 }
2439 self.write_file_internal(path, content.to_vec(), false)?;
2440 Ok(())
2441 }
2442
2443 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
2444 let path = normalize_path(path);
2445 self.simulate_random_delay().await;
2446 let state = self.state.lock();
2447 let canonical_path = state
2448 .canonicalize(&path, true)
2449 .with_context(|| format!("path does not exist: {path:?}"))?;
2450 Ok(canonical_path)
2451 }
2452
2453 async fn is_file(&self, path: &Path) -> bool {
2454 let path = normalize_path(path);
2455 self.simulate_random_delay().await;
2456 let mut state = self.state.lock();
2457 if let Some((entry, _)) = state.try_entry(&path, true) {
2458 entry.is_file()
2459 } else {
2460 false
2461 }
2462 }
2463
2464 async fn is_dir(&self, path: &Path) -> bool {
2465 self.metadata(path)
2466 .await
2467 .is_ok_and(|metadata| metadata.is_some_and(|metadata| metadata.is_dir))
2468 }
2469
    /// Returns metadata for `path`, following a terminal symlink.
    ///
    /// Returns `Ok(None)` when the path does not exist or a symlink's target
    /// is dangling. `metadata_call_count` is bumped for test introspection.
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let mut state = self.state.lock();
        state.metadata_call_count += 1;
        if let Some((mut entry, _)) = state.try_entry(&path, false) {
            let is_symlink = entry.is_symlink();
            if is_symlink {
                // Re-resolve, this time following the symlink; a dangling
                // link is reported as nonexistent.
                if let Some(e) = state.try_entry(&path, true).map(|e| e.0) {
                    entry = e;
                } else {
                    return Ok(None);
                }
            }

            Ok(Some(match &*entry {
                FakeFsEntry::File {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: false,
                    is_symlink,
                    is_fifo: false,
                },
                FakeFsEntry::Dir {
                    inode, mtime, len, ..
                } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    len: *len,
                    is_dir: true,
                    is_symlink,
                    is_fifo: false,
                },
                // Symlinks were already resolved above.
                FakeFsEntry::Symlink { .. } => unreachable!(),
            }))
        } else {
            Ok(None)
        }
    }
2512
2513 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
2514 self.simulate_random_delay().await;
2515 let path = normalize_path(path);
2516 let mut state = self.state.lock();
2517 let (entry, _) = state
2518 .try_entry(&path, false)
2519 .with_context(|| format!("path does not exist: {path:?}"))?;
2520 if let FakeFsEntry::Symlink { target } = entry {
2521 Ok(target.clone())
2522 } else {
2523 anyhow::bail!("not a symlink: {path:?}")
2524 }
2525 }
2526
2527 async fn read_dir(
2528 &self,
2529 path: &Path,
2530 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
2531 self.simulate_random_delay().await;
2532 let path = normalize_path(path);
2533 let mut state = self.state.lock();
2534 state.read_dir_call_count += 1;
2535 let entry = state.entry(&path)?;
2536 let children = entry.dir_entries(&path)?;
2537 let paths = children
2538 .keys()
2539 .map(|file_name| Ok(path.join(file_name)))
2540 .collect::<Vec<_>>();
2541 Ok(Box::pin(futures::stream::iter(paths)))
2542 }
2543
    /// Registers a watcher for `path` and returns its event stream plus a
    /// handle through which additional prefixes can be watched.
    async fn watch(
        &self,
        path: &Path,
        _: Duration,
    ) -> (
        Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
        Arc<dyn Watcher>,
    ) {
        self.simulate_random_delay().await;
        let (tx, rx) = smol::channel::unbounded();
        let path = path.to_path_buf();
        self.state.lock().event_txs.push((path.clone(), tx.clone()));
        let executor = self.executor.clone();
        let watcher = Arc::new(FakeWatcher {
            tx,
            original_path: path.to_owned(),
            fs_state: self.state.clone(),
            prefixes: Mutex::new(vec![path]),
        });
        (
            // Only pass batches containing at least one event under a watched
            // prefix; the random delay varies scheduling across test runs.
            Box::pin(futures::StreamExt::filter(rx, {
                let watcher = watcher.clone();
                move |events| {
                    let result = events.iter().any(|evt_path| {
                        watcher
                            .prefixes
                            .lock()
                            .iter()
                            .any(|prefix| evt_path.path.starts_with(prefix))
                    });
                    let executor = executor.clone();
                    async move {
                        executor.simulate_random_delay().await;
                        result
                    }
                }
            })),
            watcher,
        )
    }
2584
    /// Opens a fake git repository rooted at `abs_dot_git`.
    ///
    /// Errors from `with_git_state_and_paths` are logged (via `log_err`) and
    /// mapped to `None`.
    fn open_repo(
        &self,
        abs_dot_git: &Path,
        _system_git_binary: Option<&Path>,
    ) -> Option<Arc<dyn GitRepository>> {
        use util::ResultExt as _;

        self.with_git_state_and_paths(
            abs_dot_git,
            false,
            |_, repository_dir_path, common_dir_path| {
                Arc::new(fake_git_repo::FakeGitRepository {
                    // `this` is a weak self-reference; assumed upgradable here.
                    fs: self.this.upgrade().unwrap(),
                    executor: self.executor.clone(),
                    dot_git_path: abs_dot_git.to_path_buf(),
                    repository_dir_path: repository_dir_path.to_owned(),
                    common_dir_path: common_dir_path.to_owned(),
                    checkpoints: Arc::default(),
                }) as _
            },
        )
        .log_err()
    }
2608
2609 async fn git_init(
2610 &self,
2611 abs_work_directory_path: &Path,
2612 _fallback_branch_name: String,
2613 ) -> Result<()> {
2614 self.create_dir(&abs_work_directory_path.join(".git")).await
2615 }
2616
    /// Cloning is intentionally unsupported by the fake filesystem.
    async fn git_clone(&self, _repo_url: &str, _abs_work_directory: &Path) -> Result<()> {
        anyhow::bail!("Git clone is not supported in fake Fs")
    }
2620
    /// Always true: this is the in-memory test implementation of `Fs`.
    fn is_fake(&self) -> bool {
        true
    }
2624
    /// The fake filesystem always behaves case-sensitively.
    async fn is_case_sensitive(&self) -> Result<bool> {
        Ok(true)
    }
2628
    /// Downcasts to the concrete `FakeFs`.
    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> Arc<FakeFs> {
        // `this` is a weak self-reference, assumed to be set at construction;
        // the unwrap is expected to succeed while the instance is alive.
        self.this.upgrade().unwrap()
    }
2633}
2634
/// Yields the text of `rope` as `&str` pieces with every line separator
/// replaced by `line_ending`.
///
/// `str::lines` strips the separators, so `line_ending` is re-inserted
/// *between* lines within each chunk; a chunk ending in '\n' gets one
/// trailing separator appended (because `lines` drops a final newline).
fn chunks(rope: &Rope, line_ending: LineEnding) -> impl Iterator<Item = &str> {
    rope.chunks().flat_map(move |chunk| {
        // False only for the first line of each chunk, so no separator is
        // inserted where a line continues across a chunk boundary.
        let mut newline = false;
        let end_with_newline = chunk.ends_with('\n').then_some(line_ending.as_str());
        chunk
            .lines()
            .flat_map(move |line| {
                let ending = if newline {
                    Some(line_ending.as_str())
                } else {
                    None
                };
                newline = true;
                ending.into_iter().chain([line])
            })
            .chain(end_with_newline)
    })
}
2653
/// Normalizes `path` lexically: drops `.` components and resolves each `..`
/// against the preceding component, without touching the filesystem.
///
/// Because this is purely textual, `..` across a symlink resolves differently
/// than the OS would resolve it.
pub fn normalize_path(path: &Path) -> PathBuf {
    let mut parts = path.components().peekable();

    // Preserve a leading Windows prefix (e.g. `C:`) verbatim.
    let mut normalized = match parts.peek().cloned() {
        Some(prefix @ Component::Prefix(..)) => {
            parts.next();
            PathBuf::from(prefix.as_os_str())
        }
        _ => PathBuf::new(),
    };

    for part in parts {
        match part {
            Component::Prefix(..) => unreachable!(),
            Component::RootDir => normalized.push(part.as_os_str()),
            Component::CurDir => {}
            // Popping at the root is a no-op, matching `PathBuf::pop`.
            Component::ParentDir => {
                normalized.pop();
            }
            Component::Normal(name) => normalized.push(name),
        }
    }
    normalized
}
2680
/// Recursively copies `source` into `target` on `fs`.
///
/// Directories are recreated (an existing directory is first removed when
/// `options.overwrite` permits) and files are copied with `options` forwarded
/// to `Fs::copy_file`.
pub async fn copy_recursive<'a>(
    fs: &'a dyn Fs,
    source: &'a Path,
    target: &'a Path,
    options: CopyOptions,
) -> Result<()> {
    for (item, is_dir) in read_dir_items(fs, source).await? {
        let Ok(item_relative_path) = item.strip_prefix(source) else {
            continue;
        };
        // The source root itself maps onto `target` directly.
        let target_item = if item_relative_path == Path::new("") {
            target.to_path_buf()
        } else {
            target.join(item_relative_path)
        };
        if is_dir {
            if !options.overwrite && fs.metadata(&target_item).await.is_ok_and(|m| m.is_some()) {
                if options.ignore_if_exists {
                    continue;
                } else {
                    anyhow::bail!("{target_item:?} already exists");
                }
            }
            // Best-effort removal so the directory can be recreated fresh
            // below; failures (e.g. nonexistent target) are ignored.
            let _ = fs
                .remove_dir(
                    &target_item,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await;
            fs.create_dir(&target_item).await?;
        } else {
            fs.copy_file(&item, &target_item, options).await?;
        }
    }
    Ok(())
}
2720
2721/// Recursively reads all of the paths in the given directory.
2722///
2723/// Returns a vector of tuples of (path, is_dir).
2724pub async fn read_dir_items<'a>(fs: &'a dyn Fs, source: &'a Path) -> Result<Vec<(PathBuf, bool)>> {
2725 let mut items = Vec::new();
2726 read_recursive(fs, source, &mut items).await?;
2727 Ok(items)
2728}
2729
2730fn read_recursive<'a>(
2731 fs: &'a dyn Fs,
2732 source: &'a Path,
2733 output: &'a mut Vec<(PathBuf, bool)>,
2734) -> BoxFuture<'a, Result<()>> {
2735 use futures::future::FutureExt;
2736
2737 async move {
2738 let metadata = fs
2739 .metadata(source)
2740 .await?
2741 .with_context(|| format!("path does not exist: {source:?}"))?;
2742
2743 if metadata.is_dir {
2744 output.push((source.to_path_buf(), true));
2745 let mut children = fs.read_dir(source).await?;
2746 while let Some(child_path) = children.next().await {
2747 if let Ok(child_path) = child_path {
2748 read_recursive(fs, &child_path, output).await?;
2749 }
2750 }
2751 } else {
2752 output.push((source.to_path_buf(), false));
2753 }
2754 Ok(())
2755 }
2756 .boxed()
2757}
2758
// todo(windows)
// can we get file id not open the file twice?
// https://github.com/rust-lang/rust/issues/63010
/// Returns a 64-bit file identifier for `path`, built by combining the
/// `nFileIndexHigh`/`nFileIndexLow` fields from
/// `GetFileInformationByHandle`.
#[cfg(target_os = "windows")]
async fn file_id(path: impl AsRef<Path>) -> Result<u64> {
    use std::os::windows::io::AsRawHandle;

    use smol::fs::windows::OpenOptionsExt;
    use windows::Win32::{
        Foundation::HANDLE,
        Storage::FileSystem::{
            BY_HANDLE_FILE_INFORMATION, FILE_FLAG_BACKUP_SEMANTICS, GetFileInformationByHandle,
        },
    };

    // NOTE(review): FILE_FLAG_BACKUP_SEMANTICS presumably allows opening
    // directories as well as files — confirm against the callers.
    let file = smol::fs::OpenOptions::new()
        .read(true)
        .custom_flags(FILE_FLAG_BACKUP_SEMANTICS.0)
        .open(path)
        .await?;

    let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() };
    // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle
    // This function supports Windows XP+
    smol::unblock(move || {
        // SAFETY: `file` is moved into this closure, so its raw handle stays
        // valid for the duration of the call; `info` is a plain C struct that
        // the API fills in.
        unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? };

        Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64))
    })
    .await
}
2790
/// Atomically replaces `replaced_file` with `replacement_file` via the Win32
/// `ReplaceFileW` API.
#[cfg(target_os = "windows")]
fn atomic_replace<P: AsRef<Path>>(
    replaced_file: P,
    replacement_file: P,
) -> windows::core::Result<()> {
    use windows::{
        Win32::Storage::FileSystem::{REPLACE_FILE_FLAGS, ReplaceFileW},
        core::HSTRING,
    };

    // If the file does not exist, create it.
    // (The result is deliberately ignored: the file may already exist, and
    // ReplaceFileW needs a replaced file to be present.)
    let _ = std::fs::File::create_new(replaced_file.as_ref());

    // SAFETY: both HSTRING arguments are valid, NUL-terminated wide strings
    // that live for the duration of the call.
    unsafe {
        ReplaceFileW(
            &HSTRING::from(replaced_file.as_ref().to_string_lossy().into_owned()),
            &HSTRING::from(replacement_file.as_ref().to_string_lossy().into_owned()),
            None,
            REPLACE_FILE_FLAGS::default(),
            None,
            None,
        )
    }
}
2815
#[cfg(test)]
mod tests {
    //! Tests covering `FakeFs` basics, `copy_recursive` semantics, and
    //! `RealFs::atomic_write`.

    use super::*;
    use gpui::BackgroundExecutor;
    use serde_json::json;
    use util::path;

    // Tree insertion, symlink creation, and canonicalization through a
    // symlink on the fake filesystem.
    #[gpui::test]
    async fn test_fake_fs(executor: BackgroundExecutor) {
        let fs = FakeFs::new(executor.clone());
        fs.insert_tree(
            path!("/root"),
            json!({
                "dir1": {
                    "a": "A",
                    "b": "B"
                },
                "dir2": {
                    "c": "C",
                    "dir3": {
                        "d": "D"
                    }
                }
            }),
        )
        .await;

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/root/dir1/a")),
                PathBuf::from(path!("/root/dir1/b")),
                PathBuf::from(path!("/root/dir2/c")),
                PathBuf::from(path!("/root/dir2/dir3/d")),
            ]
        );

        fs.create_symlink(path!("/root/dir2/link-to-dir3").as_ref(), "./dir3".into())
            .await
            .unwrap();

        assert_eq!(
            fs.canonicalize(path!("/root/dir2/link-to-dir3").as_ref())
                .await
                .unwrap(),
            PathBuf::from(path!("/root/dir2/dir3")),
        );
        assert_eq!(
            fs.canonicalize(path!("/root/dir2/link-to-dir3/d").as_ref())
                .await
                .unwrap(),
            PathBuf::from(path!("/root/dir2/dir3/d")),
        );
        assert_eq!(
            fs.load(path!("/root/dir2/link-to-dir3/d").as_ref())
                .await
                .unwrap(),
            "D",
        );
    }

    // Copying one file into the same directory and into a subdirectory.
    #[gpui::test]
    async fn test_copy_recursive_with_single_file(executor: BackgroundExecutor) {
        let fs = FakeFs::new(executor.clone());
        fs.insert_tree(
            path!("/outer"),
            json!({
                "a": "A",
                "b": "B",
                "inner": {}
            }),
        )
        .await;

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/a")),
                PathBuf::from(path!("/outer/b")),
            ]
        );

        let source = Path::new(path!("/outer/a"));
        let target = Path::new(path!("/outer/a copy"));
        copy_recursive(fs.as_ref(), source, target, Default::default())
            .await
            .unwrap();

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/a")),
                PathBuf::from(path!("/outer/a copy")),
                PathBuf::from(path!("/outer/b")),
            ]
        );

        let source = Path::new(path!("/outer/a"));
        let target = Path::new(path!("/outer/inner/a copy"));
        copy_recursive(fs.as_ref(), source, target, Default::default())
            .await
            .unwrap();

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/a")),
                PathBuf::from(path!("/outer/a copy")),
                PathBuf::from(path!("/outer/b")),
                PathBuf::from(path!("/outer/inner/a copy")),
            ]
        );
    }

    // Copying an empty directory and a non-empty directory.
    #[gpui::test]
    async fn test_copy_recursive_with_single_dir(executor: BackgroundExecutor) {
        let fs = FakeFs::new(executor.clone());
        fs.insert_tree(
            path!("/outer"),
            json!({
                "a": "A",
                "empty": {},
                "non-empty": {
                    "b": "B",
                }
            }),
        )
        .await;

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/a")),
                PathBuf::from(path!("/outer/non-empty/b")),
            ]
        );
        assert_eq!(
            fs.directories(false),
            vec![
                PathBuf::from(path!("/")),
                PathBuf::from(path!("/outer")),
                PathBuf::from(path!("/outer/empty")),
                PathBuf::from(path!("/outer/non-empty")),
            ]
        );

        let source = Path::new(path!("/outer/empty"));
        let target = Path::new(path!("/outer/empty copy"));
        copy_recursive(fs.as_ref(), source, target, Default::default())
            .await
            .unwrap();

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/a")),
                PathBuf::from(path!("/outer/non-empty/b")),
            ]
        );
        assert_eq!(
            fs.directories(false),
            vec![
                PathBuf::from(path!("/")),
                PathBuf::from(path!("/outer")),
                PathBuf::from(path!("/outer/empty")),
                PathBuf::from(path!("/outer/empty copy")),
                PathBuf::from(path!("/outer/non-empty")),
            ]
        );

        let source = Path::new(path!("/outer/non-empty"));
        let target = Path::new(path!("/outer/non-empty copy"));
        copy_recursive(fs.as_ref(), source, target, Default::default())
            .await
            .unwrap();

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/a")),
                PathBuf::from(path!("/outer/non-empty/b")),
                PathBuf::from(path!("/outer/non-empty copy/b")),
            ]
        );
        assert_eq!(
            fs.directories(false),
            vec![
                PathBuf::from(path!("/")),
                PathBuf::from(path!("/outer")),
                PathBuf::from(path!("/outer/empty")),
                PathBuf::from(path!("/outer/empty copy")),
                PathBuf::from(path!("/outer/non-empty")),
                PathBuf::from(path!("/outer/non-empty copy")),
            ]
        );
    }

    // Copying a directory into one of its own subdirectories must not recurse
    // into the partially-written copy.
    #[gpui::test]
    async fn test_copy_recursive(executor: BackgroundExecutor) {
        let fs = FakeFs::new(executor.clone());
        fs.insert_tree(
            path!("/outer"),
            json!({
                "inner1": {
                    "a": "A",
                    "b": "B",
                    "inner3": {
                        "d": "D",
                    },
                    "inner4": {}
                },
                "inner2": {
                    "c": "C",
                }
            }),
        )
        .await;

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/inner1/a")),
                PathBuf::from(path!("/outer/inner1/b")),
                PathBuf::from(path!("/outer/inner2/c")),
                PathBuf::from(path!("/outer/inner1/inner3/d")),
            ]
        );
        assert_eq!(
            fs.directories(false),
            vec![
                PathBuf::from(path!("/")),
                PathBuf::from(path!("/outer")),
                PathBuf::from(path!("/outer/inner1")),
                PathBuf::from(path!("/outer/inner2")),
                PathBuf::from(path!("/outer/inner1/inner3")),
                PathBuf::from(path!("/outer/inner1/inner4")),
            ]
        );

        let source = Path::new(path!("/outer"));
        let target = Path::new(path!("/outer/inner1/outer"));
        copy_recursive(fs.as_ref(), source, target, Default::default())
            .await
            .unwrap();

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/inner1/a")),
                PathBuf::from(path!("/outer/inner1/b")),
                PathBuf::from(path!("/outer/inner2/c")),
                PathBuf::from(path!("/outer/inner1/inner3/d")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
                PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/inner3/d")),
            ]
        );
        assert_eq!(
            fs.directories(false),
            vec![
                PathBuf::from(path!("/")),
                PathBuf::from(path!("/outer")),
                PathBuf::from(path!("/outer/inner1")),
                PathBuf::from(path!("/outer/inner2")),
                PathBuf::from(path!("/outer/inner1/inner3")),
                PathBuf::from(path!("/outer/inner1/inner4")),
                PathBuf::from(path!("/outer/inner1/outer")),
                PathBuf::from(path!("/outer/inner1/outer/inner1")),
                PathBuf::from(path!("/outer/inner1/outer/inner2")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/inner3")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/inner4")),
            ]
        );
    }

    // With `overwrite: true`, pre-existing target contents are replaced.
    #[gpui::test]
    async fn test_copy_recursive_with_overwriting(executor: BackgroundExecutor) {
        let fs = FakeFs::new(executor.clone());
        fs.insert_tree(
            path!("/outer"),
            json!({
                "inner1": {
                    "a": "A",
                    "b": "B",
                    "outer": {
                        "inner1": {
                            "a": "B"
                        }
                    }
                },
                "inner2": {
                    "c": "C",
                }
            }),
        )
        .await;

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/inner1/a")),
                PathBuf::from(path!("/outer/inner1/b")),
                PathBuf::from(path!("/outer/inner2/c")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
            ]
        );
        assert_eq!(
            fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
                .await
                .unwrap(),
            "B",
        );

        let source = Path::new(path!("/outer"));
        let target = Path::new(path!("/outer/inner1/outer"));
        copy_recursive(
            fs.as_ref(),
            source,
            target,
            CopyOptions {
                overwrite: true,
                ..Default::default()
            },
        )
        .await
        .unwrap();

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/inner1/a")),
                PathBuf::from(path!("/outer/inner1/b")),
                PathBuf::from(path!("/outer/inner2/c")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
                PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
            ]
        );
        assert_eq!(
            fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
                .await
                .unwrap(),
            "A"
        );
    }

    // With `ignore_if_exists: true`, pre-existing target contents are kept.
    #[gpui::test]
    async fn test_copy_recursive_with_ignoring(executor: BackgroundExecutor) {
        let fs = FakeFs::new(executor.clone());
        fs.insert_tree(
            path!("/outer"),
            json!({
                "inner1": {
                    "a": "A",
                    "b": "B",
                    "outer": {
                        "inner1": {
                            "a": "B"
                        }
                    }
                },
                "inner2": {
                    "c": "C",
                }
            }),
        )
        .await;

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/inner1/a")),
                PathBuf::from(path!("/outer/inner1/b")),
                PathBuf::from(path!("/outer/inner2/c")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
            ]
        );
        assert_eq!(
            fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
                .await
                .unwrap(),
            "B",
        );

        let source = Path::new(path!("/outer"));
        let target = Path::new(path!("/outer/inner1/outer"));
        copy_recursive(
            fs.as_ref(),
            source,
            target,
            CopyOptions {
                ignore_if_exists: true,
                ..Default::default()
            },
        )
        .await
        .unwrap();

        assert_eq!(
            fs.files(),
            vec![
                PathBuf::from(path!("/outer/inner1/a")),
                PathBuf::from(path!("/outer/inner1/b")),
                PathBuf::from(path!("/outer/inner2/c")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
                PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
                PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
            ]
        );
        assert_eq!(
            fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
                .await
                .unwrap(),
            "B"
        );
    }

    // Replacing a file whose handle is still open must succeed.
    #[gpui::test]
    async fn test_realfs_atomic_write(executor: BackgroundExecutor) {
        // With the file handle still open, the file should be replaced
        // https://github.com/zed-industries/zed/issues/30054
        let fs = RealFs {
            bundled_git_binary_path: None,
            executor,
        };
        let temp_dir = TempDir::new().unwrap();
        let file_to_be_replaced = temp_dir.path().join("file.txt");
        let mut file = std::fs::File::create_new(&file_to_be_replaced).unwrap();
        file.write_all(b"Hello").unwrap();
        // drop(file); // We still hold the file handle here
        let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
        assert_eq!(content, "Hello");
        smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "World".into())).unwrap();
        let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
        assert_eq!(content, "World");
    }

    // Atomic write must create the file when it does not exist yet.
    #[gpui::test]
    async fn test_realfs_atomic_write_non_existing_file(executor: BackgroundExecutor) {
        let fs = RealFs {
            bundled_git_binary_path: None,
            executor,
        };
        let temp_dir = TempDir::new().unwrap();
        let file_to_be_replaced = temp_dir.path().join("file.txt");
        smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "Hello".into())).unwrap();
        let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
        assert_eq!(content, "Hello");
    }
}