1#[cfg(target_os = "macos")]
2mod mac_watcher;
3
4#[cfg(not(target_os = "macos"))]
5pub mod fs_watcher;
6
7use parking_lot::Mutex;
8use std::sync::atomic::{AtomicUsize, Ordering};
9use std::time::Instant;
10
11use anyhow::{Context as _, Result, anyhow};
12#[cfg(any(target_os = "linux", target_os = "freebsd"))]
13use ashpd::desktop::trash;
14use futures::stream::iter;
15use gpui::App;
16use gpui::BackgroundExecutor;
17use gpui::Global;
18use gpui::ReadGlobal as _;
19use gpui::SharedString;
20use std::borrow::Cow;
21use util::command::new_smol_command;
22
23#[cfg(unix)]
24use std::os::fd::{AsFd, AsRawFd};
25
26#[cfg(unix)]
27use std::os::unix::fs::{FileTypeExt, MetadataExt};
28
29#[cfg(any(target_os = "macos", target_os = "freebsd"))]
30use std::mem::MaybeUninit;
31
32use async_tar::Archive;
33use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture};
34use git::repository::{GitRepository, RealGitRepository};
35use is_executable::IsExecutable;
36use rope::Rope;
37use serde::{Deserialize, Serialize};
38use smol::io::AsyncWriteExt;
39use std::{
40 io::{self, Write},
41 path::{Component, Path, PathBuf},
42 pin::Pin,
43 sync::Arc,
44 time::{Duration, SystemTime, UNIX_EPOCH},
45};
46use tempfile::TempDir;
47use text::LineEnding;
48
49#[cfg(any(test, feature = "test-support"))]
50mod fake_git_repo;
51#[cfg(any(test, feature = "test-support"))]
52use collections::{BTreeMap, btree_map};
53#[cfg(any(test, feature = "test-support"))]
54use fake_git_repo::FakeGitRepositoryState;
55#[cfg(any(test, feature = "test-support"))]
56use git::{
57 repository::{RepoPath, repo_path},
58 status::{FileStatus, StatusCode, TrackedStatus, UnmergedStatus},
59};
60
61#[cfg(any(test, feature = "test-support"))]
62use smol::io::AsyncReadExt;
63#[cfg(any(test, feature = "test-support"))]
64use std::ffi::OsStr;
65
66#[cfg(any(test, feature = "test-support"))]
67pub use fake_git_repo::{LOAD_HEAD_TEXT_TASK, LOAD_INDEX_TEXT_TASK};
68
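/// Registers and unregisters paths with the platform file-watching backend. An implementation is
/// returned by [`Fs::watch`] alongside the stream of [`PathEvent`]s it feeds.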
69pub trait Watcher: Send + Sync {
70 fn add(&self, path: &Path) -> Result<()>;
71 fn remove(&self, path: &Path) -> Result<()>;
72}
73
74#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
75pub enum PathEventKind {
76 Removed,
77 Created,
78 Changed,
79}
80
81#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
82pub struct PathEvent {
83 pub path: PathBuf,
84 pub kind: Option<PathEventKind>,
85}
86
87impl From<PathEvent> for PathBuf {
88 fn from(event: PathEvent) -> Self {
89 event.path
90 }
91}
92
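/// Async interface over the file system. `RealFs` is the production implementation; `FakeFs`
/// (compiled under `test` or the `test-support` feature) backs tests with an in-memory tree.
///
/// A minimal usage sketch (illustrative, assuming an `fs: Arc<dyn Fs>` is in scope):
///
/// ```ignore
/// let text = fs.load(Path::new("/tmp/notes.txt")).await?;
/// fs.atomic_write(PathBuf::from("/tmp/notes.txt"), text.to_uppercase()).await?;
/// ```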
93#[async_trait::async_trait]
94pub trait Fs: Send + Sync {
95 async fn create_dir(&self, path: &Path) -> Result<()>;
96 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()>;
97 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
98 async fn create_file_with(
99 &self,
100 path: &Path,
101 content: Pin<&mut (dyn AsyncRead + Send)>,
102 ) -> Result<()>;
103 async fn extract_tar_file(
104 &self,
105 path: &Path,
106 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
107 ) -> Result<()>;
108 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
109 async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;
110 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;
111 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
112 self.remove_dir(path, options).await
113 }
114 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;
115 async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
116 self.remove_file(path, options).await
117 }
118 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>>;
119 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>>;
120 async fn load(&self, path: &Path) -> Result<String> {
121 Ok(String::from_utf8(self.load_bytes(path).await?)?)
122 }
123 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>>;
124 async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
125 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()>;
126 async fn write(&self, path: &Path, content: &[u8]) -> Result<()>;
127 async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
128 async fn is_file(&self, path: &Path) -> bool;
129 async fn is_dir(&self, path: &Path) -> bool;
130 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
131 async fn read_link(&self, path: &Path) -> Result<PathBuf>;
132 async fn read_dir(
133 &self,
134 path: &Path,
135 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;
136
137 async fn watch(
138 &self,
139 path: &Path,
140 latency: Duration,
141 ) -> (
142 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
143 Arc<dyn Watcher>,
144 );
145
146 fn open_repo(
147 &self,
148 abs_dot_git: &Path,
149 system_git_binary_path: Option<&Path>,
150 ) -> Option<Arc<dyn GitRepository>>;
151 async fn git_init(&self, abs_work_directory: &Path, fallback_branch_name: String)
152 -> Result<()>;
153 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()>;
154 fn is_fake(&self) -> bool;
155 async fn is_case_sensitive(&self) -> Result<bool>;
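    /// Subscribes to [`JobEvent`]s for long-running jobs (currently emitted by `git_clone`).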
156 fn subscribe_to_jobs(&self) -> JobEventReceiver;
157
158 #[cfg(any(test, feature = "test-support"))]
159 fn as_fake(&self) -> Arc<FakeFs> {
160 panic!("called as_fake on a real fs");
161 }
162}
163
164struct GlobalFs(Arc<dyn Fs>);
165
166impl Global for GlobalFs {}
167
168impl dyn Fs {
169 /// Returns the global [`Fs`].
170 pub fn global(cx: &App) -> Arc<Self> {
171 GlobalFs::global(cx).0.clone()
172 }
173
174 /// Sets the global [`Fs`].
175 pub fn set_global(fs: Arc<Self>, cx: &mut App) {
176 cx.set_global(GlobalFs(fs));
177 }
178}
179
180#[derive(Copy, Clone, Default)]
181pub struct CreateOptions {
182 pub overwrite: bool,
183 pub ignore_if_exists: bool,
184}
185
186#[derive(Copy, Clone, Default)]
187pub struct CopyOptions {
188 pub overwrite: bool,
189 pub ignore_if_exists: bool,
190}
191
192#[derive(Copy, Clone, Default)]
193pub struct RenameOptions {
194 pub overwrite: bool,
195 pub ignore_if_exists: bool,
196 /// Whether to create parent directories if they do not exist.
197 pub create_parents: bool,
198}
199
200#[derive(Copy, Clone, Default)]
201pub struct RemoveOptions {
202 pub recursive: bool,
203 pub ignore_if_not_exists: bool,
204}
205
206#[derive(Copy, Clone, Debug)]
207pub struct Metadata {
208 pub inode: u64,
209 pub mtime: MTime,
210 pub is_symlink: bool,
211 pub is_dir: bool,
212 pub len: u64,
213 pub is_fifo: bool,
214 pub is_executable: bool,
215}
216
217/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
218/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
219/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
220/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
221///
222/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
223#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
224#[serde(transparent)]
225pub struct MTime(SystemTime);
226
227pub type JobId = usize;
228
229#[derive(Clone, Debug)]
230pub struct JobInfo {
231 pub start: Instant,
232 pub message: SharedString,
233 pub id: JobId,
234}
235
236#[derive(Debug, Clone)]
237pub enum JobEvent {
238 Started { info: JobInfo },
239 Completed { id: JobId },
240}
241
242pub type JobEventSender = futures::channel::mpsc::UnboundedSender<JobEvent>;
243pub type JobEventReceiver = futures::channel::mpsc::UnboundedReceiver<JobEvent>;
244
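/// RAII guard for a long-running job: broadcasts `JobEvent::Started` to all subscribers when
/// constructed and `JobEvent::Completed` when dropped, pruning subscribers whose receivers have
/// been closed along the way.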
245struct JobTracker {
246 id: JobId,
247 subscribers: Arc<Mutex<Vec<JobEventSender>>>,
248}
249
250impl JobTracker {
251 fn new(info: JobInfo, subscribers: Arc<Mutex<Vec<JobEventSender>>>) -> Self {
252 let id = info.id;
253 {
254 let mut subs = subscribers.lock();
255 subs.retain(|sender| {
256 sender
257 .unbounded_send(JobEvent::Started { info: info.clone() })
258 .is_ok()
259 });
260 }
261 Self { id, subscribers }
262 }
263}
264
265impl Drop for JobTracker {
266 fn drop(&mut self) {
267 let mut subs = self.subscribers.lock();
268 subs.retain(|sender| {
269 sender
270 .unbounded_send(JobEvent::Completed { id: self.id })
271 .is_ok()
272 });
273 }
274}
275
276impl MTime {
277 /// Conversion intended for persistence and testing.
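    ///
    /// Round-trip sketch (illustrative):
    ///
    /// ```ignore
    /// let mtime = MTime::from_seconds_and_nanos(1_700_000_000, 250);
    /// assert_eq!(mtime.to_seconds_and_nanos_for_persistence(), Some((1_700_000_000, 250)));
    /// ```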
278 pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
279 MTime(UNIX_EPOCH + Duration::new(secs, nanos))
280 }
281
282 /// Conversion intended for persistence.
283 pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
284 self.0
285 .duration_since(UNIX_EPOCH)
286 .ok()
287 .map(|duration| (duration.as_secs(), duration.subsec_nanos()))
288 }
289
290 /// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
291 /// "_for_user" is to discourage misuse - this method should not be used when making decisions
292 /// about file dirtiness.
293 pub fn timestamp_for_user(self) -> SystemTime {
294 self.0
295 }
296
297 /// Temporary method to split out the behavior changes from introduction of this newtype.
298 pub fn bad_is_greater_than(self, other: MTime) -> bool {
299 self.0 > other.0
300 }
301}
302
303impl From<proto::Timestamp> for MTime {
304 fn from(timestamp: proto::Timestamp) -> Self {
305 MTime(timestamp.into())
306 }
307}
308
309impl From<MTime> for proto::Timestamp {
310 fn from(mtime: MTime) -> Self {
311 mtime.0.into()
312 }
313}
314
315pub struct RealFs {
316 bundled_git_binary_path: Option<PathBuf>,
317 executor: BackgroundExecutor,
318 next_job_id: Arc<AtomicUsize>,
319 job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
320}
321
322pub trait FileHandle: Send + Sync + std::fmt::Debug {
323 fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf>;
324}
325
326impl FileHandle for std::fs::File {
327 #[cfg(target_os = "macos")]
328 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
329 use std::{
330 ffi::{CStr, OsStr},
331 os::unix::ffi::OsStrExt,
332 };
333
334 let fd = self.as_fd();
335 let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit();
336
337 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) };
338 anyhow::ensure!(result != -1, "fcntl returned -1");
339
340 // SAFETY: `fcntl` will initialize the path buffer.
341 let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) };
        anyhow::ensure!(!c_str.is_empty(), "Could not find a path for the file handle");
343 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
344 Ok(path)
345 }
346
347 #[cfg(target_os = "linux")]
348 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
349 let fd = self.as_fd();
350 let fd_path = format!("/proc/self/fd/{}", fd.as_raw_fd());
351 let new_path = std::fs::read_link(fd_path)?;
352 if new_path
353 .file_name()
354 .is_some_and(|f| f.to_string_lossy().ends_with(" (deleted)"))
355 {
356 anyhow::bail!("file was deleted")
357 };
358
359 Ok(new_path)
360 }
361
362 #[cfg(target_os = "freebsd")]
363 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
364 use std::{
365 ffi::{CStr, OsStr},
366 os::unix::ffi::OsStrExt,
367 };
368
        let fd = self.as_fd();
        // `F_KINFO` requires `kf_structsize` to be set before the call, so start from a zeroed
        // struct and fill in that one field through the raw pointer.
        let mut kif = MaybeUninit::<libc::kinfo_file>::zeroed();
        unsafe { (*kif.as_mut_ptr()).kf_structsize = libc::KINFO_FILE_SIZE };

        let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) };
        anyhow::ensure!(result != -1, "fcntl returned -1");

        // SAFETY: a successful `F_KINFO` call initializes the struct.
        let kif = unsafe { kif.assume_init() };
        let c_str = unsafe { CStr::from_ptr(kif.kf_path.as_ptr()) };
        anyhow::ensure!(!c_str.is_empty(), "Could not find a path for the file handle");
379 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
380 Ok(path)
381 }
382
383 #[cfg(target_os = "windows")]
384 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
385 use std::ffi::OsString;
386 use std::os::windows::ffi::OsStringExt;
387 use std::os::windows::io::AsRawHandle;
388
389 use windows::Win32::Foundation::HANDLE;
390 use windows::Win32::Storage::FileSystem::{
391 FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW,
392 };
393
394 let handle = HANDLE(self.as_raw_handle() as _);
395
396 // Query required buffer size (in wide chars)
397 let required_len =
398 unsafe { GetFinalPathNameByHandleW(handle, &mut [], FILE_NAME_NORMALIZED) };
399 anyhow::ensure!(
400 required_len != 0,
401 "GetFinalPathNameByHandleW returned 0 length"
402 );
403
404 // Allocate buffer and retrieve the path
405 let mut buf: Vec<u16> = vec![0u16; required_len as usize + 1];
406 let written = unsafe { GetFinalPathNameByHandleW(handle, &mut buf, FILE_NAME_NORMALIZED) };
407 anyhow::ensure!(
408 written != 0,
409 "GetFinalPathNameByHandleW failed to write path"
410 );
411
412 let os_str: OsString = OsString::from_wide(&buf[..written as usize]);
        anyhow::ensure!(!os_str.is_empty(), "Could not find a path for the file handle");
414 Ok(PathBuf::from(os_str))
415 }
416}
417
418pub struct RealWatcher {}
419
420impl RealFs {
421 pub fn new(git_binary_path: Option<PathBuf>, executor: BackgroundExecutor) -> Self {
422 Self {
423 bundled_git_binary_path: git_binary_path,
424 executor,
425 next_job_id: Arc::new(AtomicUsize::new(0)),
426 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
427 }
428 }
429
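    /// Manual canonicalization used on Windows to avoid the verbatim (`\\?\`) paths that
    /// `std::fs::canonicalize` returns: symlinks are resolved component by component, and verbatim
    /// prefixes on resolved link targets are stripped back to their conventional form.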
430 #[cfg(target_os = "windows")]
431 fn canonicalize(path: &Path) -> Result<PathBuf> {
432 let mut strip_prefix = None;
433
434 let mut new_path = PathBuf::new();
435 for component in path.components() {
436 match component {
437 std::path::Component::Prefix(_) => {
438 let component = component.as_os_str();
439 let canonicalized = if component
440 .to_str()
441 .map(|e| e.ends_with("\\"))
442 .unwrap_or(false)
443 {
444 std::fs::canonicalize(component)
445 } else {
446 let mut component = component.to_os_string();
447 component.push("\\");
448 std::fs::canonicalize(component)
449 }?;
450
451 let mut strip = PathBuf::new();
452 for component in canonicalized.components() {
453 match component {
454 Component::Prefix(prefix_component) => {
455 match prefix_component.kind() {
456 std::path::Prefix::Verbatim(os_str) => {
457 strip.push(os_str);
458 }
459 std::path::Prefix::VerbatimUNC(host, share) => {
460 strip.push("\\\\");
461 strip.push(host);
462 strip.push(share);
463 }
464 std::path::Prefix::VerbatimDisk(disk) => {
465 strip.push(format!("{}:", disk as char));
466 }
467 _ => strip.push(component),
468 };
469 }
470 _ => strip.push(component),
471 }
472 }
473 strip_prefix = Some(strip);
474 new_path.push(component);
475 }
476 std::path::Component::RootDir => {
477 new_path.push(component);
478 }
479 std::path::Component::CurDir => {
480 if strip_prefix.is_none() {
481 // unrooted path
482 new_path.push(component);
483 }
484 }
485 std::path::Component::ParentDir => {
486 if strip_prefix.is_some() {
487 // rooted path
488 new_path.pop();
489 } else {
490 new_path.push(component);
491 }
492 }
493 std::path::Component::Normal(_) => {
494 if let Ok(link) = std::fs::read_link(new_path.join(component)) {
495 let link = match &strip_prefix {
496 Some(e) => link.strip_prefix(e).unwrap_or(&link),
497 None => &link,
498 };
499 new_path.extend(link);
500 } else {
501 new_path.push(component);
502 }
503 }
504 }
505 }
506
507 Ok(new_path)
508 }
509}
510
511#[async_trait::async_trait]
512impl Fs for RealFs {
513 async fn create_dir(&self, path: &Path) -> Result<()> {
514 Ok(smol::fs::create_dir_all(path).await?)
515 }
516
517 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
518 #[cfg(unix)]
519 smol::fs::unix::symlink(target, path).await?;
520
521 #[cfg(windows)]
522 if smol::fs::metadata(&target).await?.is_dir() {
523 let status = new_smol_command("cmd")
524 .args(["/C", "mklink", "/J"])
525 .args([path, target.as_path()])
526 .status()
527 .await?;
528
529 if !status.success() {
530 return Err(anyhow::anyhow!(
531 "Failed to create junction from {:?} to {:?}",
532 path,
533 target
534 ));
535 }
536 } else {
537 smol::fs::windows::symlink_file(target, path).await?
538 }
539
540 Ok(())
541 }
542
543 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
544 let mut open_options = smol::fs::OpenOptions::new();
545 open_options.write(true).create(true);
546 if options.overwrite {
547 open_options.truncate(true);
548 } else if !options.ignore_if_exists {
549 open_options.create_new(true);
550 }
551 open_options.open(path).await?;
552 Ok(())
553 }
554
555 async fn create_file_with(
556 &self,
557 path: &Path,
558 content: Pin<&mut (dyn AsyncRead + Send)>,
559 ) -> Result<()> {
560 let mut file = smol::fs::File::create(&path).await?;
561 futures::io::copy(content, &mut file).await?;
562 Ok(())
563 }
564
565 async fn extract_tar_file(
566 &self,
567 path: &Path,
568 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
569 ) -> Result<()> {
570 content.unpack(path).await?;
571 Ok(())
572 }
573
574 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
575 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
576 if options.ignore_if_exists {
577 return Ok(());
578 } else {
579 anyhow::bail!("{target:?} already exists");
580 }
581 }
582
583 smol::fs::copy(source, target).await?;
584 Ok(())
585 }
586
587 async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
588 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
589 if options.ignore_if_exists {
590 return Ok(());
591 } else {
592 anyhow::bail!("{target:?} already exists");
593 }
594 }
595
596 if options.create_parents {
597 if let Some(parent) = target.parent() {
598 self.create_dir(parent).await?;
599 }
600 }
601
602 smol::fs::rename(source, target).await?;
603 Ok(())
604 }
605
606 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
607 let result = if options.recursive {
608 smol::fs::remove_dir_all(path).await
609 } else {
610 smol::fs::remove_dir(path).await
611 };
612 match result {
613 Ok(()) => Ok(()),
614 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
615 Ok(())
616 }
617 Err(err) => Err(err)?,
618 }
619 }
620
621 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
622 #[cfg(windows)]
623 if let Ok(Some(metadata)) = self.metadata(path).await
624 && metadata.is_symlink
625 && metadata.is_dir
626 {
627 self.remove_dir(
628 path,
629 RemoveOptions {
630 recursive: false,
631 ignore_if_not_exists: true,
632 },
633 )
634 .await?;
635 return Ok(());
636 }
637
638 match smol::fs::remove_file(path).await {
639 Ok(()) => Ok(()),
640 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
641 Ok(())
642 }
643 Err(err) => Err(err)?,
644 }
645 }
646
647 #[cfg(target_os = "macos")]
648 async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
649 use cocoa::{
650 base::{id, nil},
651 foundation::{NSAutoreleasePool, NSString},
652 };
653 use objc::{class, msg_send, sel, sel_impl};
654
655 unsafe {
            /// Allow `NSString::alloc` here because the result is immediately autoreleased.
657 #[allow(clippy::disallowed_methods)]
658 unsafe fn ns_string(string: &str) -> id {
659 unsafe { NSString::alloc(nil).init_str(string).autorelease() }
660 }
661
662 let url: id = msg_send![class!(NSURL), fileURLWithPath: ns_string(path.to_string_lossy().as_ref())];
663 let array: id = msg_send![class!(NSArray), arrayWithObject: url];
664 let workspace: id = msg_send![class!(NSWorkspace), sharedWorkspace];
665
666 let _: id = msg_send![workspace, recycleURLs: array completionHandler: nil];
667 }
668 Ok(())
669 }
670
671 #[cfg(any(target_os = "linux", target_os = "freebsd"))]
672 async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
673 if let Ok(Some(metadata)) = self.metadata(path).await
674 && metadata.is_symlink
675 {
676 // TODO: trash_file does not support trashing symlinks yet - https://github.com/bilelmoussaoui/ashpd/issues/255
677 return self.remove_file(path, RemoveOptions::default()).await;
678 }
679 let file = smol::fs::File::open(path).await?;
680 match trash::trash_file(&file.as_fd()).await {
681 Ok(_) => Ok(()),
682 Err(err) => {
683 log::error!("Failed to trash file: {}", err);
684 // Trashing files can fail if you don't have a trashing dbus service configured.
685 // In that case, delete the file directly instead.
686 return self.remove_file(path, RemoveOptions::default()).await;
687 }
688 }
689 }
690
691 #[cfg(target_os = "windows")]
692 async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
693 use util::paths::SanitizedPath;
694 use windows::{
695 Storage::{StorageDeleteOption, StorageFile},
696 core::HSTRING,
697 };
698 // todo(windows)
699 // When new version of `windows-rs` release, make this operation `async`
700 let path = path.canonicalize()?;
701 let path = SanitizedPath::new(&path);
702 let path_string = path.to_string();
703 let file = StorageFile::GetFileFromPathAsync(&HSTRING::from(path_string))?.get()?;
704 file.DeleteAsync(StorageDeleteOption::Default)?.get()?;
705 Ok(())
706 }
707
708 #[cfg(target_os = "macos")]
709 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
710 self.trash_file(path, options).await
711 }
712
713 #[cfg(any(target_os = "linux", target_os = "freebsd"))]
714 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
715 self.trash_file(path, options).await
716 }
717
718 #[cfg(target_os = "windows")]
719 async fn trash_dir(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
720 use util::paths::SanitizedPath;
721 use windows::{
722 Storage::{StorageDeleteOption, StorageFolder},
723 core::HSTRING,
724 };
725
726 // todo(windows)
727 // When new version of `windows-rs` release, make this operation `async`
728 let path = path.canonicalize()?;
729 let path = SanitizedPath::new(&path);
730 let path_string = path.to_string();
731 let folder = StorageFolder::GetFolderFromPathAsync(&HSTRING::from(path_string))?.get()?;
732 folder.DeleteAsync(StorageDeleteOption::Default)?.get()?;
733 Ok(())
734 }
735
736 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
737 Ok(Box::new(std::fs::File::open(path)?))
738 }
739
740 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
741 let mut options = std::fs::OpenOptions::new();
742 options.read(true);
743 #[cfg(windows)]
744 {
745 use std::os::windows::fs::OpenOptionsExt;
746 options.custom_flags(windows::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS.0);
747 }
748 Ok(Arc::new(options.open(path)?))
749 }
750
751 async fn load(&self, path: &Path) -> Result<String> {
752 let path = path.to_path_buf();
753 self.executor
754 .spawn(async move { Ok(std::fs::read_to_string(path)?) })
755 .await
756 }
757
758 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
759 let path = path.to_path_buf();
760 let bytes = self
761 .executor
762 .spawn(async move { std::fs::read(path) })
763 .await?;
764 Ok(bytes)
765 }
766
767 #[cfg(not(target_os = "windows"))]
768 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
769 smol::unblock(move || {
770 // Use the directory of the destination as temp dir to avoid
771 // invalid cross-device link error, and XDG_CACHE_DIR for fallback.
772 // See https://github.com/zed-industries/zed/pull/8437 for more details.
773 let mut tmp_file =
774 tempfile::NamedTempFile::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
775 tmp_file.write_all(data.as_bytes())?;
776 tmp_file.persist(path)?;
777 anyhow::Ok(())
778 })
779 .await?;
780
781 Ok(())
782 }
783
784 #[cfg(target_os = "windows")]
785 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
786 smol::unblock(move || {
787 // If temp dir is set to a different drive than the destination,
788 // we receive error:
789 //
790 // failed to persist temporary file:
791 // The system cannot move the file to a different disk drive. (os error 17)
792 //
793 // This is because `ReplaceFileW` does not support cross volume moves.
794 // See the remark section: "The backup file, replaced file, and replacement file must all reside on the same volume."
795 // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-replacefilew#remarks
796 //
797 // So we use the directory of the destination as a temp dir to avoid it.
798 // https://github.com/zed-industries/zed/issues/16571
799 let temp_dir = TempDir::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
800 let temp_file = {
801 let temp_file_path = temp_dir.path().join("temp_file");
802 let mut file = std::fs::File::create_new(&temp_file_path)?;
803 file.write_all(data.as_bytes())?;
804 temp_file_path
805 };
806 atomic_replace(path.as_path(), temp_file.as_path())?;
807 anyhow::Ok(())
808 })
809 .await?;
810 Ok(())
811 }
812
813 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
814 let buffer_size = text.summary().len.min(10 * 1024);
815 if let Some(path) = path.parent() {
816 self.create_dir(path).await?;
817 }
818 let file = smol::fs::File::create(path).await?;
819 let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
820 for chunk in text::chunks_with_line_ending(text, line_ending) {
821 writer.write_all(chunk.as_bytes()).await?;
822 }
823 writer.flush().await?;
824 Ok(())
825 }
826
827 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
828 if let Some(path) = path.parent() {
829 self.create_dir(path).await?;
830 }
831 let path = path.to_owned();
832 let contents = content.to_owned();
833 self.executor
834 .spawn(async move {
835 std::fs::write(path, contents)?;
836 Ok(())
837 })
838 .await
839 }
840
841 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
842 let path = path.to_owned();
843 self.executor
844 .spawn(async move {
845 #[cfg(target_os = "windows")]
846 let result = Self::canonicalize(&path);
847
848 #[cfg(not(target_os = "windows"))]
849 let result = std::fs::canonicalize(&path);
850
851 result.with_context(|| format!("canonicalizing {path:?}"))
852 })
853 .await
854 }
855
856 async fn is_file(&self, path: &Path) -> bool {
857 let path = path.to_owned();
858 self.executor
859 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) })
860 .await
861 }
862
863 async fn is_dir(&self, path: &Path) -> bool {
864 let path = path.to_owned();
865 self.executor
866 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) })
867 .await
868 }
869
870 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
871 let path_buf = path.to_owned();
872 let symlink_metadata = match self
873 .executor
874 .spawn(async move { std::fs::symlink_metadata(&path_buf) })
875 .await
876 {
877 Ok(metadata) => metadata,
878 Err(err) => {
879 return match err.kind() {
880 io::ErrorKind::NotFound | io::ErrorKind::NotADirectory => Ok(None),
881 _ => Err(anyhow::Error::new(err)),
882 };
883 }
884 };
885
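        // `symlink_metadata` describes the link itself; if the path is a symlink whose target
        // exists, report the target's metadata instead, keeping only `is_symlink` from the link.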
886 let is_symlink = symlink_metadata.file_type().is_symlink();
887 let metadata = if is_symlink {
888 let path_buf = path.to_path_buf();
889 let path_exists = self
890 .executor
891 .spawn(async move {
892 path_buf
893 .try_exists()
894 .with_context(|| format!("checking existence for path {path_buf:?}"))
895 })
896 .await?;
897 if path_exists {
898 let path_buf = path.to_path_buf();
899 self.executor
900 .spawn(async move { std::fs::metadata(path_buf) })
901 .await
                    .with_context(|| format!("accessing symlink for path {path:?}"))?
903 } else {
904 symlink_metadata
905 }
906 } else {
907 symlink_metadata
908 };
909
910 #[cfg(unix)]
911 let inode = metadata.ino();
912
913 #[cfg(windows)]
914 let inode = file_id(path).await?;
915
916 #[cfg(windows)]
917 let is_fifo = false;
918
919 #[cfg(unix)]
920 let is_fifo = metadata.file_type().is_fifo();
921
922 let path_buf = path.to_path_buf();
923 let is_executable = self
924 .executor
925 .spawn(async move { path_buf.is_executable() })
926 .await;
927
928 Ok(Some(Metadata {
929 inode,
930 mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)),
931 len: metadata.len(),
932 is_symlink,
933 is_dir: metadata.file_type().is_dir(),
934 is_fifo,
935 is_executable,
936 }))
937 }
938
939 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
940 let path = path.to_owned();
941 let path = self
942 .executor
943 .spawn(async move { std::fs::read_link(&path) })
944 .await?;
945 Ok(path)
946 }
947
948 async fn read_dir(
949 &self,
950 path: &Path,
951 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
952 let path = path.to_owned();
953 let result = iter(
954 self.executor
955 .spawn(async move { std::fs::read_dir(path) })
956 .await?,
957 )
958 .map(|entry| match entry {
959 Ok(entry) => Ok(entry.path()),
960 Err(error) => Err(anyhow!("failed to read dir entry {error:?}")),
961 });
962 Ok(Box::pin(result))
963 }
964
965 #[cfg(target_os = "macos")]
966 async fn watch(
967 &self,
968 path: &Path,
969 latency: Duration,
970 ) -> (
971 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
972 Arc<dyn Watcher>,
973 ) {
974 use fsevent::StreamFlags;
975
976 let (events_tx, events_rx) = smol::channel::unbounded();
977 let handles = Arc::new(parking_lot::Mutex::new(collections::BTreeMap::default()));
978 let watcher = Arc::new(mac_watcher::MacWatcher::new(
979 events_tx,
980 Arc::downgrade(&handles),
981 latency,
982 ));
983 watcher.add(path).expect("handles can't be dropped");
984
985 (
986 Box::pin(
987 events_rx
988 .map(|events| {
989 events
990 .into_iter()
991 .map(|event| {
992 log::trace!("fs path event: {event:?}");
993 let kind = if event.flags.contains(StreamFlags::ITEM_REMOVED) {
994 Some(PathEventKind::Removed)
995 } else if event.flags.contains(StreamFlags::ITEM_CREATED) {
996 Some(PathEventKind::Created)
997 } else if event.flags.contains(StreamFlags::ITEM_MODIFIED)
998 | event.flags.contains(StreamFlags::ITEM_RENAMED)
999 {
1000 Some(PathEventKind::Changed)
1001 } else {
1002 None
1003 };
1004 PathEvent {
1005 path: event.path,
1006 kind,
1007 }
1008 })
1009 .collect()
1010 })
1011 .chain(futures::stream::once(async move {
1012 drop(handles);
1013 vec![]
1014 })),
1015 ),
1016 watcher,
1017 )
1018 }
1019
1020 #[cfg(not(target_os = "macos"))]
1021 async fn watch(
1022 &self,
1023 path: &Path,
1024 latency: Duration,
1025 ) -> (
1026 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
1027 Arc<dyn Watcher>,
1028 ) {
1029 use util::{ResultExt as _, paths::SanitizedPath};
1030
1031 let (tx, rx) = smol::channel::unbounded();
1032 let pending_paths: Arc<Mutex<Vec<PathEvent>>> = Default::default();
1033 let watcher = Arc::new(fs_watcher::FsWatcher::new(tx, pending_paths.clone()));
1034
1035 // If the path doesn't exist yet (e.g. settings.json), watch the parent dir to learn when it's created.
1036 if let Err(e) = watcher.add(path)
1037 && let Some(parent) = path.parent()
1038 && let Err(parent_e) = watcher.add(parent)
1039 {
1040 log::warn!(
1041 "Failed to watch {} and its parent directory {}:\n{e}\n{parent_e}",
1042 path.display(),
1043 parent.display()
1044 );
1045 }
1046
1047 // Check if path is a symlink and follow the target parent
        if let Ok(mut target) = self.read_link(path).await {
1049 log::trace!("watch symlink {path:?} -> {target:?}");
1050 // Check if symlink target is relative path, if so make it absolute
1051 if target.is_relative()
1052 && let Some(parent) = path.parent()
1053 {
1054 target = parent.join(target);
1055 if let Ok(canonical) = self.canonicalize(&target).await {
1056 target = SanitizedPath::new(&canonical).as_path().to_path_buf();
1057 }
1058 }
1059 watcher.add(&target).ok();
1060 if let Some(parent) = target.parent() {
1061 watcher.add(parent).log_err();
1062 }
1063 }
1064
1065 (
1066 Box::pin(rx.filter_map({
1067 let watcher = watcher.clone();
1068 move |_| {
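                    // Keep a handle to the watcher so it stays alive for as long as the stream
                    // is being polled.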
1069 let _ = watcher.clone();
1070 let pending_paths = pending_paths.clone();
1071 async move {
1072 smol::Timer::after(latency).await;
1073 let paths = std::mem::take(&mut *pending_paths.lock());
1074 (!paths.is_empty()).then_some(paths)
1075 }
1076 }
1077 })),
1078 watcher,
1079 )
1080 }
1081
1082 fn open_repo(
1083 &self,
1084 dotgit_path: &Path,
1085 system_git_binary_path: Option<&Path>,
1086 ) -> Option<Arc<dyn GitRepository>> {
1087 Some(Arc::new(RealGitRepository::new(
1088 dotgit_path,
1089 self.bundled_git_binary_path.clone(),
1090 system_git_binary_path.map(|path| path.to_path_buf()),
1091 self.executor.clone(),
1092 )?))
1093 }
1094
1095 async fn git_init(
1096 &self,
1097 abs_work_directory_path: &Path,
1098 fallback_branch_name: String,
1099 ) -> Result<()> {
1100 let config = new_smol_command("git")
1101 .current_dir(abs_work_directory_path)
1102 .args(&["config", "--global", "--get", "init.defaultBranch"])
1103 .output()
1104 .await?;
1105
        let branch_name = if config.status.success() && !config.stdout.is_empty() {
            String::from_utf8_lossy(&config.stdout)
        } else {
            Cow::Borrowed(fallback_branch_name.as_str())
        };
1113
1114 new_smol_command("git")
1115 .current_dir(abs_work_directory_path)
1116 .args(&["init", "-b"])
1117 .arg(branch_name.trim())
1118 .output()
1119 .await?;
1120
1121 Ok(())
1122 }
1123
1124 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()> {
1125 let job_id = self.next_job_id.fetch_add(1, Ordering::SeqCst);
1126 let job_info = JobInfo {
1127 id: job_id,
1128 start: Instant::now(),
1129 message: SharedString::from(format!("Cloning {}", repo_url)),
1130 };
1131
1132 let _job_tracker = JobTracker::new(job_info, self.job_event_subscribers.clone());
1133
1134 let output = new_smol_command("git")
1135 .current_dir(abs_work_directory)
1136 .args(&["clone", repo_url])
1137 .output()
1138 .await?;
1139
1140 if !output.status.success() {
1141 anyhow::bail!(
1142 "git clone failed: {}",
1143 String::from_utf8_lossy(&output.stderr)
1144 );
1145 }
1146
1147 Ok(())
1148 }
1149
1150 fn is_fake(&self) -> bool {
1151 false
1152 }
1153
1154 fn subscribe_to_jobs(&self) -> JobEventReceiver {
1155 let (sender, receiver) = futures::channel::mpsc::unbounded();
1156 self.job_event_subscribers.lock().push(sender);
1157 receiver
1158 }
1159
1160 /// Checks whether the file system is case sensitive by attempting to create two files
1161 /// that have the same name except for the casing.
1162 ///
1163 /// It creates both files in a temporary directory it removes at the end.
1164 async fn is_case_sensitive(&self) -> Result<bool> {
1165 let temp_dir = TempDir::new()?;
1166 let test_file_1 = temp_dir.path().join("case_sensitivity_test.tmp");
1167 let test_file_2 = temp_dir.path().join("CASE_SENSITIVITY_TEST.TMP");
1168
1169 let create_opts = CreateOptions {
1170 overwrite: false,
1171 ignore_if_exists: false,
1172 };
1173
1174 // Create file1
1175 self.create_file(&test_file_1, create_opts).await?;
1176
1177 // Now check whether it's possible to create file2
1178 let case_sensitive = match self.create_file(&test_file_2, create_opts).await {
1179 Ok(_) => Ok(true),
1180 Err(e) => {
1181 if let Some(io_error) = e.downcast_ref::<io::Error>() {
1182 if io_error.kind() == io::ErrorKind::AlreadyExists {
1183 Ok(false)
1184 } else {
1185 Err(e)
1186 }
1187 } else {
1188 Err(e)
1189 }
1190 }
1191 };
1192
1193 temp_dir.close()?;
1194 case_sensitive
1195 }
1196}
1197
1198#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
1199impl Watcher for RealWatcher {
1200 fn add(&self, _: &Path) -> Result<()> {
1201 Ok(())
1202 }
1203
1204 fn remove(&self, _: &Path) -> Result<()> {
1205 Ok(())
1206 }
1207}
1208
1209#[cfg(any(test, feature = "test-support"))]
1210pub struct FakeFs {
1211 this: std::sync::Weak<Self>,
1212 // Use an unfair lock to ensure tests are deterministic.
1213 state: Arc<Mutex<FakeFsState>>,
1214 executor: gpui::BackgroundExecutor,
1215}
1216
1217#[cfg(any(test, feature = "test-support"))]
1218struct FakeFsState {
1219 root: FakeFsEntry,
1220 next_inode: u64,
1221 next_mtime: SystemTime,
1222 git_event_tx: smol::channel::Sender<PathBuf>,
1223 event_txs: Vec<(PathBuf, smol::channel::Sender<Vec<PathEvent>>)>,
1224 events_paused: bool,
1225 buffered_events: Vec<PathEvent>,
1226 metadata_call_count: usize,
1227 read_dir_call_count: usize,
1228 path_write_counts: std::collections::HashMap<PathBuf, usize>,
1229 moves: std::collections::HashMap<u64, PathBuf>,
1230 job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
1231}
1232
1233#[cfg(any(test, feature = "test-support"))]
1234#[derive(Clone, Debug)]
1235enum FakeFsEntry {
1236 File {
1237 inode: u64,
1238 mtime: MTime,
1239 len: u64,
1240 content: Vec<u8>,
1241 // The path to the repository state directory, if this is a gitfile.
1242 git_dir_path: Option<PathBuf>,
1243 },
1244 Dir {
1245 inode: u64,
1246 mtime: MTime,
1247 len: u64,
1248 entries: BTreeMap<String, FakeFsEntry>,
1249 git_repo_state: Option<Arc<Mutex<FakeGitRepositoryState>>>,
1250 },
1251 Symlink {
1252 target: PathBuf,
1253 },
1254}
1255
1256#[cfg(any(test, feature = "test-support"))]
1257impl PartialEq for FakeFsEntry {
1258 fn eq(&self, other: &Self) -> bool {
1259 match (self, other) {
1260 (
1261 Self::File {
1262 inode: l_inode,
1263 mtime: l_mtime,
1264 len: l_len,
1265 content: l_content,
1266 git_dir_path: l_git_dir_path,
1267 },
1268 Self::File {
1269 inode: r_inode,
1270 mtime: r_mtime,
1271 len: r_len,
1272 content: r_content,
1273 git_dir_path: r_git_dir_path,
1274 },
1275 ) => {
1276 l_inode == r_inode
1277 && l_mtime == r_mtime
1278 && l_len == r_len
1279 && l_content == r_content
1280 && l_git_dir_path == r_git_dir_path
1281 }
1282 (
1283 Self::Dir {
1284 inode: l_inode,
1285 mtime: l_mtime,
1286 len: l_len,
1287 entries: l_entries,
1288 git_repo_state: l_git_repo_state,
1289 },
1290 Self::Dir {
1291 inode: r_inode,
1292 mtime: r_mtime,
1293 len: r_len,
1294 entries: r_entries,
1295 git_repo_state: r_git_repo_state,
1296 },
1297 ) => {
1298 let same_repo_state = match (l_git_repo_state.as_ref(), r_git_repo_state.as_ref()) {
1299 (Some(l), Some(r)) => Arc::ptr_eq(l, r),
1300 (None, None) => true,
1301 _ => false,
1302 };
1303 l_inode == r_inode
1304 && l_mtime == r_mtime
1305 && l_len == r_len
1306 && l_entries == r_entries
1307 && same_repo_state
1308 }
1309 (Self::Symlink { target: l_target }, Self::Symlink { target: r_target }) => {
1310 l_target == r_target
1311 }
1312 _ => false,
1313 }
1314 }
1315}
1316
1317#[cfg(any(test, feature = "test-support"))]
1318impl FakeFsState {
1319 fn get_and_increment_mtime(&mut self) -> MTime {
1320 let mtime = self.next_mtime;
1321 self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
1322 MTime(mtime)
1323 }
1324
1325 fn get_and_increment_inode(&mut self) -> u64 {
1326 let inode = self.next_inode;
1327 self.next_inode += 1;
1328 inode
1329 }
1330
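    /// Resolves `target` against the in-memory tree, normalizing `.` and `..` components and
    /// following symlinks (always for intermediate components, and for the final component only
    /// when `follow_symlink` is true). Returns `None` if any component does not exist.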
1331 fn canonicalize(&self, target: &Path, follow_symlink: bool) -> Option<PathBuf> {
1332 let mut canonical_path = PathBuf::new();
1333 let mut path = target.to_path_buf();
1334 let mut entry_stack = Vec::new();
1335 'outer: loop {
1336 let mut path_components = path.components().peekable();
1337 let mut prefix = None;
1338 while let Some(component) = path_components.next() {
1339 match component {
1340 Component::Prefix(prefix_component) => prefix = Some(prefix_component),
1341 Component::RootDir => {
1342 entry_stack.clear();
1343 entry_stack.push(&self.root);
1344 canonical_path.clear();
1345 match prefix {
1346 Some(prefix_component) => {
1347 canonical_path = PathBuf::from(prefix_component.as_os_str());
1348 // Prefixes like `C:\\` are represented without their trailing slash, so we have to re-add it.
1349 canonical_path.push(std::path::MAIN_SEPARATOR_STR);
1350 }
1351 None => canonical_path = PathBuf::from(std::path::MAIN_SEPARATOR_STR),
1352 }
1353 }
1354 Component::CurDir => {}
1355 Component::ParentDir => {
1356 entry_stack.pop()?;
1357 canonical_path.pop();
1358 }
1359 Component::Normal(name) => {
1360 let current_entry = *entry_stack.last()?;
1361 if let FakeFsEntry::Dir { entries, .. } = current_entry {
1362 let entry = entries.get(name.to_str().unwrap())?;
1363 if (path_components.peek().is_some() || follow_symlink)
1364 && let FakeFsEntry::Symlink { target, .. } = entry
1365 {
1366 let mut target = target.clone();
1367 target.extend(path_components);
1368 path = target;
1369 continue 'outer;
1370 }
1371 entry_stack.push(entry);
1372 canonical_path = canonical_path.join(name);
1373 } else {
1374 return None;
1375 }
1376 }
1377 }
1378 }
1379 break;
1380 }
1381
1382 if entry_stack.is_empty() {
1383 None
1384 } else {
1385 Some(canonical_path)
1386 }
1387 }
1388
1389 fn try_entry(
1390 &mut self,
1391 target: &Path,
1392 follow_symlink: bool,
1393 ) -> Option<(&mut FakeFsEntry, PathBuf)> {
1394 let canonical_path = self.canonicalize(target, follow_symlink)?;
1395
1396 let mut components = canonical_path
1397 .components()
1398 .skip_while(|component| matches!(component, Component::Prefix(_)));
1399 let Some(Component::RootDir) = components.next() else {
1400 panic!(
1401 "the path {:?} was not canonicalized properly {:?}",
1402 target, canonical_path
1403 )
1404 };
1405
1406 let mut entry = &mut self.root;
1407 for component in components {
1408 match component {
1409 Component::Normal(name) => {
1410 if let FakeFsEntry::Dir { entries, .. } = entry {
1411 entry = entries.get_mut(name.to_str().unwrap())?;
1412 } else {
1413 return None;
1414 }
1415 }
1416 _ => {
1417 panic!(
1418 "the path {:?} was not canonicalized properly {:?}",
1419 target, canonical_path
1420 )
1421 }
1422 }
1423 }
1424
1425 Some((entry, canonical_path))
1426 }
1427
1428 fn entry(&mut self, target: &Path) -> Result<&mut FakeFsEntry> {
1429 Ok(self
1430 .try_entry(target, true)
1431 .ok_or_else(|| {
1432 anyhow!(io::Error::new(
1433 io::ErrorKind::NotFound,
1434 format!("not found: {target:?}")
1435 ))
1436 })?
1437 .0)
1438 }
1439
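    /// Resolves the parent directory of `path` and passes the entry slot for `path`'s file name
    /// (vacant or occupied) to `callback`, letting callers create or mutate the entry in place.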
1440 fn write_path<Fn, T>(&mut self, path: &Path, callback: Fn) -> Result<T>
1441 where
1442 Fn: FnOnce(btree_map::Entry<String, FakeFsEntry>) -> Result<T>,
1443 {
1444 let path = normalize_path(path);
1445 let filename = path.file_name().context("cannot overwrite the root")?;
1446 let parent_path = path.parent().unwrap();
1447
1448 let parent = self.entry(parent_path)?;
1449 let new_entry = parent
1450 .dir_entries(parent_path)?
1451 .entry(filename.to_str().unwrap().into());
1452 callback(new_entry)
1453 }
1454
1455 fn emit_event<I, T>(&mut self, paths: I)
1456 where
1457 I: IntoIterator<Item = (T, Option<PathEventKind>)>,
1458 T: Into<PathBuf>,
1459 {
1460 self.buffered_events
1461 .extend(paths.into_iter().map(|(path, kind)| PathEvent {
1462 path: path.into(),
1463 kind,
1464 }));
1465
1466 if !self.events_paused {
1467 self.flush_events(self.buffered_events.len());
1468 }
1469 }
1470
1471 fn flush_events(&mut self, mut count: usize) {
1472 count = count.min(self.buffered_events.len());
1473 let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
1474 self.event_txs.retain(|(_, tx)| {
1475 let _ = tx.try_send(events.clone());
1476 !tx.is_closed()
1477 });
1478 }
1479}
1480
1481#[cfg(any(test, feature = "test-support"))]
1482pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
1483 std::sync::LazyLock::new(|| OsStr::new(".git"));
1484
1485#[cfg(any(test, feature = "test-support"))]
1486impl FakeFs {
1487 /// We need to use something large enough for Windows and Unix to consider this a new file.
1488 /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
1489 const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);
1490
1491 pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
1492 let (tx, rx) = smol::channel::bounded::<PathBuf>(10);
1493
1494 let this = Arc::new_cyclic(|this| Self {
1495 this: this.clone(),
1496 executor: executor.clone(),
1497 state: Arc::new(Mutex::new(FakeFsState {
1498 root: FakeFsEntry::Dir {
1499 inode: 0,
1500 mtime: MTime(UNIX_EPOCH),
1501 len: 0,
1502 entries: Default::default(),
1503 git_repo_state: None,
1504 },
1505 git_event_tx: tx,
1506 next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
1507 next_inode: 1,
1508 event_txs: Default::default(),
1509 buffered_events: Vec::new(),
1510 events_paused: false,
1511 read_dir_call_count: 0,
1512 metadata_call_count: 0,
1513 path_write_counts: Default::default(),
1514 moves: Default::default(),
1515 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
1516 })),
1517 });
1518
1519 executor.spawn({
1520 let this = this.clone();
1521 async move {
1522 while let Ok(git_event) = rx.recv().await {
1523 if let Some(mut state) = this.state.try_lock() {
1524 state.emit_event([(git_event, Some(PathEventKind::Changed))]);
1525 } else {
1526 panic!("Failed to lock file system state, this execution would have caused a test hang");
1527 }
1528 }
1529 }
1530 }).detach();
1531
1532 this
1533 }
1534
1535 pub fn set_next_mtime(&self, next_mtime: SystemTime) {
1536 let mut state = self.state.lock();
1537 state.next_mtime = next_mtime;
1538 }
1539
1540 pub fn get_and_increment_mtime(&self) -> MTime {
1541 let mut state = self.state.lock();
1542 state.get_and_increment_mtime()
1543 }
1544
1545 pub async fn touch_path(&self, path: impl AsRef<Path>) {
1546 let mut state = self.state.lock();
1547 let path = path.as_ref();
1548 let new_mtime = state.get_and_increment_mtime();
1549 let new_inode = state.get_and_increment_inode();
1550 state
1551 .write_path(path, move |entry| {
1552 match entry {
1553 btree_map::Entry::Vacant(e) => {
1554 e.insert(FakeFsEntry::File {
1555 inode: new_inode,
1556 mtime: new_mtime,
1557 content: Vec::new(),
1558 len: 0,
1559 git_dir_path: None,
1560 });
1561 }
1562 btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut() {
1563 FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
1564 FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
1565 FakeFsEntry::Symlink { .. } => {}
1566 },
1567 }
1568 Ok(())
1569 })
1570 .unwrap();
1571 state.emit_event([(path.to_path_buf(), Some(PathEventKind::Changed))]);
1572 }
1573
1574 pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
1575 self.write_file_internal(path, content, true).unwrap()
1576 }
1577
1578 pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
1579 let mut state = self.state.lock();
1580 let path = path.as_ref();
1581 let file = FakeFsEntry::Symlink { target };
1582 state
1583 .write_path(path.as_ref(), move |e| match e {
1584 btree_map::Entry::Vacant(e) => {
1585 e.insert(file);
1586 Ok(())
1587 }
1588 btree_map::Entry::Occupied(mut e) => {
1589 *e.get_mut() = file;
1590 Ok(())
1591 }
1592 })
1593 .unwrap();
1594 state.emit_event([(path, Some(PathEventKind::Created))]);
1595 }
1596
1597 fn write_file_internal(
1598 &self,
1599 path: impl AsRef<Path>,
1600 new_content: Vec<u8>,
1601 recreate_inode: bool,
1602 ) -> Result<()> {
1603 let mut state = self.state.lock();
1604 let path_buf = path.as_ref().to_path_buf();
1605 *state.path_write_counts.entry(path_buf).or_insert(0) += 1;
1606 let new_inode = state.get_and_increment_inode();
1607 let new_mtime = state.get_and_increment_mtime();
1608 let new_len = new_content.len() as u64;
1609 let mut kind = None;
1610 state.write_path(path.as_ref(), |entry| {
1611 match entry {
1612 btree_map::Entry::Vacant(e) => {
1613 kind = Some(PathEventKind::Created);
1614 e.insert(FakeFsEntry::File {
1615 inode: new_inode,
1616 mtime: new_mtime,
1617 len: new_len,
1618 content: new_content,
1619 git_dir_path: None,
1620 });
1621 }
1622 btree_map::Entry::Occupied(mut e) => {
1623 kind = Some(PathEventKind::Changed);
1624 if let FakeFsEntry::File {
1625 inode,
1626 mtime,
1627 len,
1628 content,
1629 ..
1630 } = e.get_mut()
1631 {
1632 *mtime = new_mtime;
1633 *content = new_content;
1634 *len = new_len;
1635 if recreate_inode {
1636 *inode = new_inode;
1637 }
1638 } else {
1639 anyhow::bail!("not a file")
1640 }
1641 }
1642 }
1643 Ok(())
1644 })?;
1645 state.emit_event([(path.as_ref(), kind)]);
1646 Ok(())
1647 }
1648
1649 pub fn read_file_sync(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1650 let path = path.as_ref();
1651 let path = normalize_path(path);
1652 let mut state = self.state.lock();
1653 let entry = state.entry(&path)?;
1654 entry.file_content(&path).cloned()
1655 }
1656
1657 async fn load_internal(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1658 let path = path.as_ref();
1659 let path = normalize_path(path);
1660 self.simulate_random_delay().await;
1661 let mut state = self.state.lock();
1662 let entry = state.entry(&path)?;
1663 entry.file_content(&path).cloned()
1664 }
1665
1666 pub fn pause_events(&self) {
1667 self.state.lock().events_paused = true;
1668 }
1669
1670 pub fn unpause_events_and_flush(&self) {
1671 self.state.lock().events_paused = false;
1672 self.flush_events(usize::MAX);
1673 }
1674
1675 pub fn buffered_event_count(&self) -> usize {
1676 self.state.lock().buffered_events.len()
1677 }
1678
1679 pub fn flush_events(&self, count: usize) {
1680 self.state.lock().flush_events(count);
1681 }
1682
1683 pub(crate) fn entry(&self, target: &Path) -> Result<FakeFsEntry> {
1684 self.state.lock().entry(target).cloned()
1685 }
1686
1687 pub(crate) fn insert_entry(&self, target: &Path, new_entry: FakeFsEntry) -> Result<()> {
1688 let mut state = self.state.lock();
1689 state.write_path(target, |entry| {
1690 match entry {
1691 btree_map::Entry::Vacant(vacant_entry) => {
1692 vacant_entry.insert(new_entry);
1693 }
1694 btree_map::Entry::Occupied(mut occupied_entry) => {
1695 occupied_entry.insert(new_entry);
1696 }
1697 }
1698 Ok(())
1699 })
1700 }
1701
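    /// Builds a directory tree from a JSON value: objects become directories, strings become file
    /// contents, and `null` becomes an empty directory.
    ///
    /// ```ignore
    /// fs.insert_tree("/root", serde_json::json!({
    ///     "src": { "main.rs": "fn main() {}" },
    ///     "assets": null,
    /// }))
    /// .await;
    /// ```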
1702 #[must_use]
1703 pub fn insert_tree<'a>(
1704 &'a self,
1705 path: impl 'a + AsRef<Path> + Send,
1706 tree: serde_json::Value,
1707 ) -> futures::future::BoxFuture<'a, ()> {
1708 use futures::FutureExt as _;
1709 use serde_json::Value::*;
1710
1711 async move {
1712 let path = path.as_ref();
1713
1714 match tree {
1715 Object(map) => {
1716 self.create_dir(path).await.unwrap();
1717 for (name, contents) in map {
1718 let mut path = PathBuf::from(path);
1719 path.push(name);
1720 self.insert_tree(&path, contents).await;
1721 }
1722 }
1723 Null => {
1724 self.create_dir(path).await.unwrap();
1725 }
1726 String(contents) => {
1727 self.insert_file(&path, contents.into_bytes()).await;
1728 }
1729 _ => {
1730 panic!("JSON object must contain only objects, strings, or null");
1731 }
1732 }
1733 }
1734 .boxed()
1735 }
1736
1737 pub fn insert_tree_from_real_fs<'a>(
1738 &'a self,
1739 path: impl 'a + AsRef<Path> + Send,
1740 src_path: impl 'a + AsRef<Path> + Send,
1741 ) -> futures::future::BoxFuture<'a, ()> {
1742 use futures::FutureExt as _;
1743
1744 async move {
1745 let path = path.as_ref();
1746 if std::fs::metadata(&src_path).unwrap().is_file() {
1747 let contents = std::fs::read(src_path).unwrap();
1748 self.insert_file(path, contents).await;
1749 } else {
1750 self.create_dir(path).await.unwrap();
1751 for entry in std::fs::read_dir(&src_path).unwrap() {
1752 let entry = entry.unwrap();
1753 self.insert_tree_from_real_fs(path.join(entry.file_name()), entry.path())
1754 .await;
1755 }
1756 }
1757 }
1758 .boxed()
1759 }
1760
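    /// Runs `f` against the fake git state for `dot_git`, creating that state on first use.
    /// `dot_git` may be a `.git` directory or a gitfile whose `gitdir:` line points at the real
    /// state directory; `f` receives the repository state along with the resolved git dir and
    /// common dir paths.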
1761 pub fn with_git_state_and_paths<T, F>(
1762 &self,
1763 dot_git: &Path,
1764 emit_git_event: bool,
1765 f: F,
1766 ) -> Result<T>
1767 where
1768 F: FnOnce(&mut FakeGitRepositoryState, &Path, &Path) -> T,
1769 {
1770 let mut state = self.state.lock();
1771 let git_event_tx = state.git_event_tx.clone();
1772 let entry = state.entry(dot_git).context("open .git")?;
1773
1774 if let FakeFsEntry::Dir { git_repo_state, .. } = entry {
1775 let repo_state = git_repo_state.get_or_insert_with(|| {
1776 log::debug!("insert git state for {dot_git:?}");
1777 Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
1778 });
1779 let mut repo_state = repo_state.lock();
1780
1781 let result = f(&mut repo_state, dot_git, dot_git);
1782
1783 drop(repo_state);
1784 if emit_git_event {
1785 state.emit_event([(dot_git, Some(PathEventKind::Changed))]);
1786 }
1787
1788 Ok(result)
1789 } else if let FakeFsEntry::File {
1790 content,
1791 git_dir_path,
1792 ..
1793 } = &mut *entry
1794 {
1795 let path = match git_dir_path {
1796 Some(path) => path,
1797 None => {
1798 let path = std::str::from_utf8(content)
1799 .ok()
1800 .and_then(|content| content.strip_prefix("gitdir:"))
1801 .context("not a valid gitfile")?
1802 .trim();
1803 git_dir_path.insert(normalize_path(&dot_git.parent().unwrap().join(path)))
1804 }
1805 }
1806 .clone();
1807 let Some((git_dir_entry, canonical_path)) = state.try_entry(&path, true) else {
1808 anyhow::bail!("pointed-to git dir {path:?} not found")
1809 };
1810 let FakeFsEntry::Dir {
1811 git_repo_state,
1812 entries,
1813 ..
1814 } = git_dir_entry
1815 else {
1816 anyhow::bail!("gitfile points to a non-directory")
1817 };
1818 let common_dir = if let Some(child) = entries.get("commondir") {
1819 Path::new(
1820 std::str::from_utf8(child.file_content("commondir".as_ref())?)
1821 .context("commondir content")?,
1822 )
1823 .to_owned()
1824 } else {
1825 canonical_path.clone()
1826 };
1827 let repo_state = git_repo_state.get_or_insert_with(|| {
1828 Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
1829 });
1830 let mut repo_state = repo_state.lock();
1831
1832 let result = f(&mut repo_state, &canonical_path, &common_dir);
1833
1834 if emit_git_event {
1835 drop(repo_state);
1836 state.emit_event([(canonical_path, Some(PathEventKind::Changed))]);
1837 }
1838
1839 Ok(result)
1840 } else {
1841 anyhow::bail!("not a valid git repository");
1842 }
1843 }
1844
1845 pub fn with_git_state<T, F>(&self, dot_git: &Path, emit_git_event: bool, f: F) -> Result<T>
1846 where
1847 F: FnOnce(&mut FakeGitRepositoryState) -> T,
1848 {
1849 self.with_git_state_and_paths(dot_git, emit_git_event, |state, _, _| f(state))
1850 }
1851
1852 pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
1853 self.with_git_state(dot_git, true, |state| {
1854 let branch = branch.map(Into::into);
1855 state.branches.extend(branch.clone());
1856 state.current_branch_name = branch
1857 })
1858 .unwrap();
1859 }
1860
1861 pub fn set_remote_for_repo(
1862 &self,
1863 dot_git: &Path,
1864 name: impl Into<String>,
1865 url: impl Into<String>,
1866 ) {
1867 self.with_git_state(dot_git, true, |state| {
1868 state.remotes.insert(name.into(), url.into());
1869 })
1870 .unwrap();
1871 }
1872
1873 pub fn insert_branches(&self, dot_git: &Path, branches: &[&str]) {
1874 self.with_git_state(dot_git, true, |state| {
1875 if let Some(first) = branches.first()
1876 && state.current_branch_name.is_none()
1877 {
1878 state.current_branch_name = Some(first.to_string())
1879 }
1880 state
1881 .branches
1882 .extend(branches.iter().map(ToString::to_string));
1883 })
1884 .unwrap();
1885 }
1886
1887 pub fn set_unmerged_paths_for_repo(
1888 &self,
1889 dot_git: &Path,
1890 unmerged_state: &[(RepoPath, UnmergedStatus)],
1891 ) {
1892 self.with_git_state(dot_git, true, |state| {
1893 state.unmerged_paths.clear();
1894 state.unmerged_paths.extend(
1895 unmerged_state
1896 .iter()
1897 .map(|(path, content)| (path.clone(), *content)),
1898 );
1899 })
1900 .unwrap();
1901 }
1902
1903 pub fn set_index_for_repo(&self, dot_git: &Path, index_state: &[(&str, String)]) {
1904 self.with_git_state(dot_git, true, |state| {
1905 state.index_contents.clear();
1906 state.index_contents.extend(
1907 index_state
1908 .iter()
1909 .map(|(path, content)| (repo_path(path), content.clone())),
1910 );
1911 })
1912 .unwrap();
1913 }
1914
1915 pub fn set_head_for_repo(
1916 &self,
1917 dot_git: &Path,
1918 head_state: &[(&str, String)],
1919 sha: impl Into<String>,
1920 ) {
1921 self.with_git_state(dot_git, true, |state| {
1922 state.head_contents.clear();
1923 state.head_contents.extend(
1924 head_state
1925 .iter()
1926 .map(|(path, content)| (repo_path(path), content.clone())),
1927 );
1928 state.refs.insert("HEAD".into(), sha.into());
1929 })
1930 .unwrap();
1931 }
1932
1933 pub fn set_head_and_index_for_repo(&self, dot_git: &Path, contents_by_path: &[(&str, String)]) {
1934 self.with_git_state(dot_git, true, |state| {
1935 state.head_contents.clear();
1936 state.head_contents.extend(
1937 contents_by_path
1938 .iter()
1939 .map(|(path, contents)| (repo_path(path), contents.clone())),
1940 );
1941 state.index_contents = state.head_contents.clone();
1942 })
1943 .unwrap();
1944 }
1945
1946 pub fn set_merge_base_content_for_repo(
1947 &self,
1948 dot_git: &Path,
1949 contents_by_path: &[(&str, String)],
1950 ) {
1951 self.with_git_state(dot_git, true, |state| {
1952 use git::Oid;
1953
1954 state.merge_base_contents.clear();
1955 let oids = (1..)
1956 .map(|n| n.to_string())
1957 .map(|n| Oid::from_bytes(n.repeat(20).as_bytes()).unwrap());
1958 for ((path, content), oid) in contents_by_path.iter().zip(oids) {
1959 state.merge_base_contents.insert(repo_path(path), oid);
1960 state.oids.insert(oid, content.clone());
1961 }
1962 })
1963 .unwrap();
1964 }
1965
1966 pub fn set_blame_for_repo(&self, dot_git: &Path, blames: Vec<(RepoPath, git::blame::Blame)>) {
1967 self.with_git_state(dot_git, true, |state| {
1968 state.blames.clear();
1969 state.blames.extend(blames);
1970 })
1971 .unwrap();
1972 }
1973
1974 /// Put the given git repository into a state with the given status,
1975 /// by mutating the head, index, and unmerged state.
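    ///
    /// Illustrative sketch of typical test usage (not compiled; assumes a
    /// `FakeFs` whose tree already contains `/repo/.git` and `/repo/a.txt`):
    ///
    /// ```ignore
    /// fs.set_status_for_repo(
    ///     Path::new("/repo/.git"),
    ///     &[(
    ///         "a.txt",
    ///         FileStatus::Tracked(TrackedStatus {
    ///             index_status: StatusCode::Unmodified,
    ///             worktree_status: StatusCode::Modified,
    ///         }),
    ///     )],
    /// );
    /// ```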
1976 pub fn set_status_for_repo(&self, dot_git: &Path, statuses: &[(&str, FileStatus)]) {
1977 let workdir_path = dot_git.parent().unwrap();
1978 let workdir_contents = self.files_with_contents(workdir_path);
1979 self.with_git_state(dot_git, true, |state| {
1980 state.index_contents.clear();
1981 state.head_contents.clear();
1982 state.unmerged_paths.clear();
1983 for (path, content) in workdir_contents {
1984 use util::{paths::PathStyle, rel_path::RelPath};
1985
                let repo_path = RelPath::new(
                    path.strip_prefix(&workdir_path).unwrap(),
                    PathStyle::local(),
                )
                .unwrap();
1987 let repo_path = RepoPath::from_rel_path(&repo_path);
1988 let status = statuses
1989 .iter()
1990 .find_map(|(p, status)| (*p == repo_path.as_unix_str()).then_some(status));
1991 let mut content = String::from_utf8_lossy(&content).to_string();
1992
1993 let mut index_content = None;
1994 let mut head_content = None;
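                // Work out what the index and HEAD must contain so that diffing
                // them against the working copy reproduces the requested status.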
1995 match status {
1996 None => {
1997 index_content = Some(content.clone());
1998 head_content = Some(content);
1999 }
2000 Some(FileStatus::Untracked | FileStatus::Ignored) => {}
2001 Some(FileStatus::Unmerged(unmerged_status)) => {
2002 state
2003 .unmerged_paths
2004 .insert(repo_path.clone(), *unmerged_status);
2005 content.push_str(" (unmerged)");
2006 index_content = Some(content.clone());
2007 head_content = Some(content);
2008 }
2009 Some(FileStatus::Tracked(TrackedStatus {
2010 index_status,
2011 worktree_status,
2012 })) => {
2013 match worktree_status {
2014 StatusCode::Modified => {
2015 let mut content = content.clone();
2016 content.push_str(" (modified in working copy)");
2017 index_content = Some(content);
2018 }
2019 StatusCode::TypeChanged | StatusCode::Unmodified => {
2020 index_content = Some(content.clone());
2021 }
2022 StatusCode::Added => {}
2023 StatusCode::Deleted | StatusCode::Renamed | StatusCode::Copied => {
2024 panic!("cannot create these statuses for an existing file");
2025 }
2026 };
2027 match index_status {
2028 StatusCode::Modified => {
2029 let mut content = index_content.clone().expect(
2030 "file cannot be both modified in index and created in working copy",
2031 );
2032 content.push_str(" (modified in index)");
2033 head_content = Some(content);
2034 }
                            StatusCode::TypeChanged | StatusCode::Unmodified => {
                                head_content = Some(index_content.clone().expect(
                                    "file cannot be both unmodified in index and created in working copy",
                                ));
                            }
2038 StatusCode::Added => {}
2039 StatusCode::Deleted => {
2040 head_content = Some("".into());
2041 }
2042 StatusCode::Renamed | StatusCode::Copied => {
2043 panic!("cannot create these statuses for an existing file");
2044 }
2045 };
2046 }
2047 };
2048
2049 if let Some(content) = index_content {
2050 state.index_contents.insert(repo_path.clone(), content);
2051 }
2052 if let Some(content) = head_content {
2053 state.head_contents.insert(repo_path.clone(), content);
2054 }
2055 }
        })
        .unwrap();
2057 }
2058
2059 pub fn set_error_message_for_index_write(&self, dot_git: &Path, message: Option<String>) {
2060 self.with_git_state(dot_git, true, |state| {
2061 state.simulated_index_write_error_message = message;
2062 })
2063 .unwrap();
2064 }
2065
2066 pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
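        // Breadth-first walk of the in-memory tree rooted at "/", optionally
        // skipping anything beneath a `.git` directory.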
2067 let mut result = Vec::new();
2068 let mut queue = collections::VecDeque::new();
2069 let state = &*self.state.lock();
2070 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2071 while let Some((path, entry)) = queue.pop_front() {
2072 if let FakeFsEntry::Dir { entries, .. } = entry {
2073 for (name, entry) in entries {
2074 queue.push_back((path.join(name), entry));
2075 }
2076 }
2077 if include_dot_git
2078 || !path
2079 .components()
2080 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2081 {
2082 result.push(path);
2083 }
2084 }
2085 result
2086 }
2087
2088 pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
2089 let mut result = Vec::new();
2090 let mut queue = collections::VecDeque::new();
2091 let state = &*self.state.lock();
2092 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2093 while let Some((path, entry)) = queue.pop_front() {
2094 if let FakeFsEntry::Dir { entries, .. } = entry {
2095 for (name, entry) in entries {
2096 queue.push_back((path.join(name), entry));
2097 }
2098 if include_dot_git
2099 || !path
2100 .components()
2101 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2102 {
2103 result.push(path);
2104 }
2105 }
2106 }
2107 result
2108 }
2109
2110 pub fn files(&self) -> Vec<PathBuf> {
2111 let mut result = Vec::new();
2112 let mut queue = collections::VecDeque::new();
2113 let state = &*self.state.lock();
2114 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2115 while let Some((path, entry)) = queue.pop_front() {
2116 match entry {
2117 FakeFsEntry::File { .. } => result.push(path),
2118 FakeFsEntry::Dir { entries, .. } => {
2119 for (name, entry) in entries {
2120 queue.push_back((path.join(name), entry));
2121 }
2122 }
2123 FakeFsEntry::Symlink { .. } => {}
2124 }
2125 }
2126 result
2127 }
2128
2129 pub fn files_with_contents(&self, prefix: &Path) -> Vec<(PathBuf, Vec<u8>)> {
2130 let mut result = Vec::new();
2131 let mut queue = collections::VecDeque::new();
2132 let state = &*self.state.lock();
2133 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2134 while let Some((path, entry)) = queue.pop_front() {
2135 match entry {
2136 FakeFsEntry::File { content, .. } => {
2137 if path.starts_with(prefix) {
2138 result.push((path, content.clone()));
2139 }
2140 }
2141 FakeFsEntry::Dir { entries, .. } => {
2142 for (name, entry) in entries {
2143 queue.push_back((path.join(name), entry));
2144 }
2145 }
2146 FakeFsEntry::Symlink { .. } => {}
2147 }
2148 }
2149 result
2150 }
2151
2152 /// How many `read_dir` calls have been issued.
2153 pub fn read_dir_call_count(&self) -> usize {
2154 self.state.lock().read_dir_call_count
2155 }
2156
2157 pub fn watched_paths(&self) -> Vec<PathBuf> {
2158 let state = self.state.lock();
2159 state
2160 .event_txs
2161 .iter()
2162 .filter_map(|(path, tx)| Some(path.clone()).filter(|_| !tx.is_closed()))
2163 .collect()
2164 }
2165
2166 /// How many `metadata` calls have been issued.
2167 pub fn metadata_call_count(&self) -> usize {
2168 self.state.lock().metadata_call_count
2169 }
2170
2171 /// How many write operations have been issued for a specific path.
2172 pub fn write_count_for_path(&self, path: impl AsRef<Path>) -> usize {
2173 let path = path.as_ref().to_path_buf();
2174 self.state
2175 .lock()
2176 .path_write_counts
2177 .get(&path)
2178 .copied()
2179 .unwrap_or(0)
2180 }
2181
2182 pub fn emit_fs_event(&self, path: impl Into<PathBuf>, event: Option<PathEventKind>) {
2183 self.state.lock().emit_event(std::iter::once((path, event)));
2184 }
2185
2186 fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
2187 self.executor.simulate_random_delay()
2188 }
2189}
2190
2191#[cfg(any(test, feature = "test-support"))]
2192impl FakeFsEntry {
2193 fn is_file(&self) -> bool {
2194 matches!(self, Self::File { .. })
2195 }
2196
2197 fn is_symlink(&self) -> bool {
2198 matches!(self, Self::Symlink { .. })
2199 }
2200
2201 fn file_content(&self, path: &Path) -> Result<&Vec<u8>> {
2202 if let Self::File { content, .. } = self {
2203 Ok(content)
2204 } else {
2205 anyhow::bail!("not a file: {path:?}");
2206 }
2207 }
2208
2209 fn dir_entries(&mut self, path: &Path) -> Result<&mut BTreeMap<String, FakeFsEntry>> {
2210 if let Self::Dir { entries, .. } = self {
2211 Ok(entries)
2212 } else {
2213 anyhow::bail!("not a directory: {path:?}");
2214 }
2215 }
2216}
2217
2218#[cfg(any(test, feature = "test-support"))]
2219struct FakeWatcher {
2220 tx: smol::channel::Sender<Vec<PathEvent>>,
2221 original_path: PathBuf,
2222 fs_state: Arc<Mutex<FakeFsState>>,
2223 prefixes: Mutex<Vec<PathBuf>>,
2224}
2225
2226#[cfg(any(test, feature = "test-support"))]
2227impl Watcher for FakeWatcher {
2228 fn add(&self, path: &Path) -> Result<()> {
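        // Paths under the originally watched root are already covered. For
        // anything else, register an extra event channel and remember the prefix
        // so matching events pass the stream's filter.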
2229 if path.starts_with(&self.original_path) {
2230 return Ok(());
2231 }
2232 self.fs_state
2233 .try_lock()
2234 .unwrap()
2235 .event_txs
2236 .push((path.to_owned(), self.tx.clone()));
2237 self.prefixes.lock().push(path.to_owned());
2238 Ok(())
2239 }
2240
2241 fn remove(&self, _: &Path) -> Result<()> {
2242 Ok(())
2243 }
2244}
2245
2246#[cfg(any(test, feature = "test-support"))]
2247#[derive(Debug)]
2248struct FakeHandle {
2249 inode: u64,
2250}
2251
2252#[cfg(any(test, feature = "test-support"))]
2253impl FileHandle for FakeHandle {
2254 fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf> {
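        // Renames are recorded by inode in `moves` (see the fake `rename`
        // implementation), so a handle can only resolve its path after the
        // underlying entry has been moved.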
2255 let fs = fs.as_fake();
2256 let mut state = fs.state.lock();
2257 let Some(target) = state.moves.get(&self.inode).cloned() else {
2258 anyhow::bail!("fake fd not moved")
2259 };
2260
2261 if state.try_entry(&target, false).is_some() {
2262 return Ok(target);
2263 }
2264 anyhow::bail!("fake fd target not found")
2265 }
2266}
2267
2268#[cfg(any(test, feature = "test-support"))]
2269#[async_trait::async_trait]
2270impl Fs for FakeFs {
2271 async fn create_dir(&self, path: &Path) -> Result<()> {
2272 self.simulate_random_delay().await;
2273
2274 let mut created_dirs = Vec::new();
2275 let mut cur_path = PathBuf::new();
2276 for component in path.components() {
2277 let should_skip = matches!(component, Component::Prefix(..) | Component::RootDir);
2278 cur_path.push(component);
2279 if should_skip {
2280 continue;
2281 }
2282 let mut state = self.state.lock();
2283
2284 let inode = state.get_and_increment_inode();
2285 let mtime = state.get_and_increment_mtime();
2286 state.write_path(&cur_path, |entry| {
2287 entry.or_insert_with(|| {
2288 created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
2289 FakeFsEntry::Dir {
2290 inode,
2291 mtime,
2292 len: 0,
2293 entries: Default::default(),
2294 git_repo_state: None,
2295 }
2296 });
2297 Ok(())
2298 })?
2299 }
2300
2301 self.state.lock().emit_event(created_dirs);
2302 Ok(())
2303 }
2304
2305 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
2306 self.simulate_random_delay().await;
2307 let mut state = self.state.lock();
2308 let inode = state.get_and_increment_inode();
2309 let mtime = state.get_and_increment_mtime();
2310 let file = FakeFsEntry::File {
2311 inode,
2312 mtime,
2313 len: 0,
2314 content: Vec::new(),
2315 git_dir_path: None,
2316 };
2317 let mut kind = Some(PathEventKind::Created);
2318 state.write_path(path, |entry| {
2319 match entry {
2320 btree_map::Entry::Occupied(mut e) => {
2321 if options.overwrite {
2322 kind = Some(PathEventKind::Changed);
2323 *e.get_mut() = file;
2324 } else if !options.ignore_if_exists {
2325 anyhow::bail!("path already exists: {path:?}");
2326 }
2327 }
2328 btree_map::Entry::Vacant(e) => {
2329 e.insert(file);
2330 }
2331 }
2332 Ok(())
2333 })?;
2334 state.emit_event([(path, kind)]);
2335 Ok(())
2336 }
2337
2338 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
2339 let mut state = self.state.lock();
2340 let file = FakeFsEntry::Symlink { target };
2341 state
2342 .write_path(path.as_ref(), move |e| match e {
2343 btree_map::Entry::Vacant(e) => {
2344 e.insert(file);
2345 Ok(())
2346 }
2347 btree_map::Entry::Occupied(mut e) => {
2348 *e.get_mut() = file;
2349 Ok(())
2350 }
2351 })
2352 .unwrap();
2353 state.emit_event([(path, Some(PathEventKind::Created))]);
2354
2355 Ok(())
2356 }
2357
2358 async fn create_file_with(
2359 &self,
2360 path: &Path,
2361 mut content: Pin<&mut (dyn AsyncRead + Send)>,
2362 ) -> Result<()> {
2363 let mut bytes = Vec::new();
2364 content.read_to_end(&mut bytes).await?;
2365 self.write_file_internal(path, bytes, true)?;
2366 Ok(())
2367 }
2368
2369 async fn extract_tar_file(
2370 &self,
2371 path: &Path,
2372 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
2373 ) -> Result<()> {
2374 let mut entries = content.entries()?;
2375 while let Some(entry) = entries.next().await {
2376 let mut entry = entry?;
2377 if entry.header().entry_type().is_file() {
2378 let path = path.join(entry.path()?.as_ref());
2379 let mut bytes = Vec::new();
2380 entry.read_to_end(&mut bytes).await?;
2381 self.create_dir(path.parent().unwrap()).await?;
2382 self.write_file_internal(&path, bytes, true)?;
2383 }
2384 }
2385 Ok(())
2386 }
2387
2388 async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
2389 self.simulate_random_delay().await;
2390
2391 let old_path = normalize_path(old_path);
2392 let new_path = normalize_path(new_path);
2393
2394 if options.create_parents {
2395 if let Some(parent) = new_path.parent() {
2396 self.create_dir(parent).await?;
2397 }
2398 }
2399
2400 let mut state = self.state.lock();
2401 let moved_entry = state.write_path(&old_path, |e| {
2402 if let btree_map::Entry::Occupied(e) = e {
2403 Ok(e.get().clone())
2404 } else {
2405 anyhow::bail!("path does not exist: {old_path:?}")
2406 }
2407 })?;
2408
2409 let inode = match moved_entry {
2410 FakeFsEntry::File { inode, .. } => inode,
2411 FakeFsEntry::Dir { inode, .. } => inode,
2412 _ => 0,
2413 };
2414
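        // Record the rename by inode so that open `FakeHandle`s can later resolve
        // the entry's new location via `current_path`.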
2415 state.moves.insert(inode, new_path.clone());
2416
2417 state.write_path(&new_path, |e| {
2418 match e {
2419 btree_map::Entry::Occupied(mut e) => {
2420 if options.overwrite {
2421 *e.get_mut() = moved_entry;
2422 } else if !options.ignore_if_exists {
2423 anyhow::bail!("path already exists: {new_path:?}");
2424 }
2425 }
2426 btree_map::Entry::Vacant(e) => {
2427 e.insert(moved_entry);
2428 }
2429 }
2430 Ok(())
2431 })?;
2432
2433 state
2434 .write_path(&old_path, |e| {
2435 if let btree_map::Entry::Occupied(e) = e {
2436 Ok(e.remove())
2437 } else {
2438 unreachable!()
2439 }
2440 })
2441 .unwrap();
2442
2443 state.emit_event([
2444 (old_path, Some(PathEventKind::Removed)),
2445 (new_path, Some(PathEventKind::Created)),
2446 ]);
2447 Ok(())
2448 }
2449
2450 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
2451 self.simulate_random_delay().await;
2452
2453 let source = normalize_path(source);
2454 let target = normalize_path(target);
2455 let mut state = self.state.lock();
2456 let mtime = state.get_and_increment_mtime();
2457 let inode = state.get_and_increment_inode();
2458 let source_entry = state.entry(&source)?;
2459 let content = source_entry.file_content(&source)?.clone();
2460 let mut kind = Some(PathEventKind::Created);
2461 state.write_path(&target, |e| match e {
2462 btree_map::Entry::Occupied(e) => {
2463 if options.overwrite {
2464 kind = Some(PathEventKind::Changed);
2465 Ok(Some(e.get().clone()))
2466 } else if !options.ignore_if_exists {
2467 anyhow::bail!("{target:?} already exists");
2468 } else {
2469 Ok(None)
2470 }
2471 }
2472 btree_map::Entry::Vacant(e) => Ok(Some(
2473 e.insert(FakeFsEntry::File {
2474 inode,
2475 mtime,
2476 len: content.len() as u64,
2477 content,
2478 git_dir_path: None,
2479 })
2480 .clone(),
2481 )),
2482 })?;
2483 state.emit_event([(target, kind)]);
2484 Ok(())
2485 }
2486
2487 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2488 self.simulate_random_delay().await;
2489
2490 let path = normalize_path(path);
2491 let parent_path = path.parent().context("cannot remove the root")?;
2492 let base_name = path.file_name().context("cannot remove the root")?;
2493
2494 let mut state = self.state.lock();
2495 let parent_entry = state.entry(parent_path)?;
2496 let entry = parent_entry
2497 .dir_entries(parent_path)?
2498 .entry(base_name.to_str().unwrap().into());
2499
2500 match entry {
2501 btree_map::Entry::Vacant(_) => {
2502 if !options.ignore_if_not_exists {
2503 anyhow::bail!("{path:?} does not exist");
2504 }
2505 }
2506 btree_map::Entry::Occupied(mut entry) => {
2507 {
2508 let children = entry.get_mut().dir_entries(&path)?;
2509 if !options.recursive && !children.is_empty() {
2510 anyhow::bail!("{path:?} is not empty");
2511 }
2512 }
2513 entry.remove();
2514 }
2515 }
2516 state.emit_event([(path, Some(PathEventKind::Removed))]);
2517 Ok(())
2518 }
2519
2520 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2521 self.simulate_random_delay().await;
2522
2523 let path = normalize_path(path);
2524 let parent_path = path.parent().context("cannot remove the root")?;
2525 let base_name = path.file_name().unwrap();
2526 let mut state = self.state.lock();
2527 let parent_entry = state.entry(parent_path)?;
2528 let entry = parent_entry
2529 .dir_entries(parent_path)?
2530 .entry(base_name.to_str().unwrap().into());
2531 match entry {
2532 btree_map::Entry::Vacant(_) => {
2533 if !options.ignore_if_not_exists {
2534 anyhow::bail!("{path:?} does not exist");
2535 }
2536 }
2537 btree_map::Entry::Occupied(mut entry) => {
2538 entry.get_mut().file_content(&path)?;
2539 entry.remove();
2540 }
2541 }
2542 state.emit_event([(path, Some(PathEventKind::Removed))]);
2543 Ok(())
2544 }
2545
2546 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
2547 let bytes = self.load_internal(path).await?;
2548 Ok(Box::new(io::Cursor::new(bytes)))
2549 }
2550
2551 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
2552 self.simulate_random_delay().await;
2553 let mut state = self.state.lock();
2554 let inode = match state.entry(path)? {
2555 FakeFsEntry::File { inode, .. } => *inode,
2556 FakeFsEntry::Dir { inode, .. } => *inode,
2557 _ => unreachable!(),
2558 };
2559 Ok(Arc::new(FakeHandle { inode }))
2560 }
2561
2562 async fn load(&self, path: &Path) -> Result<String> {
2563 let content = self.load_internal(path).await?;
2564 Ok(String::from_utf8(content)?)
2565 }
2566
2567 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
2568 self.load_internal(path).await
2569 }
2570
2571 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
2572 self.simulate_random_delay().await;
2573 let path = normalize_path(path.as_path());
2574 if let Some(path) = path.parent() {
2575 self.create_dir(path).await?;
2576 }
2577 self.write_file_internal(path, data.into_bytes(), true)?;
2578 Ok(())
2579 }
2580
2581 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
2582 self.simulate_random_delay().await;
2583 let path = normalize_path(path);
2584 let content = text::chunks_with_line_ending(text, line_ending).collect::<String>();
2585 if let Some(path) = path.parent() {
2586 self.create_dir(path).await?;
2587 }
2588 self.write_file_internal(path, content.into_bytes(), false)?;
2589 Ok(())
2590 }
2591
2592 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
2593 self.simulate_random_delay().await;
2594 let path = normalize_path(path);
2595 if let Some(path) = path.parent() {
2596 self.create_dir(path).await?;
2597 }
2598 self.write_file_internal(path, content.to_vec(), false)?;
2599 Ok(())
2600 }
2601
2602 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
2603 let path = normalize_path(path);
2604 self.simulate_random_delay().await;
2605 let state = self.state.lock();
2606 let canonical_path = state
2607 .canonicalize(&path, true)
2608 .with_context(|| format!("path does not exist: {path:?}"))?;
2609 Ok(canonical_path)
2610 }
2611
2612 async fn is_file(&self, path: &Path) -> bool {
2613 let path = normalize_path(path);
2614 self.simulate_random_delay().await;
2615 let mut state = self.state.lock();
2616 if let Some((entry, _)) = state.try_entry(&path, true) {
2617 entry.is_file()
2618 } else {
2619 false
2620 }
2621 }
2622
2623 async fn is_dir(&self, path: &Path) -> bool {
2624 self.metadata(path)
2625 .await
2626 .is_ok_and(|metadata| metadata.is_some_and(|metadata| metadata.is_dir))
2627 }
2628
2629 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
2630 self.simulate_random_delay().await;
2631 let path = normalize_path(path);
2632 let mut state = self.state.lock();
2633 state.metadata_call_count += 1;
2634 if let Some((mut entry, _)) = state.try_entry(&path, false) {
2635 let is_symlink = entry.is_symlink();
2636 if is_symlink {
2637 if let Some(e) = state.try_entry(&path, true).map(|e| e.0) {
2638 entry = e;
2639 } else {
2640 return Ok(None);
2641 }
2642 }
2643
2644 Ok(Some(match &*entry {
2645 FakeFsEntry::File {
2646 inode, mtime, len, ..
2647 } => Metadata {
2648 inode: *inode,
2649 mtime: *mtime,
2650 len: *len,
2651 is_dir: false,
2652 is_symlink,
2653 is_fifo: false,
2654 is_executable: false,
2655 },
2656 FakeFsEntry::Dir {
2657 inode, mtime, len, ..
2658 } => Metadata {
2659 inode: *inode,
2660 mtime: *mtime,
2661 len: *len,
2662 is_dir: true,
2663 is_symlink,
2664 is_fifo: false,
2665 is_executable: false,
2666 },
2667 FakeFsEntry::Symlink { .. } => unreachable!(),
2668 }))
2669 } else {
2670 Ok(None)
2671 }
2672 }
2673
2674 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
2675 self.simulate_random_delay().await;
2676 let path = normalize_path(path);
2677 let mut state = self.state.lock();
2678 let (entry, _) = state
2679 .try_entry(&path, false)
2680 .with_context(|| format!("path does not exist: {path:?}"))?;
2681 if let FakeFsEntry::Symlink { target } = entry {
2682 Ok(target.clone())
2683 } else {
2684 anyhow::bail!("not a symlink: {path:?}")
2685 }
2686 }
2687
2688 async fn read_dir(
2689 &self,
2690 path: &Path,
2691 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
2692 self.simulate_random_delay().await;
2693 let path = normalize_path(path);
2694 let mut state = self.state.lock();
2695 state.read_dir_call_count += 1;
2696 let entry = state.entry(&path)?;
2697 let children = entry.dir_entries(&path)?;
2698 let paths = children
2699 .keys()
2700 .map(|file_name| Ok(path.join(file_name)))
2701 .collect::<Vec<_>>();
2702 Ok(Box::pin(futures::stream::iter(paths)))
2703 }
2704
2705 async fn watch(
2706 &self,
2707 path: &Path,
2708 _: Duration,
2709 ) -> (
2710 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
2711 Arc<dyn Watcher>,
2712 ) {
2713 self.simulate_random_delay().await;
2714 let (tx, rx) = smol::channel::unbounded();
2715 let path = path.to_path_buf();
2716 self.state.lock().event_txs.push((path.clone(), tx.clone()));
2717 let executor = self.executor.clone();
2718 let watcher = Arc::new(FakeWatcher {
2719 tx,
2720 original_path: path.to_owned(),
2721 fs_state: self.state.clone(),
2722 prefixes: Mutex::new(vec![path]),
2723 });
2724 (
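            // Forward only event batches that touch at least one watched prefix,
            // after a simulated scheduling delay.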
2725 Box::pin(futures::StreamExt::filter(rx, {
2726 let watcher = watcher.clone();
2727 move |events| {
2728 let result = events.iter().any(|evt_path| {
2729 watcher
2730 .prefixes
2731 .lock()
2732 .iter()
2733 .any(|prefix| evt_path.path.starts_with(prefix))
2734 });
2735 let executor = executor.clone();
2736 async move {
2737 executor.simulate_random_delay().await;
2738 result
2739 }
2740 }
2741 })),
2742 watcher,
2743 )
2744 }
2745
2746 fn open_repo(
2747 &self,
2748 abs_dot_git: &Path,
2749 _system_git_binary: Option<&Path>,
2750 ) -> Option<Arc<dyn GitRepository>> {
2751 use util::ResultExt as _;
2752
2753 self.with_git_state_and_paths(
2754 abs_dot_git,
2755 false,
2756 |_, repository_dir_path, common_dir_path| {
2757 Arc::new(fake_git_repo::FakeGitRepository {
2758 fs: self.this.upgrade().unwrap(),
2759 executor: self.executor.clone(),
2760 dot_git_path: abs_dot_git.to_path_buf(),
2761 repository_dir_path: repository_dir_path.to_owned(),
2762 common_dir_path: common_dir_path.to_owned(),
2763 checkpoints: Arc::default(),
2764 }) as _
2765 },
2766 )
2767 .log_err()
2768 }
2769
2770 async fn git_init(
2771 &self,
2772 abs_work_directory_path: &Path,
2773 _fallback_branch_name: String,
2774 ) -> Result<()> {
2775 self.create_dir(&abs_work_directory_path.join(".git")).await
2776 }
2777
2778 async fn git_clone(&self, _repo_url: &str, _abs_work_directory: &Path) -> Result<()> {
2779 anyhow::bail!("Git clone is not supported in fake Fs")
2780 }
2781
2782 fn is_fake(&self) -> bool {
2783 true
2784 }
2785
2786 async fn is_case_sensitive(&self) -> Result<bool> {
2787 Ok(true)
2788 }
2789
2790 fn subscribe_to_jobs(&self) -> JobEventReceiver {
2791 let (sender, receiver) = futures::channel::mpsc::unbounded();
2792 self.state.lock().job_event_subscribers.lock().push(sender);
2793 receiver
2794 }
2795
2796 #[cfg(any(test, feature = "test-support"))]
2797 fn as_fake(&self) -> Arc<FakeFs> {
2798 self.this.upgrade().unwrap()
2799 }
2800}
2801
2802pub fn normalize_path(path: &Path) -> PathBuf {
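    // Lexically resolve `.` and `..` components without touching the file
    // system; unlike `canonicalize`, symlinks are not followed and the path does
    // not need to exist.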
2803 let mut components = path.components().peekable();
2804 let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() {
2805 components.next();
2806 PathBuf::from(c.as_os_str())
2807 } else {
2808 PathBuf::new()
2809 };
2810
2811 for component in components {
2812 match component {
2813 Component::Prefix(..) => unreachable!(),
2814 Component::RootDir => {
2815 ret.push(component.as_os_str());
2816 }
2817 Component::CurDir => {}
2818 Component::ParentDir => {
2819 ret.pop();
2820 }
2821 Component::Normal(c) => {
2822 ret.push(c);
2823 }
2824 }
2825 }
2826 ret
2827}
2828
2829pub async fn copy_recursive<'a>(
2830 fs: &'a dyn Fs,
2831 source: &'a Path,
2832 target: &'a Path,
2833 options: CopyOptions,
2834) -> Result<()> {
2835 for (item, is_dir) in read_dir_items(fs, source).await? {
2836 let Ok(item_relative_path) = item.strip_prefix(source) else {
2837 continue;
2838 };
2839 let target_item = if item_relative_path == Path::new("") {
2840 target.to_path_buf()
2841 } else {
2842 target.join(item_relative_path)
2843 };
2844 if is_dir {
2845 if !options.overwrite && fs.metadata(&target_item).await.is_ok_and(|m| m.is_some()) {
2846 if options.ignore_if_exists {
2847 continue;
2848 } else {
2849 anyhow::bail!("{target_item:?} already exists");
2850 }
2851 }
2852 let _ = fs
2853 .remove_dir(
2854 &target_item,
2855 RemoveOptions {
2856 recursive: true,
2857 ignore_if_not_exists: true,
2858 },
2859 )
2860 .await;
2861 fs.create_dir(&target_item).await?;
2862 } else {
2863 fs.copy_file(&item, &target_item, options).await?;
2864 }
2865 }
2866 Ok(())
2867}
2868
2869/// Recursively reads all of the paths in the given directory.
2870///
2871/// Returns a vector of tuples of (path, is_dir).
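///
/// Directories are yielded before their contents (a pre-order walk), which lets
/// callers such as `copy_recursive` create a directory before copying the
/// entries inside it.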
2872pub async fn read_dir_items<'a>(fs: &'a dyn Fs, source: &'a Path) -> Result<Vec<(PathBuf, bool)>> {
2873 let mut items = Vec::new();
2874 read_recursive(fs, source, &mut items).await?;
2875 Ok(items)
2876}
2877
2878fn read_recursive<'a>(
2879 fs: &'a dyn Fs,
2880 source: &'a Path,
2881 output: &'a mut Vec<(PathBuf, bool)>,
2882) -> BoxFuture<'a, Result<()>> {
2883 use futures::future::FutureExt;
2884
2885 async move {
2886 let metadata = fs
2887 .metadata(source)
2888 .await?
2889 .with_context(|| format!("path does not exist: {source:?}"))?;
2890
2891 if metadata.is_dir {
2892 output.push((source.to_path_buf(), true));
2893 let mut children = fs.read_dir(source).await?;
2894 while let Some(child_path) = children.next().await {
2895 if let Ok(child_path) = child_path {
2896 read_recursive(fs, &child_path, output).await?;
2897 }
2898 }
2899 } else {
2900 output.push((source.to_path_buf(), false));
2901 }
2902 Ok(())
2903 }
2904 .boxed()
2905}
2906
// todo(windows)
// Can we get the file id without opening the file twice?
// https://github.com/rust-lang/rust/issues/63010
2910#[cfg(target_os = "windows")]
2911async fn file_id(path: impl AsRef<Path>) -> Result<u64> {
2912 use std::os::windows::io::AsRawHandle;
2913
2914 use smol::fs::windows::OpenOptionsExt;
2915 use windows::Win32::{
2916 Foundation::HANDLE,
2917 Storage::FileSystem::{
2918 BY_HANDLE_FILE_INFORMATION, FILE_FLAG_BACKUP_SEMANTICS, GetFileInformationByHandle,
2919 },
2920 };
2921
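    // FILE_FLAG_BACKUP_SEMANTICS lets CreateFile open directories as well as
    // regular files.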
2922 let file = smol::fs::OpenOptions::new()
2923 .read(true)
2924 .custom_flags(FILE_FLAG_BACKUP_SEMANTICS.0)
2925 .open(path)
2926 .await?;
2927
2928 let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() };
2929 // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle
2930 // This function supports Windows XP+
2931 smol::unblock(move || {
2932 unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? };
2933
2934 Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64))
2935 })
2936 .await
2937}
2938
2939#[cfg(target_os = "windows")]
2940fn atomic_replace<P: AsRef<Path>>(
2941 replaced_file: P,
2942 replacement_file: P,
2943) -> windows::core::Result<()> {
2944 use windows::{
2945 Win32::Storage::FileSystem::{REPLACE_FILE_FLAGS, ReplaceFileW},
2946 core::HSTRING,
2947 };
2948
    // ReplaceFileW fails if the file being replaced does not exist, so create it
    // first (ignoring the error if it is already there).
    let _ = std::fs::File::create_new(replaced_file.as_ref());
2951
2952 unsafe {
2953 ReplaceFileW(
2954 &HSTRING::from(replaced_file.as_ref().to_string_lossy().into_owned()),
2955 &HSTRING::from(replacement_file.as_ref().to_string_lossy().into_owned()),
2956 None,
2957 REPLACE_FILE_FLAGS::default(),
2958 None,
2959 None,
2960 )
2961 }
2962}
2963
2964#[cfg(test)]
2965mod tests {
2966 use super::*;
2967 use gpui::BackgroundExecutor;
2968 use serde_json::json;
2969 use util::path;
2970
2971 #[gpui::test]
2972 async fn test_fake_fs(executor: BackgroundExecutor) {
2973 let fs = FakeFs::new(executor.clone());
2974 fs.insert_tree(
2975 path!("/root"),
2976 json!({
2977 "dir1": {
2978 "a": "A",
2979 "b": "B"
2980 },
2981 "dir2": {
2982 "c": "C",
2983 "dir3": {
2984 "d": "D"
2985 }
2986 }
2987 }),
2988 )
2989 .await;
2990
2991 assert_eq!(
2992 fs.files(),
2993 vec![
2994 PathBuf::from(path!("/root/dir1/a")),
2995 PathBuf::from(path!("/root/dir1/b")),
2996 PathBuf::from(path!("/root/dir2/c")),
2997 PathBuf::from(path!("/root/dir2/dir3/d")),
2998 ]
2999 );
3000
3001 fs.create_symlink(path!("/root/dir2/link-to-dir3").as_ref(), "./dir3".into())
3002 .await
3003 .unwrap();
3004
3005 assert_eq!(
3006 fs.canonicalize(path!("/root/dir2/link-to-dir3").as_ref())
3007 .await
3008 .unwrap(),
3009 PathBuf::from(path!("/root/dir2/dir3")),
3010 );
3011 assert_eq!(
3012 fs.canonicalize(path!("/root/dir2/link-to-dir3/d").as_ref())
3013 .await
3014 .unwrap(),
3015 PathBuf::from(path!("/root/dir2/dir3/d")),
3016 );
3017 assert_eq!(
3018 fs.load(path!("/root/dir2/link-to-dir3/d").as_ref())
3019 .await
3020 .unwrap(),
3021 "D",
3022 );
3023 }
3024
3025 #[gpui::test]
3026 async fn test_copy_recursive_with_single_file(executor: BackgroundExecutor) {
3027 let fs = FakeFs::new(executor.clone());
3028 fs.insert_tree(
3029 path!("/outer"),
3030 json!({
3031 "a": "A",
3032 "b": "B",
3033 "inner": {}
3034 }),
3035 )
3036 .await;
3037
3038 assert_eq!(
3039 fs.files(),
3040 vec![
3041 PathBuf::from(path!("/outer/a")),
3042 PathBuf::from(path!("/outer/b")),
3043 ]
3044 );
3045
3046 let source = Path::new(path!("/outer/a"));
3047 let target = Path::new(path!("/outer/a copy"));
3048 copy_recursive(fs.as_ref(), source, target, Default::default())
3049 .await
3050 .unwrap();
3051
3052 assert_eq!(
3053 fs.files(),
3054 vec![
3055 PathBuf::from(path!("/outer/a")),
3056 PathBuf::from(path!("/outer/a copy")),
3057 PathBuf::from(path!("/outer/b")),
3058 ]
3059 );
3060
3061 let source = Path::new(path!("/outer/a"));
3062 let target = Path::new(path!("/outer/inner/a copy"));
3063 copy_recursive(fs.as_ref(), source, target, Default::default())
3064 .await
3065 .unwrap();
3066
3067 assert_eq!(
3068 fs.files(),
3069 vec![
3070 PathBuf::from(path!("/outer/a")),
3071 PathBuf::from(path!("/outer/a copy")),
3072 PathBuf::from(path!("/outer/b")),
3073 PathBuf::from(path!("/outer/inner/a copy")),
3074 ]
3075 );
3076 }
3077
3078 #[gpui::test]
3079 async fn test_copy_recursive_with_single_dir(executor: BackgroundExecutor) {
3080 let fs = FakeFs::new(executor.clone());
3081 fs.insert_tree(
3082 path!("/outer"),
3083 json!({
3084 "a": "A",
3085 "empty": {},
3086 "non-empty": {
3087 "b": "B",
3088 }
3089 }),
3090 )
3091 .await;
3092
3093 assert_eq!(
3094 fs.files(),
3095 vec![
3096 PathBuf::from(path!("/outer/a")),
3097 PathBuf::from(path!("/outer/non-empty/b")),
3098 ]
3099 );
3100 assert_eq!(
3101 fs.directories(false),
3102 vec![
3103 PathBuf::from(path!("/")),
3104 PathBuf::from(path!("/outer")),
3105 PathBuf::from(path!("/outer/empty")),
3106 PathBuf::from(path!("/outer/non-empty")),
3107 ]
3108 );
3109
3110 let source = Path::new(path!("/outer/empty"));
3111 let target = Path::new(path!("/outer/empty copy"));
3112 copy_recursive(fs.as_ref(), source, target, Default::default())
3113 .await
3114 .unwrap();
3115
3116 assert_eq!(
3117 fs.files(),
3118 vec![
3119 PathBuf::from(path!("/outer/a")),
3120 PathBuf::from(path!("/outer/non-empty/b")),
3121 ]
3122 );
3123 assert_eq!(
3124 fs.directories(false),
3125 vec![
3126 PathBuf::from(path!("/")),
3127 PathBuf::from(path!("/outer")),
3128 PathBuf::from(path!("/outer/empty")),
3129 PathBuf::from(path!("/outer/empty copy")),
3130 PathBuf::from(path!("/outer/non-empty")),
3131 ]
3132 );
3133
3134 let source = Path::new(path!("/outer/non-empty"));
3135 let target = Path::new(path!("/outer/non-empty copy"));
3136 copy_recursive(fs.as_ref(), source, target, Default::default())
3137 .await
3138 .unwrap();
3139
3140 assert_eq!(
3141 fs.files(),
3142 vec![
3143 PathBuf::from(path!("/outer/a")),
3144 PathBuf::from(path!("/outer/non-empty/b")),
3145 PathBuf::from(path!("/outer/non-empty copy/b")),
3146 ]
3147 );
3148 assert_eq!(
3149 fs.directories(false),
3150 vec![
3151 PathBuf::from(path!("/")),
3152 PathBuf::from(path!("/outer")),
3153 PathBuf::from(path!("/outer/empty")),
3154 PathBuf::from(path!("/outer/empty copy")),
3155 PathBuf::from(path!("/outer/non-empty")),
3156 PathBuf::from(path!("/outer/non-empty copy")),
3157 ]
3158 );
3159 }
3160
3161 #[gpui::test]
3162 async fn test_copy_recursive(executor: BackgroundExecutor) {
3163 let fs = FakeFs::new(executor.clone());
3164 fs.insert_tree(
3165 path!("/outer"),
3166 json!({
3167 "inner1": {
3168 "a": "A",
3169 "b": "B",
3170 "inner3": {
3171 "d": "D",
3172 },
3173 "inner4": {}
3174 },
3175 "inner2": {
3176 "c": "C",
3177 }
3178 }),
3179 )
3180 .await;
3181
3182 assert_eq!(
3183 fs.files(),
3184 vec![
3185 PathBuf::from(path!("/outer/inner1/a")),
3186 PathBuf::from(path!("/outer/inner1/b")),
3187 PathBuf::from(path!("/outer/inner2/c")),
3188 PathBuf::from(path!("/outer/inner1/inner3/d")),
3189 ]
3190 );
3191 assert_eq!(
3192 fs.directories(false),
3193 vec![
3194 PathBuf::from(path!("/")),
3195 PathBuf::from(path!("/outer")),
3196 PathBuf::from(path!("/outer/inner1")),
3197 PathBuf::from(path!("/outer/inner2")),
3198 PathBuf::from(path!("/outer/inner1/inner3")),
3199 PathBuf::from(path!("/outer/inner1/inner4")),
3200 ]
3201 );
3202
3203 let source = Path::new(path!("/outer"));
3204 let target = Path::new(path!("/outer/inner1/outer"));
3205 copy_recursive(fs.as_ref(), source, target, Default::default())
3206 .await
3207 .unwrap();
3208
3209 assert_eq!(
3210 fs.files(),
3211 vec![
3212 PathBuf::from(path!("/outer/inner1/a")),
3213 PathBuf::from(path!("/outer/inner1/b")),
3214 PathBuf::from(path!("/outer/inner2/c")),
3215 PathBuf::from(path!("/outer/inner1/inner3/d")),
3216 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3217 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3218 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3219 PathBuf::from(path!("/outer/inner1/outer/inner1/inner3/d")),
3220 ]
3221 );
3222 assert_eq!(
3223 fs.directories(false),
3224 vec![
3225 PathBuf::from(path!("/")),
3226 PathBuf::from(path!("/outer")),
3227 PathBuf::from(path!("/outer/inner1")),
3228 PathBuf::from(path!("/outer/inner2")),
3229 PathBuf::from(path!("/outer/inner1/inner3")),
3230 PathBuf::from(path!("/outer/inner1/inner4")),
3231 PathBuf::from(path!("/outer/inner1/outer")),
3232 PathBuf::from(path!("/outer/inner1/outer/inner1")),
3233 PathBuf::from(path!("/outer/inner1/outer/inner2")),
3234 PathBuf::from(path!("/outer/inner1/outer/inner1/inner3")),
3235 PathBuf::from(path!("/outer/inner1/outer/inner1/inner4")),
3236 ]
3237 );
3238 }
3239
3240 #[gpui::test]
3241 async fn test_copy_recursive_with_overwriting(executor: BackgroundExecutor) {
3242 let fs = FakeFs::new(executor.clone());
3243 fs.insert_tree(
3244 path!("/outer"),
3245 json!({
3246 "inner1": {
3247 "a": "A",
3248 "b": "B",
3249 "outer": {
3250 "inner1": {
3251 "a": "B"
3252 }
3253 }
3254 },
3255 "inner2": {
3256 "c": "C",
3257 }
3258 }),
3259 )
3260 .await;
3261
3262 assert_eq!(
3263 fs.files(),
3264 vec![
3265 PathBuf::from(path!("/outer/inner1/a")),
3266 PathBuf::from(path!("/outer/inner1/b")),
3267 PathBuf::from(path!("/outer/inner2/c")),
3268 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3269 ]
3270 );
3271 assert_eq!(
3272 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3273 .await
3274 .unwrap(),
3275 "B",
3276 );
3277
3278 let source = Path::new(path!("/outer"));
3279 let target = Path::new(path!("/outer/inner1/outer"));
3280 copy_recursive(
3281 fs.as_ref(),
3282 source,
3283 target,
3284 CopyOptions {
3285 overwrite: true,
3286 ..Default::default()
3287 },
3288 )
3289 .await
3290 .unwrap();
3291
3292 assert_eq!(
3293 fs.files(),
3294 vec![
3295 PathBuf::from(path!("/outer/inner1/a")),
3296 PathBuf::from(path!("/outer/inner1/b")),
3297 PathBuf::from(path!("/outer/inner2/c")),
3298 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3299 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3300 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3301 PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
3302 ]
3303 );
3304 assert_eq!(
3305 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3306 .await
3307 .unwrap(),
3308 "A"
3309 );
3310 }
3311
3312 #[gpui::test]
3313 async fn test_copy_recursive_with_ignoring(executor: BackgroundExecutor) {
3314 let fs = FakeFs::new(executor.clone());
3315 fs.insert_tree(
3316 path!("/outer"),
3317 json!({
3318 "inner1": {
3319 "a": "A",
3320 "b": "B",
3321 "outer": {
3322 "inner1": {
3323 "a": "B"
3324 }
3325 }
3326 },
3327 "inner2": {
3328 "c": "C",
3329 }
3330 }),
3331 )
3332 .await;
3333
3334 assert_eq!(
3335 fs.files(),
3336 vec![
3337 PathBuf::from(path!("/outer/inner1/a")),
3338 PathBuf::from(path!("/outer/inner1/b")),
3339 PathBuf::from(path!("/outer/inner2/c")),
3340 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3341 ]
3342 );
3343 assert_eq!(
3344 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3345 .await
3346 .unwrap(),
3347 "B",
3348 );
3349
3350 let source = Path::new(path!("/outer"));
3351 let target = Path::new(path!("/outer/inner1/outer"));
3352 copy_recursive(
3353 fs.as_ref(),
3354 source,
3355 target,
3356 CopyOptions {
3357 ignore_if_exists: true,
3358 ..Default::default()
3359 },
3360 )
3361 .await
3362 .unwrap();
3363
3364 assert_eq!(
3365 fs.files(),
3366 vec![
3367 PathBuf::from(path!("/outer/inner1/a")),
3368 PathBuf::from(path!("/outer/inner1/b")),
3369 PathBuf::from(path!("/outer/inner2/c")),
3370 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3371 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3372 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3373 PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
3374 ]
3375 );
3376 assert_eq!(
3377 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3378 .await
3379 .unwrap(),
3380 "B"
3381 );
3382 }
3383
3384 #[gpui::test]
3385 async fn test_realfs_atomic_write(executor: BackgroundExecutor) {
3386 // With the file handle still open, the file should be replaced
3387 // https://github.com/zed-industries/zed/issues/30054
3388 let fs = RealFs {
3389 bundled_git_binary_path: None,
3390 executor,
3391 next_job_id: Arc::new(AtomicUsize::new(0)),
3392 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3393 };
3394 let temp_dir = TempDir::new().unwrap();
3395 let file_to_be_replaced = temp_dir.path().join("file.txt");
3396 let mut file = std::fs::File::create_new(&file_to_be_replaced).unwrap();
3397 file.write_all(b"Hello").unwrap();
3398 // drop(file); // We still hold the file handle here
3399 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3400 assert_eq!(content, "Hello");
3401 smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "World".into())).unwrap();
3402 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3403 assert_eq!(content, "World");
3404 }
3405
3406 #[gpui::test]
3407 async fn test_realfs_atomic_write_non_existing_file(executor: BackgroundExecutor) {
3408 let fs = RealFs {
3409 bundled_git_binary_path: None,
3410 executor,
3411 next_job_id: Arc::new(AtomicUsize::new(0)),
3412 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3413 };
3414 let temp_dir = TempDir::new().unwrap();
3415 let file_to_be_replaced = temp_dir.path().join("file.txt");
3416 smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "Hello".into())).unwrap();
3417 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3418 assert_eq!(content, "Hello");
3419 }
3420
3421 #[gpui::test]
3422 #[cfg(target_os = "windows")]
3423 async fn test_realfs_canonicalize(executor: BackgroundExecutor) {
3424 use util::paths::SanitizedPath;
3425
3426 let fs = RealFs {
3427 bundled_git_binary_path: None,
3428 executor,
3429 next_job_id: Arc::new(AtomicUsize::new(0)),
3430 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3431 };
3432 let temp_dir = TempDir::new().unwrap();
3433 let file = temp_dir.path().join("test (1).txt");
3434 let file = SanitizedPath::new(&file);
3435 std::fs::write(&file, "test").unwrap();
3436
3437 let canonicalized = fs.canonicalize(file.as_path()).await;
3438 assert!(canonicalized.is_ok());
3439 }
3440
3441 #[gpui::test]
3442 async fn test_rename(executor: BackgroundExecutor) {
3443 let fs = FakeFs::new(executor.clone());
3444 fs.insert_tree(
3445 path!("/root"),
3446 json!({
3447 "src": {
3448 "file_a.txt": "content a",
3449 "file_b.txt": "content b"
3450 }
3451 }),
3452 )
3453 .await;
3454
3455 fs.rename(
3456 Path::new(path!("/root/src/file_a.txt")),
3457 Path::new(path!("/root/src/new/renamed_a.txt")),
3458 RenameOptions {
3459 create_parents: true,
3460 ..Default::default()
3461 },
3462 )
3463 .await
3464 .unwrap();
3465
        // Assert that `file_a.txt` was renamed and moved into a directory that
        // did not exist before.
3468 assert_eq!(
3469 fs.files(),
3470 vec![
3471 PathBuf::from(path!("/root/src/file_b.txt")),
3472 PathBuf::from(path!("/root/src/new/renamed_a.txt")),
3473 ]
3474 );
3475
3476 let result = fs
3477 .rename(
3478 Path::new(path!("/root/src/file_b.txt")),
3479 Path::new(path!("/root/src/old/renamed_b.txt")),
3480 RenameOptions {
3481 create_parents: false,
3482 ..Default::default()
3483 },
3484 )
3485 .await;
3486
        // Assert that `file_b.txt` was neither renamed nor moved, since
        // `create_parents` was set to `false` and the target directory did not
        // exist.
3490 assert!(result.is_err());
3491 assert_eq!(
3492 fs.files(),
3493 vec![
3494 PathBuf::from(path!("/root/src/file_b.txt")),
3495 PathBuf::from(path!("/root/src/new/renamed_a.txt")),
3496 ]
3497 );
3498 }
3499}