1#[cfg(target_os = "macos")]
2mod mac_watcher;
3
4#[cfg(not(target_os = "macos"))]
5pub mod fs_watcher;
6
7use parking_lot::Mutex;
8use std::sync::atomic::{AtomicUsize, Ordering};
9use std::time::Instant;
10
11use anyhow::{Context as _, Result, anyhow};
12#[cfg(any(target_os = "linux", target_os = "freebsd"))]
13use ashpd::desktop::trash;
14use futures::stream::iter;
15use gpui::App;
16use gpui::BackgroundExecutor;
17use gpui::Global;
18use gpui::ReadGlobal as _;
19use gpui::SharedString;
20use std::borrow::Cow;
21use util::command::new_smol_command;
22
23#[cfg(unix)]
24use std::os::fd::{AsFd, AsRawFd};
25
26#[cfg(unix)]
27use std::os::unix::fs::{FileTypeExt, MetadataExt};
28
29#[cfg(any(target_os = "macos", target_os = "freebsd"))]
30use std::mem::MaybeUninit;
31
32use async_tar::Archive;
33use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture};
34use git::repository::{GitRepository, RealGitRepository};
35use is_executable::IsExecutable;
36use rope::Rope;
37use serde::{Deserialize, Serialize};
38use smol::io::AsyncWriteExt;
39use std::{
40 io::{self, Write},
41 path::{Component, Path, PathBuf},
42 pin::Pin,
43 sync::Arc,
44 time::{Duration, SystemTime, UNIX_EPOCH},
45};
46use tempfile::TempDir;
47use text::LineEnding;
48
49#[cfg(any(test, feature = "test-support"))]
50mod fake_git_repo;
51#[cfg(any(test, feature = "test-support"))]
52use collections::{BTreeMap, btree_map};
53#[cfg(any(test, feature = "test-support"))]
54use fake_git_repo::FakeGitRepositoryState;
55#[cfg(any(test, feature = "test-support"))]
56use git::{
57 repository::{RepoPath, repo_path},
58 status::{FileStatus, StatusCode, TrackedStatus, UnmergedStatus},
59};
60
61#[cfg(any(test, feature = "test-support"))]
62use smol::io::AsyncReadExt;
63#[cfg(any(test, feature = "test-support"))]
64use std::ffi::OsStr;
65
66#[cfg(any(test, feature = "test-support"))]
67pub use fake_git_repo::{LOAD_HEAD_TEXT_TASK, LOAD_INDEX_TEXT_TASK};
68
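/// Handle onto the underlying platform watcher returned by [`Fs::watch`], allowing
/// callers to add or remove watched paths while the event stream is live.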
69pub trait Watcher: Send + Sync {
70 fn add(&self, path: &Path) -> Result<()>;
71 fn remove(&self, path: &Path) -> Result<()>;
72}
73
74#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
75pub enum PathEventKind {
76 Removed,
77 Created,
78 Changed,
79}
80
81#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
82pub struct PathEvent {
83 pub path: PathBuf,
84 pub kind: Option<PathEventKind>,
85}
86
87impl From<PathEvent> for PathBuf {
88 fn from(event: PathEvent) -> Self {
89 event.path
90 }
91}
92
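/// Platform-agnostic filesystem interface. `RealFs` implements it against the real
/// filesystem, while `FakeFs` (available with the `test-support` feature) provides an
/// in-memory implementation for tests.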
93#[async_trait::async_trait]
94pub trait Fs: Send + Sync {
95 async fn create_dir(&self, path: &Path) -> Result<()>;
96 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()>;
97 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
98 async fn create_file_with(
99 &self,
100 path: &Path,
101 content: Pin<&mut (dyn AsyncRead + Send)>,
102 ) -> Result<()>;
103 async fn extract_tar_file(
104 &self,
105 path: &Path,
106 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
107 ) -> Result<()>;
108 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
109 async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;
110 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;
111 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
112 self.remove_dir(path, options).await
113 }
114 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;
115 async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
116 self.remove_file(path, options).await
117 }
118 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>>;
119 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>>;
120 async fn load(&self, path: &Path) -> Result<String> {
121 Ok(String::from_utf8(self.load_bytes(path).await?)?)
122 }
123 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>>;
124 async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
125 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()>;
126 async fn write(&self, path: &Path, content: &[u8]) -> Result<()>;
127 async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
128 async fn is_file(&self, path: &Path) -> bool;
129 async fn is_dir(&self, path: &Path) -> bool;
130 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
131 async fn read_link(&self, path: &Path) -> Result<PathBuf>;
132 async fn read_dir(
133 &self,
134 path: &Path,
135 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;
136
137 async fn watch(
138 &self,
139 path: &Path,
140 latency: Duration,
141 ) -> (
142 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
143 Arc<dyn Watcher>,
144 );
145
146 fn open_repo(
147 &self,
148 abs_dot_git: &Path,
149 system_git_binary_path: Option<&Path>,
150 ) -> Option<Arc<dyn GitRepository>>;
151 async fn git_init(&self, abs_work_directory: &Path, fallback_branch_name: String)
152 -> Result<()>;
153 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()>;
154 fn is_fake(&self) -> bool;
155 async fn is_case_sensitive(&self) -> Result<bool>;
156 fn subscribe_to_jobs(&self) -> JobEventReceiver;
157
158 #[cfg(any(test, feature = "test-support"))]
159 fn as_fake(&self) -> Arc<FakeFs> {
160 panic!("called as_fake on a real fs");
161 }
162}
163
164struct GlobalFs(Arc<dyn Fs>);
165
166impl Global for GlobalFs {}
167
168impl dyn Fs {
169 /// Returns the global [`Fs`].
170 pub fn global(cx: &App) -> Arc<Self> {
171 GlobalFs::global(cx).0.clone()
172 }
173
174 /// Sets the global [`Fs`].
175 pub fn set_global(fs: Arc<Self>, cx: &mut App) {
176 cx.set_global(GlobalFs(fs));
177 }
178}
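// Illustrative setup of the global `Fs` (a sketch; assumes a `&mut App` named `cx`):
//
//     let fs: Arc<dyn Fs> = Arc::new(RealFs::new(None, cx.background_executor().clone()));
//     <dyn Fs>::set_global(fs.clone(), cx);
//     let fs = <dyn Fs>::global(cx);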
179
180#[derive(Copy, Clone, Default)]
181pub struct CreateOptions {
182 pub overwrite: bool,
183 pub ignore_if_exists: bool,
184}
185
186#[derive(Copy, Clone, Default)]
187pub struct CopyOptions {
188 pub overwrite: bool,
189 pub ignore_if_exists: bool,
190}
191
192#[derive(Copy, Clone, Default)]
193pub struct RenameOptions {
194 pub overwrite: bool,
195 pub ignore_if_exists: bool,
196 /// Whether to create parent directories if they do not exist.
197 pub create_parents: bool,
198}
199
200#[derive(Copy, Clone, Default)]
201pub struct RemoveOptions {
202 pub recursive: bool,
203 pub ignore_if_not_exists: bool,
204}
205
206#[derive(Copy, Clone, Debug)]
207pub struct Metadata {
208 pub inode: u64,
209 pub mtime: MTime,
210 pub is_symlink: bool,
211 pub is_dir: bool,
212 pub len: u64,
213 pub is_fifo: bool,
214 pub is_executable: bool,
215}
216
217/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
218/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
219/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
220/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
221///
222/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
223#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
224#[serde(transparent)]
225pub struct MTime(SystemTime);
226
227pub type JobId = usize;
228
229#[derive(Clone, Debug)]
230pub struct JobInfo {
231 pub start: Instant,
232 pub message: SharedString,
233 pub id: JobId,
234}
235
236#[derive(Debug, Clone)]
237pub enum JobEvent {
238 Started { info: JobInfo },
239 Completed { id: JobId },
240}
241
242pub type JobEventSender = futures::channel::mpsc::UnboundedSender<JobEvent>;
243pub type JobEventReceiver = futures::channel::mpsc::UnboundedReceiver<JobEvent>;
244
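/// RAII guard for a long-running fs job (e.g. a git clone): notifies all current
/// subscribers with `JobEvent::Started` when constructed and `JobEvent::Completed`
/// when dropped, so completion is reported even on early returns.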
245struct JobTracker {
246 id: JobId,
247 subscribers: Arc<Mutex<Vec<JobEventSender>>>,
248}
249
250impl JobTracker {
251 fn new(info: JobInfo, subscribers: Arc<Mutex<Vec<JobEventSender>>>) -> Self {
252 let id = info.id;
253 {
254 let mut subs = subscribers.lock();
255 subs.retain(|sender| {
256 sender
257 .unbounded_send(JobEvent::Started { info: info.clone() })
258 .is_ok()
259 });
260 }
261 Self { id, subscribers }
262 }
263}
264
265impl Drop for JobTracker {
266 fn drop(&mut self) {
267 let mut subs = self.subscribers.lock();
268 subs.retain(|sender| {
269 sender
270 .unbounded_send(JobEvent::Completed { id: self.id })
271 .is_ok()
272 });
273 }
274}
275
276impl MTime {
277 /// Conversion intended for persistence and testing.
278 pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
279 MTime(UNIX_EPOCH + Duration::new(secs, nanos))
280 }
281
282 /// Conversion intended for persistence.
283 pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
284 self.0
285 .duration_since(UNIX_EPOCH)
286 .ok()
287 .map(|duration| (duration.as_secs(), duration.subsec_nanos()))
288 }
289
290 /// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
291 /// "_for_user" is to discourage misuse - this method should not be used when making decisions
292 /// about file dirtiness.
293 pub fn timestamp_for_user(self) -> SystemTime {
294 self.0
295 }
296
297 /// Temporary method to split out the behavior changes from introduction of this newtype.
298 pub fn bad_is_greater_than(self, other: MTime) -> bool {
299 self.0 > other.0
300 }
301}
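// Illustrative persistence round-trip using the conversions above (a sketch):
//
//     let mtime = MTime::from_seconds_and_nanos(1_700_000_000, 0);
//     let (secs, nanos) = mtime.to_seconds_and_nanos_for_persistence().unwrap();
//     assert_eq!(MTime::from_seconds_and_nanos(secs, nanos), mtime);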
302
303impl From<proto::Timestamp> for MTime {
304 fn from(timestamp: proto::Timestamp) -> Self {
305 MTime(timestamp.into())
306 }
307}
308
309impl From<MTime> for proto::Timestamp {
310 fn from(mtime: MTime) -> Self {
311 mtime.0.into()
312 }
313}
314
315pub struct RealFs {
316 bundled_git_binary_path: Option<PathBuf>,
317 executor: BackgroundExecutor,
318 next_job_id: Arc<AtomicUsize>,
319 job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
320}
321
322pub trait FileHandle: Send + Sync + std::fmt::Debug {
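    /// Returns the path at which the open file currently resides, even if it has been
    /// renamed or moved since the handle was opened.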
323 fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf>;
324}
325
326impl FileHandle for std::fs::File {
327 #[cfg(target_os = "macos")]
328 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
329 use std::{
330 ffi::{CStr, OsStr},
331 os::unix::ffi::OsStrExt,
332 };
333
334 let fd = self.as_fd();
335 let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit();
336
337 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) };
338 if result == -1 {
339 anyhow::bail!("fcntl returned -1".to_string());
340 }
341
342 // SAFETY: `fcntl` will initialize the path buffer.
343 let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) };
344 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
345 Ok(path)
346 }
347
348 #[cfg(target_os = "linux")]
349 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
350 let fd = self.as_fd();
351 let fd_path = format!("/proc/self/fd/{}", fd.as_raw_fd());
352 let new_path = std::fs::read_link(fd_path)?;
353 if new_path
354 .file_name()
355 .is_some_and(|f| f.to_string_lossy().ends_with(" (deleted)"))
356 {
357 anyhow::bail!("file was deleted")
358 };
359
360 Ok(new_path)
361 }
362
363 #[cfg(target_os = "freebsd")]
364 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
365 use std::{
366 ffi::{CStr, OsStr},
367 os::unix::ffi::OsStrExt,
368 };
369
        let fd = self.as_fd();
        let mut kif = MaybeUninit::<libc::kinfo_file>::uninit();
        // F_KINFO requires the caller to set `kf_structsize` before the call.
        unsafe { (*kif.as_mut_ptr()).kf_structsize = libc::KINFO_FILE_SIZE };

        let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) };
        if result == -1 {
            anyhow::bail!("fcntl returned -1");
        }

        // SAFETY: `fcntl` initialized `kif`, including the NUL-terminated `kf_path`.
        let kif = unsafe { kif.assume_init() };
        let c_str = unsafe { CStr::from_ptr(kif.kf_path.as_ptr()) };
        let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
        Ok(path)
383 }
384
385 #[cfg(target_os = "windows")]
386 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
387 use std::ffi::OsString;
388 use std::os::windows::ffi::OsStringExt;
389 use std::os::windows::io::AsRawHandle;
390
391 use windows::Win32::Foundation::HANDLE;
392 use windows::Win32::Storage::FileSystem::{
393 FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW,
394 };
395
396 let handle = HANDLE(self.as_raw_handle() as _);
397
398 // Query required buffer size (in wide chars)
399 let required_len =
400 unsafe { GetFinalPathNameByHandleW(handle, &mut [], FILE_NAME_NORMALIZED) };
401 if required_len == 0 {
402 anyhow::bail!("GetFinalPathNameByHandleW returned 0 length");
403 }
404
405 // Allocate buffer and retrieve the path
406 let mut buf: Vec<u16> = vec![0u16; required_len as usize + 1];
407 let written = unsafe { GetFinalPathNameByHandleW(handle, &mut buf, FILE_NAME_NORMALIZED) };
408 if written == 0 {
409 anyhow::bail!("GetFinalPathNameByHandleW failed to write path");
410 }
411
412 let os_str: OsString = OsString::from_wide(&buf[..written as usize]);
413 Ok(PathBuf::from(os_str))
414 }
415}
416
417pub struct RealWatcher {}
418
419impl RealFs {
420 pub fn new(git_binary_path: Option<PathBuf>, executor: BackgroundExecutor) -> Self {
421 Self {
422 bundled_git_binary_path: git_binary_path,
423 executor,
424 next_job_id: Arc::new(AtomicUsize::new(0)),
425 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
426 }
427 }
428
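    /// Windows-specific canonicalization that resolves symlinks component by component
    /// while avoiding the `\\?\` verbatim prefixes returned by `std::fs::canonicalize`.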
429 #[cfg(target_os = "windows")]
430 fn canonicalize(path: &Path) -> Result<PathBuf> {
431 let mut strip_prefix = None;
432
433 let mut new_path = PathBuf::new();
434 for component in path.components() {
435 match component {
436 std::path::Component::Prefix(_) => {
437 let canonicalized = std::fs::canonicalize(component)?;
438
439 let mut strip = PathBuf::new();
440 for component in canonicalized.components() {
441 match component {
442 Component::Prefix(prefix_component) => {
443 match prefix_component.kind() {
444 std::path::Prefix::Verbatim(os_str) => {
445 strip.push(os_str);
446 }
447 std::path::Prefix::VerbatimUNC(host, share) => {
448 strip.push("\\\\");
449 strip.push(host);
450 strip.push(share);
451 }
452 std::path::Prefix::VerbatimDisk(disk) => {
453 strip.push(format!("{}:", disk as char));
454 }
455 _ => strip.push(component),
456 };
457 }
458 _ => strip.push(component),
459 }
460 }
461 strip_prefix = Some(strip);
462 new_path.push(component);
463 }
464 std::path::Component::RootDir => {
465 new_path.push(component);
466 }
467 std::path::Component::CurDir => {
468 if strip_prefix.is_none() {
469 // unrooted path
470 new_path.push(component);
471 }
472 }
473 std::path::Component::ParentDir => {
474 if strip_prefix.is_some() {
475 // rooted path
476 new_path.pop();
477 } else {
478 new_path.push(component);
479 }
480 }
481 std::path::Component::Normal(_) => {
482 if let Ok(link) = std::fs::read_link(new_path.join(component)) {
483 let link = match &strip_prefix {
484 Some(e) => link.strip_prefix(e).unwrap_or(&link),
485 None => &link,
486 };
487 new_path.extend(link);
488 } else {
489 new_path.push(component);
490 }
491 }
492 }
493 }
494
495 Ok(new_path)
496 }
497}
498
499#[async_trait::async_trait]
500impl Fs for RealFs {
501 async fn create_dir(&self, path: &Path) -> Result<()> {
502 Ok(smol::fs::create_dir_all(path).await?)
503 }
504
505 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
506 #[cfg(unix)]
507 smol::fs::unix::symlink(target, path).await?;
508
509 #[cfg(windows)]
510 if smol::fs::metadata(&target).await?.is_dir() {
511 let status = new_smol_command("cmd")
512 .args(["/C", "mklink", "/J"])
513 .args([path, target.as_path()])
514 .status()
515 .await?;
516
517 if !status.success() {
518 return Err(anyhow::anyhow!(
519 "Failed to create junction from {:?} to {:?}",
520 path,
521 target
522 ));
523 }
524 } else {
525 smol::fs::windows::symlink_file(target, path).await?
526 }
527
528 Ok(())
529 }
530
531 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
532 let mut open_options = smol::fs::OpenOptions::new();
533 open_options.write(true).create(true);
534 if options.overwrite {
535 open_options.truncate(true);
536 } else if !options.ignore_if_exists {
537 open_options.create_new(true);
538 }
539 open_options.open(path).await?;
540 Ok(())
541 }
542
543 async fn create_file_with(
544 &self,
545 path: &Path,
546 content: Pin<&mut (dyn AsyncRead + Send)>,
547 ) -> Result<()> {
548 let mut file = smol::fs::File::create(&path).await?;
549 futures::io::copy(content, &mut file).await?;
550 Ok(())
551 }
552
553 async fn extract_tar_file(
554 &self,
555 path: &Path,
556 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
557 ) -> Result<()> {
558 content.unpack(path).await?;
559 Ok(())
560 }
561
562 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
563 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
564 if options.ignore_if_exists {
565 return Ok(());
566 } else {
567 anyhow::bail!("{target:?} already exists");
568 }
569 }
570
571 smol::fs::copy(source, target).await?;
572 Ok(())
573 }
574
575 async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
576 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
577 if options.ignore_if_exists {
578 return Ok(());
579 } else {
580 anyhow::bail!("{target:?} already exists");
581 }
582 }
583
584 if options.create_parents {
585 if let Some(parent) = target.parent() {
586 self.create_dir(parent).await?;
587 }
588 }
589
590 smol::fs::rename(source, target).await?;
591 Ok(())
592 }
593
594 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
595 let result = if options.recursive {
596 smol::fs::remove_dir_all(path).await
597 } else {
598 smol::fs::remove_dir(path).await
599 };
600 match result {
601 Ok(()) => Ok(()),
602 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
603 Ok(())
604 }
605 Err(err) => Err(err)?,
606 }
607 }
608
609 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
610 #[cfg(windows)]
611 if let Ok(Some(metadata)) = self.metadata(path).await
612 && metadata.is_symlink
613 && metadata.is_dir
614 {
615 self.remove_dir(
616 path,
617 RemoveOptions {
618 recursive: false,
619 ignore_if_not_exists: true,
620 },
621 )
622 .await?;
623 return Ok(());
624 }
625
626 match smol::fs::remove_file(path).await {
627 Ok(()) => Ok(()),
628 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
629 Ok(())
630 }
631 Err(err) => Err(err)?,
632 }
633 }
634
635 #[cfg(target_os = "macos")]
636 async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
637 use cocoa::{
638 base::{id, nil},
639 foundation::{NSAutoreleasePool, NSString},
640 };
641 use objc::{class, msg_send, sel, sel_impl};
642
643 unsafe {
644 unsafe fn ns_string(string: &str) -> id {
645 unsafe { NSString::alloc(nil).init_str(string).autorelease() }
646 }
647
648 let url: id = msg_send![class!(NSURL), fileURLWithPath: ns_string(path.to_string_lossy().as_ref())];
649 let array: id = msg_send![class!(NSArray), arrayWithObject: url];
650 let workspace: id = msg_send![class!(NSWorkspace), sharedWorkspace];
651
652 let _: id = msg_send![workspace, recycleURLs: array completionHandler: nil];
653 }
654 Ok(())
655 }
656
657 #[cfg(any(target_os = "linux", target_os = "freebsd"))]
658 async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
659 if let Ok(Some(metadata)) = self.metadata(path).await
660 && metadata.is_symlink
661 {
662 // TODO: trash_file does not support trashing symlinks yet - https://github.com/bilelmoussaoui/ashpd/issues/255
663 return self.remove_file(path, RemoveOptions::default()).await;
664 }
665 let file = smol::fs::File::open(path).await?;
666 match trash::trash_file(&file.as_fd()).await {
667 Ok(_) => Ok(()),
668 Err(err) => {
669 log::error!("Failed to trash file: {}", err);
670 // Trashing files can fail if you don't have a trashing dbus service configured.
671 // In that case, delete the file directly instead.
672 return self.remove_file(path, RemoveOptions::default()).await;
673 }
674 }
675 }
676
677 #[cfg(target_os = "windows")]
678 async fn trash_file(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
679 use util::paths::SanitizedPath;
680 use windows::{
681 Storage::{StorageDeleteOption, StorageFile},
682 core::HSTRING,
683 };
        // todo(windows)
        // When a new version of `windows-rs` is released, make this operation `async`.
686 let path = path.canonicalize()?;
687 let path = SanitizedPath::new(&path);
688 let path_string = path.to_string();
689 let file = StorageFile::GetFileFromPathAsync(&HSTRING::from(path_string))?.get()?;
690 file.DeleteAsync(StorageDeleteOption::Default)?.get()?;
691 Ok(())
692 }
693
694 #[cfg(target_os = "macos")]
695 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
696 self.trash_file(path, options).await
697 }
698
699 #[cfg(any(target_os = "linux", target_os = "freebsd"))]
700 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
701 self.trash_file(path, options).await
702 }
703
704 #[cfg(target_os = "windows")]
705 async fn trash_dir(&self, path: &Path, _options: RemoveOptions) -> Result<()> {
706 use util::paths::SanitizedPath;
707 use windows::{
708 Storage::{StorageDeleteOption, StorageFolder},
709 core::HSTRING,
710 };
711
        // todo(windows)
        // When a new version of `windows-rs` is released, make this operation `async`.
714 let path = path.canonicalize()?;
715 let path = SanitizedPath::new(&path);
716 let path_string = path.to_string();
717 let folder = StorageFolder::GetFolderFromPathAsync(&HSTRING::from(path_string))?.get()?;
718 folder.DeleteAsync(StorageDeleteOption::Default)?.get()?;
719 Ok(())
720 }
721
722 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
723 Ok(Box::new(std::fs::File::open(path)?))
724 }
725
726 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
727 let mut options = std::fs::OpenOptions::new();
728 options.read(true);
729 #[cfg(windows)]
730 {
731 use std::os::windows::fs::OpenOptionsExt;
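            // FILE_FLAG_BACKUP_SEMANTICS is required to obtain a handle to a directory on Windows.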
732 options.custom_flags(windows::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS.0);
733 }
734 Ok(Arc::new(options.open(path)?))
735 }
736
737 async fn load(&self, path: &Path) -> Result<String> {
738 let path = path.to_path_buf();
739 self.executor
740 .spawn(async move { Ok(std::fs::read_to_string(path)?) })
741 .await
742 }
743
744 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
745 let path = path.to_path_buf();
746 let bytes = self
747 .executor
748 .spawn(async move { std::fs::read(path) })
749 .await?;
750 Ok(bytes)
751 }
752
753 #[cfg(not(target_os = "windows"))]
754 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
755 smol::unblock(move || {
756 // Use the directory of the destination as temp dir to avoid
757 // invalid cross-device link error, and XDG_CACHE_DIR for fallback.
758 // See https://github.com/zed-industries/zed/pull/8437 for more details.
759 let mut tmp_file =
760 tempfile::NamedTempFile::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
761 tmp_file.write_all(data.as_bytes())?;
762 tmp_file.persist(path)?;
763 anyhow::Ok(())
764 })
765 .await?;
766
767 Ok(())
768 }
769
770 #[cfg(target_os = "windows")]
771 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
772 smol::unblock(move || {
773 // If temp dir is set to a different drive than the destination,
774 // we receive error:
775 //
776 // failed to persist temporary file:
777 // The system cannot move the file to a different disk drive. (os error 17)
778 //
779 // This is because `ReplaceFileW` does not support cross volume moves.
780 // See the remark section: "The backup file, replaced file, and replacement file must all reside on the same volume."
781 // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-replacefilew#remarks
782 //
783 // So we use the directory of the destination as a temp dir to avoid it.
784 // https://github.com/zed-industries/zed/issues/16571
785 let temp_dir = TempDir::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
786 let temp_file = {
787 let temp_file_path = temp_dir.path().join("temp_file");
788 let mut file = std::fs::File::create_new(&temp_file_path)?;
789 file.write_all(data.as_bytes())?;
790 temp_file_path
791 };
792 atomic_replace(path.as_path(), temp_file.as_path())?;
793 anyhow::Ok(())
794 })
795 .await?;
796 Ok(())
797 }
798
799 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
800 let buffer_size = text.summary().len.min(10 * 1024);
801 if let Some(path) = path.parent() {
802 self.create_dir(path).await?;
803 }
804 let file = smol::fs::File::create(path).await?;
805 let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
806 for chunk in chunks(text, line_ending) {
807 writer.write_all(chunk.as_bytes()).await?;
808 }
809 writer.flush().await?;
810 Ok(())
811 }
812
813 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
814 if let Some(path) = path.parent() {
815 self.create_dir(path).await?;
816 }
817 let path = path.to_owned();
818 let contents = content.to_owned();
819 self.executor
820 .spawn(async move {
821 std::fs::write(path, contents)?;
822 Ok(())
823 })
824 .await
825 }
826
827 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
828 let path = path.to_owned();
829 self.executor
830 .spawn(async move {
831 #[cfg(target_os = "windows")]
832 let result = Self::canonicalize(&path);
833
834 #[cfg(not(target_os = "windows"))]
835 let result = std::fs::canonicalize(&path);
836
837 result.with_context(|| format!("canonicalizing {path:?}"))
838 })
839 .await
840 }
841
842 async fn is_file(&self, path: &Path) -> bool {
843 let path = path.to_owned();
844 self.executor
845 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) })
846 .await
847 }
848
849 async fn is_dir(&self, path: &Path) -> bool {
850 let path = path.to_owned();
851 self.executor
852 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) })
853 .await
854 }
855
856 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
857 let path_buf = path.to_owned();
858 let symlink_metadata = match self
859 .executor
860 .spawn(async move { std::fs::symlink_metadata(&path_buf) })
861 .await
862 {
863 Ok(metadata) => metadata,
864 Err(err) => {
865 return match err.kind() {
866 io::ErrorKind::NotFound | io::ErrorKind::NotADirectory => Ok(None),
867 _ => Err(anyhow::Error::new(err)),
868 };
869 }
870 };
871
872 let is_symlink = symlink_metadata.file_type().is_symlink();
873 let metadata = if is_symlink {
874 let path_buf = path.to_path_buf();
875 let path_exists = self
876 .executor
877 .spawn(async move {
878 path_buf
879 .try_exists()
880 .with_context(|| format!("checking existence for path {path_buf:?}"))
881 })
882 .await?;
883 if path_exists {
884 let path_buf = path.to_path_buf();
885 self.executor
886 .spawn(async move { std::fs::metadata(path_buf) })
887 .await
                    .with_context(|| format!("accessing symlink for path {path:?}"))?
889 } else {
890 symlink_metadata
891 }
892 } else {
893 symlink_metadata
894 };
895
896 #[cfg(unix)]
897 let inode = metadata.ino();
898
899 #[cfg(windows)]
900 let inode = file_id(path).await?;
901
902 #[cfg(windows)]
903 let is_fifo = false;
904
905 #[cfg(unix)]
906 let is_fifo = metadata.file_type().is_fifo();
907
908 let path_buf = path.to_path_buf();
909 let is_executable = self
910 .executor
911 .spawn(async move { path_buf.is_executable() })
912 .await;
913
914 Ok(Some(Metadata {
915 inode,
916 mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)),
917 len: metadata.len(),
918 is_symlink,
919 is_dir: metadata.file_type().is_dir(),
920 is_fifo,
921 is_executable,
922 }))
923 }
924
925 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
926 let path = path.to_owned();
927 let path = self
928 .executor
929 .spawn(async move { std::fs::read_link(&path) })
930 .await?;
931 Ok(path)
932 }
933
934 async fn read_dir(
935 &self,
936 path: &Path,
937 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
938 let path = path.to_owned();
939 let result = iter(
940 self.executor
941 .spawn(async move { std::fs::read_dir(path) })
942 .await?,
943 )
944 .map(|entry| match entry {
945 Ok(entry) => Ok(entry.path()),
946 Err(error) => Err(anyhow!("failed to read dir entry {error:?}")),
947 });
948 Ok(Box::pin(result))
949 }
950
951 #[cfg(target_os = "macos")]
952 async fn watch(
953 &self,
954 path: &Path,
955 latency: Duration,
956 ) -> (
957 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
958 Arc<dyn Watcher>,
959 ) {
960 use fsevent::StreamFlags;
961
962 let (events_tx, events_rx) = smol::channel::unbounded();
963 let handles = Arc::new(parking_lot::Mutex::new(collections::BTreeMap::default()));
964 let watcher = Arc::new(mac_watcher::MacWatcher::new(
965 events_tx,
966 Arc::downgrade(&handles),
967 latency,
968 ));
969 watcher.add(path).expect("handles can't be dropped");
970
971 (
972 Box::pin(
973 events_rx
974 .map(|events| {
975 events
976 .into_iter()
977 .map(|event| {
978 log::trace!("fs path event: {event:?}");
979 let kind = if event.flags.contains(StreamFlags::ITEM_REMOVED) {
980 Some(PathEventKind::Removed)
981 } else if event.flags.contains(StreamFlags::ITEM_CREATED) {
982 Some(PathEventKind::Created)
983 } else if event.flags.contains(StreamFlags::ITEM_MODIFIED)
984 | event.flags.contains(StreamFlags::ITEM_RENAMED)
985 {
986 Some(PathEventKind::Changed)
987 } else {
988 None
989 };
990 PathEvent {
991 path: event.path,
992 kind,
993 }
994 })
995 .collect()
996 })
997 .chain(futures::stream::once(async move {
998 drop(handles);
999 vec![]
1000 })),
1001 ),
1002 watcher,
1003 )
1004 }
1005
1006 #[cfg(not(target_os = "macos"))]
1007 async fn watch(
1008 &self,
1009 path: &Path,
1010 latency: Duration,
1011 ) -> (
1012 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
1013 Arc<dyn Watcher>,
1014 ) {
1015 use util::{ResultExt as _, paths::SanitizedPath};
1016
1017 let (tx, rx) = smol::channel::unbounded();
1018 let pending_paths: Arc<Mutex<Vec<PathEvent>>> = Default::default();
1019 let watcher = Arc::new(fs_watcher::FsWatcher::new(tx, pending_paths.clone()));
1020
1021 // If the path doesn't exist yet (e.g. settings.json), watch the parent dir to learn when it's created.
1022 if let Err(e) = watcher.add(path)
1023 && let Some(parent) = path.parent()
1024 && let Err(parent_e) = watcher.add(parent)
1025 {
1026 log::warn!(
1027 "Failed to watch {} and its parent directory {}:\n{e}\n{parent_e}",
1028 path.display(),
1029 parent.display()
1030 );
1031 }
1032
1033 // Check if path is a symlink and follow the target parent
        if let Ok(mut target) = self.read_link(path).await {
1035 log::trace!("watch symlink {path:?} -> {target:?}");
1036 // Check if symlink target is relative path, if so make it absolute
1037 if target.is_relative()
1038 && let Some(parent) = path.parent()
1039 {
1040 target = parent.join(target);
1041 if let Ok(canonical) = self.canonicalize(&target).await {
1042 target = SanitizedPath::new(&canonical).as_path().to_path_buf();
1043 }
1044 }
1045 watcher.add(&target).ok();
1046 if let Some(parent) = target.parent() {
1047 watcher.add(parent).log_err();
1048 }
1049 }
1050
1051 (
1052 Box::pin(rx.filter_map({
1053 let watcher = watcher.clone();
1054 move |_| {
1055 let _ = watcher.clone();
1056 let pending_paths = pending_paths.clone();
1057 async move {
1058 smol::Timer::after(latency).await;
1059 let paths = std::mem::take(&mut *pending_paths.lock());
1060 (!paths.is_empty()).then_some(paths)
1061 }
1062 }
1063 })),
1064 watcher,
1065 )
1066 }
1067
1068 fn open_repo(
1069 &self,
1070 dotgit_path: &Path,
1071 system_git_binary_path: Option<&Path>,
1072 ) -> Option<Arc<dyn GitRepository>> {
1073 Some(Arc::new(RealGitRepository::new(
1074 dotgit_path,
1075 self.bundled_git_binary_path.clone(),
1076 system_git_binary_path.map(|path| path.to_path_buf()),
1077 self.executor.clone(),
1078 )?))
1079 }
1080
1081 async fn git_init(
1082 &self,
1083 abs_work_directory_path: &Path,
1084 fallback_branch_name: String,
1085 ) -> Result<()> {
1086 let config = new_smol_command("git")
1087 .current_dir(abs_work_directory_path)
1088 .args(&["config", "--global", "--get", "init.defaultBranch"])
1089 .output()
1090 .await?;
1091
        let branch_name = if config.status.success() && !config.stdout.is_empty() {
            String::from_utf8_lossy(&config.stdout)
        } else {
            Cow::Borrowed(fallback_branch_name.as_str())
        };
1099
1100 new_smol_command("git")
1101 .current_dir(abs_work_directory_path)
1102 .args(&["init", "-b"])
1103 .arg(branch_name.trim())
1104 .output()
1105 .await?;
1106
1107 Ok(())
1108 }
1109
1110 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()> {
1111 let job_id = self.next_job_id.fetch_add(1, Ordering::SeqCst);
1112 let job_info = JobInfo {
1113 id: job_id,
1114 start: Instant::now(),
1115 message: SharedString::from(format!("Cloning {}", repo_url)),
1116 };
1117
1118 let _job_tracker = JobTracker::new(job_info, self.job_event_subscribers.clone());
1119
1120 let output = new_smol_command("git")
1121 .current_dir(abs_work_directory)
1122 .args(&["clone", repo_url])
1123 .output()
1124 .await?;
1125
1126 if !output.status.success() {
1127 anyhow::bail!(
1128 "git clone failed: {}",
1129 String::from_utf8_lossy(&output.stderr)
1130 );
1131 }
1132
1133 Ok(())
1134 }
1135
1136 fn is_fake(&self) -> bool {
1137 false
1138 }
1139
1140 fn subscribe_to_jobs(&self) -> JobEventReceiver {
1141 let (sender, receiver) = futures::channel::mpsc::unbounded();
1142 self.job_event_subscribers.lock().push(sender);
1143 receiver
1144 }
1145
    /// Checks whether the file system is case-sensitive by attempting to create two files
    /// whose names differ only in casing.
    ///
    /// Both files are created in a temporary directory that is removed at the end.
1150 async fn is_case_sensitive(&self) -> Result<bool> {
1151 let temp_dir = TempDir::new()?;
1152 let test_file_1 = temp_dir.path().join("case_sensitivity_test.tmp");
1153 let test_file_2 = temp_dir.path().join("CASE_SENSITIVITY_TEST.TMP");
1154
1155 let create_opts = CreateOptions {
1156 overwrite: false,
1157 ignore_if_exists: false,
1158 };
1159
1160 // Create file1
1161 self.create_file(&test_file_1, create_opts).await?;
1162
1163 // Now check whether it's possible to create file2
1164 let case_sensitive = match self.create_file(&test_file_2, create_opts).await {
1165 Ok(_) => Ok(true),
1166 Err(e) => {
1167 if let Some(io_error) = e.downcast_ref::<io::Error>() {
1168 if io_error.kind() == io::ErrorKind::AlreadyExists {
1169 Ok(false)
1170 } else {
1171 Err(e)
1172 }
1173 } else {
1174 Err(e)
1175 }
1176 }
1177 };
1178
1179 temp_dir.close()?;
1180 case_sensitive
1181 }
1182}
1183
1184#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
1185impl Watcher for RealWatcher {
1186 fn add(&self, _: &Path) -> Result<()> {
1187 Ok(())
1188 }
1189
1190 fn remove(&self, _: &Path) -> Result<()> {
1191 Ok(())
1192 }
1193}
1194
1195#[cfg(any(test, feature = "test-support"))]
1196pub struct FakeFs {
1197 this: std::sync::Weak<Self>,
1198 // Use an unfair lock to ensure tests are deterministic.
1199 state: Arc<Mutex<FakeFsState>>,
1200 executor: gpui::BackgroundExecutor,
1201}
1202
1203#[cfg(any(test, feature = "test-support"))]
1204struct FakeFsState {
1205 root: FakeFsEntry,
1206 next_inode: u64,
1207 next_mtime: SystemTime,
1208 git_event_tx: smol::channel::Sender<PathBuf>,
1209 event_txs: Vec<(PathBuf, smol::channel::Sender<Vec<PathEvent>>)>,
1210 events_paused: bool,
1211 buffered_events: Vec<PathEvent>,
1212 metadata_call_count: usize,
1213 read_dir_call_count: usize,
1214 path_write_counts: std::collections::HashMap<PathBuf, usize>,
1215 moves: std::collections::HashMap<u64, PathBuf>,
1216 job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
1217}
1218
1219#[cfg(any(test, feature = "test-support"))]
1220#[derive(Clone, Debug)]
1221enum FakeFsEntry {
1222 File {
1223 inode: u64,
1224 mtime: MTime,
1225 len: u64,
1226 content: Vec<u8>,
1227 // The path to the repository state directory, if this is a gitfile.
1228 git_dir_path: Option<PathBuf>,
1229 },
1230 Dir {
1231 inode: u64,
1232 mtime: MTime,
1233 len: u64,
1234 entries: BTreeMap<String, FakeFsEntry>,
1235 git_repo_state: Option<Arc<Mutex<FakeGitRepositoryState>>>,
1236 },
1237 Symlink {
1238 target: PathBuf,
1239 },
1240}
1241
1242#[cfg(any(test, feature = "test-support"))]
1243impl PartialEq for FakeFsEntry {
1244 fn eq(&self, other: &Self) -> bool {
1245 match (self, other) {
1246 (
1247 Self::File {
1248 inode: l_inode,
1249 mtime: l_mtime,
1250 len: l_len,
1251 content: l_content,
1252 git_dir_path: l_git_dir_path,
1253 },
1254 Self::File {
1255 inode: r_inode,
1256 mtime: r_mtime,
1257 len: r_len,
1258 content: r_content,
1259 git_dir_path: r_git_dir_path,
1260 },
1261 ) => {
1262 l_inode == r_inode
1263 && l_mtime == r_mtime
1264 && l_len == r_len
1265 && l_content == r_content
1266 && l_git_dir_path == r_git_dir_path
1267 }
1268 (
1269 Self::Dir {
1270 inode: l_inode,
1271 mtime: l_mtime,
1272 len: l_len,
1273 entries: l_entries,
1274 git_repo_state: l_git_repo_state,
1275 },
1276 Self::Dir {
1277 inode: r_inode,
1278 mtime: r_mtime,
1279 len: r_len,
1280 entries: r_entries,
1281 git_repo_state: r_git_repo_state,
1282 },
1283 ) => {
1284 let same_repo_state = match (l_git_repo_state.as_ref(), r_git_repo_state.as_ref()) {
1285 (Some(l), Some(r)) => Arc::ptr_eq(l, r),
1286 (None, None) => true,
1287 _ => false,
1288 };
1289 l_inode == r_inode
1290 && l_mtime == r_mtime
1291 && l_len == r_len
1292 && l_entries == r_entries
1293 && same_repo_state
1294 }
1295 (Self::Symlink { target: l_target }, Self::Symlink { target: r_target }) => {
1296 l_target == r_target
1297 }
1298 _ => false,
1299 }
1300 }
1301}
1302
1303#[cfg(any(test, feature = "test-support"))]
1304impl FakeFsState {
1305 fn get_and_increment_mtime(&mut self) -> MTime {
1306 let mtime = self.next_mtime;
1307 self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
1308 MTime(mtime)
1309 }
1310
1311 fn get_and_increment_inode(&mut self) -> u64 {
1312 let inode = self.next_inode;
1313 self.next_inode += 1;
1314 inode
1315 }
1316
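    /// Walks `target` through the fake tree, resolving `.`, `..`, and symlinks
    /// (a trailing symlink is only followed when `follow_symlink` is true), returning
    /// the canonical path or `None` if any component does not exist.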
1317 fn canonicalize(&self, target: &Path, follow_symlink: bool) -> Option<PathBuf> {
1318 let mut canonical_path = PathBuf::new();
1319 let mut path = target.to_path_buf();
1320 let mut entry_stack = Vec::new();
1321 'outer: loop {
1322 let mut path_components = path.components().peekable();
1323 let mut prefix = None;
1324 while let Some(component) = path_components.next() {
1325 match component {
1326 Component::Prefix(prefix_component) => prefix = Some(prefix_component),
1327 Component::RootDir => {
1328 entry_stack.clear();
1329 entry_stack.push(&self.root);
1330 canonical_path.clear();
1331 match prefix {
1332 Some(prefix_component) => {
1333 canonical_path = PathBuf::from(prefix_component.as_os_str());
1334 // Prefixes like `C:\\` are represented without their trailing slash, so we have to re-add it.
1335 canonical_path.push(std::path::MAIN_SEPARATOR_STR);
1336 }
1337 None => canonical_path = PathBuf::from(std::path::MAIN_SEPARATOR_STR),
1338 }
1339 }
1340 Component::CurDir => {}
1341 Component::ParentDir => {
1342 entry_stack.pop()?;
1343 canonical_path.pop();
1344 }
1345 Component::Normal(name) => {
1346 let current_entry = *entry_stack.last()?;
1347 if let FakeFsEntry::Dir { entries, .. } = current_entry {
1348 let entry = entries.get(name.to_str().unwrap())?;
1349 if (path_components.peek().is_some() || follow_symlink)
1350 && let FakeFsEntry::Symlink { target, .. } = entry
1351 {
1352 let mut target = target.clone();
1353 target.extend(path_components);
1354 path = target;
1355 continue 'outer;
1356 }
1357 entry_stack.push(entry);
1358 canonical_path = canonical_path.join(name);
1359 } else {
1360 return None;
1361 }
1362 }
1363 }
1364 }
1365 break;
1366 }
1367
1368 if entry_stack.is_empty() {
1369 None
1370 } else {
1371 Some(canonical_path)
1372 }
1373 }
1374
1375 fn try_entry(
1376 &mut self,
1377 target: &Path,
1378 follow_symlink: bool,
1379 ) -> Option<(&mut FakeFsEntry, PathBuf)> {
1380 let canonical_path = self.canonicalize(target, follow_symlink)?;
1381
1382 let mut components = canonical_path
1383 .components()
1384 .skip_while(|component| matches!(component, Component::Prefix(_)));
1385 let Some(Component::RootDir) = components.next() else {
1386 panic!(
1387 "the path {:?} was not canonicalized properly {:?}",
1388 target, canonical_path
1389 )
1390 };
1391
1392 let mut entry = &mut self.root;
1393 for component in components {
1394 match component {
1395 Component::Normal(name) => {
1396 if let FakeFsEntry::Dir { entries, .. } = entry {
1397 entry = entries.get_mut(name.to_str().unwrap())?;
1398 } else {
1399 return None;
1400 }
1401 }
1402 _ => {
1403 panic!(
1404 "the path {:?} was not canonicalized properly {:?}",
1405 target, canonical_path
1406 )
1407 }
1408 }
1409 }
1410
1411 Some((entry, canonical_path))
1412 }
1413
1414 fn entry(&mut self, target: &Path) -> Result<&mut FakeFsEntry> {
1415 Ok(self
1416 .try_entry(target, true)
1417 .ok_or_else(|| {
1418 anyhow!(io::Error::new(
1419 io::ErrorKind::NotFound,
1420 format!("not found: {target:?}")
1421 ))
1422 })?
1423 .0)
1424 }
1425
1426 fn write_path<Fn, T>(&mut self, path: &Path, callback: Fn) -> Result<T>
1427 where
1428 Fn: FnOnce(btree_map::Entry<String, FakeFsEntry>) -> Result<T>,
1429 {
1430 let path = normalize_path(path);
1431 let filename = path.file_name().context("cannot overwrite the root")?;
1432 let parent_path = path.parent().unwrap();
1433
1434 let parent = self.entry(parent_path)?;
1435 let new_entry = parent
1436 .dir_entries(parent_path)?
1437 .entry(filename.to_str().unwrap().into());
1438 callback(new_entry)
1439 }
1440
1441 fn emit_event<I, T>(&mut self, paths: I)
1442 where
1443 I: IntoIterator<Item = (T, Option<PathEventKind>)>,
1444 T: Into<PathBuf>,
1445 {
1446 self.buffered_events
1447 .extend(paths.into_iter().map(|(path, kind)| PathEvent {
1448 path: path.into(),
1449 kind,
1450 }));
1451
1452 if !self.events_paused {
1453 self.flush_events(self.buffered_events.len());
1454 }
1455 }
1456
1457 fn flush_events(&mut self, mut count: usize) {
1458 count = count.min(self.buffered_events.len());
1459 let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
1460 self.event_txs.retain(|(_, tx)| {
1461 let _ = tx.try_send(events.clone());
1462 !tx.is_closed()
1463 });
1464 }
1465}
1466
1467#[cfg(any(test, feature = "test-support"))]
1468pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
1469 std::sync::LazyLock::new(|| OsStr::new(".git"));
1470
1471#[cfg(any(test, feature = "test-support"))]
1472impl FakeFs {
1473 /// We need to use something large enough for Windows and Unix to consider this a new file.
1474 /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
1475 const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);
1476
1477 pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
1478 let (tx, rx) = smol::channel::bounded::<PathBuf>(10);
1479
1480 let this = Arc::new_cyclic(|this| Self {
1481 this: this.clone(),
1482 executor: executor.clone(),
1483 state: Arc::new(Mutex::new(FakeFsState {
1484 root: FakeFsEntry::Dir {
1485 inode: 0,
1486 mtime: MTime(UNIX_EPOCH),
1487 len: 0,
1488 entries: Default::default(),
1489 git_repo_state: None,
1490 },
1491 git_event_tx: tx,
1492 next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
1493 next_inode: 1,
1494 event_txs: Default::default(),
1495 buffered_events: Vec::new(),
1496 events_paused: false,
1497 read_dir_call_count: 0,
1498 metadata_call_count: 0,
1499 path_write_counts: Default::default(),
1500 moves: Default::default(),
1501 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
1502 })),
1503 });
1504
1505 executor.spawn({
1506 let this = this.clone();
1507 async move {
1508 while let Ok(git_event) = rx.recv().await {
1509 if let Some(mut state) = this.state.try_lock() {
1510 state.emit_event([(git_event, Some(PathEventKind::Changed))]);
1511 } else {
1512 panic!("Failed to lock file system state, this execution would have caused a test hang");
1513 }
1514 }
1515 }
1516 }).detach();
1517
1518 this
1519 }
1520
1521 pub fn set_next_mtime(&self, next_mtime: SystemTime) {
1522 let mut state = self.state.lock();
1523 state.next_mtime = next_mtime;
1524 }
1525
1526 pub fn get_and_increment_mtime(&self) -> MTime {
1527 let mut state = self.state.lock();
1528 state.get_and_increment_mtime()
1529 }
1530
1531 pub async fn touch_path(&self, path: impl AsRef<Path>) {
1532 let mut state = self.state.lock();
1533 let path = path.as_ref();
1534 let new_mtime = state.get_and_increment_mtime();
1535 let new_inode = state.get_and_increment_inode();
1536 state
1537 .write_path(path, move |entry| {
1538 match entry {
1539 btree_map::Entry::Vacant(e) => {
1540 e.insert(FakeFsEntry::File {
1541 inode: new_inode,
1542 mtime: new_mtime,
1543 content: Vec::new(),
1544 len: 0,
1545 git_dir_path: None,
1546 });
1547 }
1548 btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut() {
1549 FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
1550 FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
1551 FakeFsEntry::Symlink { .. } => {}
1552 },
1553 }
1554 Ok(())
1555 })
1556 .unwrap();
1557 state.emit_event([(path.to_path_buf(), Some(PathEventKind::Changed))]);
1558 }
1559
1560 pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
1561 self.write_file_internal(path, content, true).unwrap()
1562 }
1563
1564 pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
1565 let mut state = self.state.lock();
1566 let path = path.as_ref();
1567 let file = FakeFsEntry::Symlink { target };
1568 state
1569 .write_path(path.as_ref(), move |e| match e {
1570 btree_map::Entry::Vacant(e) => {
1571 e.insert(file);
1572 Ok(())
1573 }
1574 btree_map::Entry::Occupied(mut e) => {
1575 *e.get_mut() = file;
1576 Ok(())
1577 }
1578 })
1579 .unwrap();
1580 state.emit_event([(path, Some(PathEventKind::Created))]);
1581 }
1582
1583 fn write_file_internal(
1584 &self,
1585 path: impl AsRef<Path>,
1586 new_content: Vec<u8>,
1587 recreate_inode: bool,
1588 ) -> Result<()> {
1589 let mut state = self.state.lock();
1590 let path_buf = path.as_ref().to_path_buf();
1591 *state.path_write_counts.entry(path_buf).or_insert(0) += 1;
1592 let new_inode = state.get_and_increment_inode();
1593 let new_mtime = state.get_and_increment_mtime();
1594 let new_len = new_content.len() as u64;
1595 let mut kind = None;
1596 state.write_path(path.as_ref(), |entry| {
1597 match entry {
1598 btree_map::Entry::Vacant(e) => {
1599 kind = Some(PathEventKind::Created);
1600 e.insert(FakeFsEntry::File {
1601 inode: new_inode,
1602 mtime: new_mtime,
1603 len: new_len,
1604 content: new_content,
1605 git_dir_path: None,
1606 });
1607 }
1608 btree_map::Entry::Occupied(mut e) => {
1609 kind = Some(PathEventKind::Changed);
1610 if let FakeFsEntry::File {
1611 inode,
1612 mtime,
1613 len,
1614 content,
1615 ..
1616 } = e.get_mut()
1617 {
1618 *mtime = new_mtime;
1619 *content = new_content;
1620 *len = new_len;
1621 if recreate_inode {
1622 *inode = new_inode;
1623 }
1624 } else {
1625 anyhow::bail!("not a file")
1626 }
1627 }
1628 }
1629 Ok(())
1630 })?;
1631 state.emit_event([(path.as_ref(), kind)]);
1632 Ok(())
1633 }
1634
1635 pub fn read_file_sync(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1636 let path = path.as_ref();
1637 let path = normalize_path(path);
1638 let mut state = self.state.lock();
1639 let entry = state.entry(&path)?;
1640 entry.file_content(&path).cloned()
1641 }
1642
1643 async fn load_internal(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1644 let path = path.as_ref();
1645 let path = normalize_path(path);
1646 self.simulate_random_delay().await;
1647 let mut state = self.state.lock();
1648 let entry = state.entry(&path)?;
1649 entry.file_content(&path).cloned()
1650 }
1651
1652 pub fn pause_events(&self) {
1653 self.state.lock().events_paused = true;
1654 }
1655
1656 pub fn unpause_events_and_flush(&self) {
1657 self.state.lock().events_paused = false;
1658 self.flush_events(usize::MAX);
1659 }
1660
1661 pub fn buffered_event_count(&self) -> usize {
1662 self.state.lock().buffered_events.len()
1663 }
1664
1665 pub fn flush_events(&self, count: usize) {
1666 self.state.lock().flush_events(count);
1667 }
1668
1669 pub(crate) fn entry(&self, target: &Path) -> Result<FakeFsEntry> {
1670 self.state.lock().entry(target).cloned()
1671 }
1672
1673 pub(crate) fn insert_entry(&self, target: &Path, new_entry: FakeFsEntry) -> Result<()> {
1674 let mut state = self.state.lock();
1675 state.write_path(target, |entry| {
1676 match entry {
1677 btree_map::Entry::Vacant(vacant_entry) => {
1678 vacant_entry.insert(new_entry);
1679 }
1680 btree_map::Entry::Occupied(mut occupied_entry) => {
1681 occupied_entry.insert(new_entry);
1682 }
1683 }
1684 Ok(())
1685 })
1686 }
1687
1688 #[must_use]
1689 pub fn insert_tree<'a>(
1690 &'a self,
1691 path: impl 'a + AsRef<Path> + Send,
1692 tree: serde_json::Value,
1693 ) -> futures::future::BoxFuture<'a, ()> {
1694 use futures::FutureExt as _;
1695 use serde_json::Value::*;
1696
1697 async move {
1698 let path = path.as_ref();
1699
1700 match tree {
1701 Object(map) => {
1702 self.create_dir(path).await.unwrap();
1703 for (name, contents) in map {
1704 let mut path = PathBuf::from(path);
1705 path.push(name);
1706 self.insert_tree(&path, contents).await;
1707 }
1708 }
1709 Null => {
1710 self.create_dir(path).await.unwrap();
1711 }
1712 String(contents) => {
1713 self.insert_file(&path, contents.into_bytes()).await;
1714 }
1715 _ => {
1716 panic!("JSON object must contain only objects, strings, or null");
1717 }
1718 }
1719 }
1720 .boxed()
1721 }
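    // Illustrative usage from a test (a sketch):
    //
    //     fs.insert_tree("/root", serde_json::json!({
    //         "src": { "main.rs": "fn main() {}" },
    //         "README.md": "hello",
    //         "empty_dir": null,
    //     }))
    //     .await;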
1722
1723 pub fn insert_tree_from_real_fs<'a>(
1724 &'a self,
1725 path: impl 'a + AsRef<Path> + Send,
1726 src_path: impl 'a + AsRef<Path> + Send,
1727 ) -> futures::future::BoxFuture<'a, ()> {
1728 use futures::FutureExt as _;
1729
1730 async move {
1731 let path = path.as_ref();
1732 if std::fs::metadata(&src_path).unwrap().is_file() {
1733 let contents = std::fs::read(src_path).unwrap();
1734 self.insert_file(path, contents).await;
1735 } else {
1736 self.create_dir(path).await.unwrap();
1737 for entry in std::fs::read_dir(&src_path).unwrap() {
1738 let entry = entry.unwrap();
1739 self.insert_tree_from_real_fs(path.join(entry.file_name()), entry.path())
1740 .await;
1741 }
1742 }
1743 }
1744 .boxed()
1745 }
1746
1747 pub fn with_git_state_and_paths<T, F>(
1748 &self,
1749 dot_git: &Path,
1750 emit_git_event: bool,
1751 f: F,
1752 ) -> Result<T>
1753 where
1754 F: FnOnce(&mut FakeGitRepositoryState, &Path, &Path) -> T,
1755 {
1756 let mut state = self.state.lock();
1757 let git_event_tx = state.git_event_tx.clone();
1758 let entry = state.entry(dot_git).context("open .git")?;
1759
1760 if let FakeFsEntry::Dir { git_repo_state, .. } = entry {
1761 let repo_state = git_repo_state.get_or_insert_with(|| {
1762 log::debug!("insert git state for {dot_git:?}");
1763 Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
1764 });
1765 let mut repo_state = repo_state.lock();
1766
1767 let result = f(&mut repo_state, dot_git, dot_git);
1768
1769 drop(repo_state);
1770 if emit_git_event {
1771 state.emit_event([(dot_git, Some(PathEventKind::Changed))]);
1772 }
1773
1774 Ok(result)
1775 } else if let FakeFsEntry::File {
1776 content,
1777 git_dir_path,
1778 ..
1779 } = &mut *entry
1780 {
1781 let path = match git_dir_path {
1782 Some(path) => path,
1783 None => {
1784 let path = std::str::from_utf8(content)
1785 .ok()
1786 .and_then(|content| content.strip_prefix("gitdir:"))
1787 .context("not a valid gitfile")?
1788 .trim();
1789 git_dir_path.insert(normalize_path(&dot_git.parent().unwrap().join(path)))
1790 }
1791 }
1792 .clone();
1793 let Some((git_dir_entry, canonical_path)) = state.try_entry(&path, true) else {
1794 anyhow::bail!("pointed-to git dir {path:?} not found")
1795 };
1796 let FakeFsEntry::Dir {
1797 git_repo_state,
1798 entries,
1799 ..
1800 } = git_dir_entry
1801 else {
1802 anyhow::bail!("gitfile points to a non-directory")
1803 };
1804 let common_dir = if let Some(child) = entries.get("commondir") {
1805 Path::new(
1806 std::str::from_utf8(child.file_content("commondir".as_ref())?)
1807 .context("commondir content")?,
1808 )
1809 .to_owned()
1810 } else {
1811 canonical_path.clone()
1812 };
1813 let repo_state = git_repo_state.get_or_insert_with(|| {
1814 Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
1815 });
1816 let mut repo_state = repo_state.lock();
1817
1818 let result = f(&mut repo_state, &canonical_path, &common_dir);
1819
1820 if emit_git_event {
1821 drop(repo_state);
1822 state.emit_event([(canonical_path, Some(PathEventKind::Changed))]);
1823 }
1824
1825 Ok(result)
1826 } else {
1827 anyhow::bail!("not a valid git repository");
1828 }
1829 }
1830
1831 pub fn with_git_state<T, F>(&self, dot_git: &Path, emit_git_event: bool, f: F) -> Result<T>
1832 where
1833 F: FnOnce(&mut FakeGitRepositoryState) -> T,
1834 {
1835 self.with_git_state_and_paths(dot_git, emit_git_event, |state, _, _| f(state))
1836 }
1837
1838 pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
1839 self.with_git_state(dot_git, true, |state| {
1840 let branch = branch.map(Into::into);
1841 state.branches.extend(branch.clone());
1842 state.current_branch_name = branch
1843 })
1844 .unwrap();
1845 }
1846
1847 pub fn insert_branches(&self, dot_git: &Path, branches: &[&str]) {
1848 self.with_git_state(dot_git, true, |state| {
1849 if let Some(first) = branches.first()
1850 && state.current_branch_name.is_none()
1851 {
1852 state.current_branch_name = Some(first.to_string())
1853 }
1854 state
1855 .branches
1856 .extend(branches.iter().map(ToString::to_string));
1857 })
1858 .unwrap();
1859 }
1860
1861 pub fn set_unmerged_paths_for_repo(
1862 &self,
1863 dot_git: &Path,
1864 unmerged_state: &[(RepoPath, UnmergedStatus)],
1865 ) {
1866 self.with_git_state(dot_git, true, |state| {
1867 state.unmerged_paths.clear();
1868 state.unmerged_paths.extend(
1869 unmerged_state
1870 .iter()
1871 .map(|(path, content)| (path.clone(), *content)),
1872 );
1873 })
1874 .unwrap();
1875 }
1876
1877 pub fn set_index_for_repo(&self, dot_git: &Path, index_state: &[(&str, String)]) {
1878 self.with_git_state(dot_git, true, |state| {
1879 state.index_contents.clear();
1880 state.index_contents.extend(
1881 index_state
1882 .iter()
1883 .map(|(path, content)| (repo_path(path), content.clone())),
1884 );
1885 })
1886 .unwrap();
1887 }
1888
1889 pub fn set_head_for_repo(
1890 &self,
1891 dot_git: &Path,
1892 head_state: &[(&str, String)],
1893 sha: impl Into<String>,
1894 ) {
1895 self.with_git_state(dot_git, true, |state| {
1896 state.head_contents.clear();
1897 state.head_contents.extend(
1898 head_state
1899 .iter()
1900 .map(|(path, content)| (repo_path(path), content.clone())),
1901 );
1902 state.refs.insert("HEAD".into(), sha.into());
1903 })
1904 .unwrap();
1905 }
1906
1907 pub fn set_head_and_index_for_repo(&self, dot_git: &Path, contents_by_path: &[(&str, String)]) {
1908 self.with_git_state(dot_git, true, |state| {
1909 state.head_contents.clear();
1910 state.head_contents.extend(
1911 contents_by_path
1912 .iter()
1913 .map(|(path, contents)| (repo_path(path), contents.clone())),
1914 );
1915 state.index_contents = state.head_contents.clone();
1916 })
1917 .unwrap();
1918 }
1919
1920 pub fn set_merge_base_content_for_repo(
1921 &self,
1922 dot_git: &Path,
1923 contents_by_path: &[(&str, String)],
1924 ) {
1925 self.with_git_state(dot_git, true, |state| {
1926 use git::Oid;
1927
1928 state.merge_base_contents.clear();
1929 let oids = (1..)
1930 .map(|n| n.to_string())
1931 .map(|n| Oid::from_bytes(n.repeat(20).as_bytes()).unwrap());
1932 for ((path, content), oid) in contents_by_path.iter().zip(oids) {
1933 state.merge_base_contents.insert(repo_path(path), oid);
1934 state.oids.insert(oid, content.clone());
1935 }
1936 })
1937 .unwrap();
1938 }
1939
1940 pub fn set_blame_for_repo(&self, dot_git: &Path, blames: Vec<(RepoPath, git::blame::Blame)>) {
1941 self.with_git_state(dot_git, true, |state| {
1942 state.blames.clear();
1943 state.blames.extend(blames);
1944 })
1945 .unwrap();
1946 }
1947
1948 /// Put the given git repository into a state with the given status,
1949 /// by mutating the head, index, and unmerged state.
1950 pub fn set_status_for_repo(&self, dot_git: &Path, statuses: &[(&str, FileStatus)]) {
1951 let workdir_path = dot_git.parent().unwrap();
1952 let workdir_contents = self.files_with_contents(workdir_path);
1953 self.with_git_state(dot_git, true, |state| {
1954 state.index_contents.clear();
1955 state.head_contents.clear();
1956 state.unmerged_paths.clear();
1957 for (path, content) in workdir_contents {
1958 use util::{paths::PathStyle, rel_path::RelPath};
1959
                let repo_path = RelPath::new(
                    path.strip_prefix(&workdir_path).unwrap(),
                    PathStyle::local(),
                )
                .unwrap();
1961 let repo_path = RepoPath::from_rel_path(&repo_path);
1962 let status = statuses
1963 .iter()
1964 .find_map(|(p, status)| (*p == repo_path.as_unix_str()).then_some(status));
1965 let mut content = String::from_utf8_lossy(&content).to_string();
1966
1967 let mut index_content = None;
1968 let mut head_content = None;
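                // Derive the index and head contents from the working-copy
                // content so that diffing the three layers reproduces the
                // requested status.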
1969 match status {
1970 None => {
1971 index_content = Some(content.clone());
1972 head_content = Some(content);
1973 }
1974 Some(FileStatus::Untracked | FileStatus::Ignored) => {}
1975 Some(FileStatus::Unmerged(unmerged_status)) => {
1976 state
1977 .unmerged_paths
1978 .insert(repo_path.clone(), *unmerged_status);
1979 content.push_str(" (unmerged)");
1980 index_content = Some(content.clone());
1981 head_content = Some(content);
1982 }
1983 Some(FileStatus::Tracked(TrackedStatus {
1984 index_status,
1985 worktree_status,
1986 })) => {
1987 match worktree_status {
1988 StatusCode::Modified => {
1989 let mut content = content.clone();
1990 content.push_str(" (modified in working copy)");
1991 index_content = Some(content);
1992 }
1993 StatusCode::TypeChanged | StatusCode::Unmodified => {
1994 index_content = Some(content.clone());
1995 }
1996 StatusCode::Added => {}
1997 StatusCode::Deleted | StatusCode::Renamed | StatusCode::Copied => {
1998 panic!("cannot create these statuses for an existing file");
1999 }
2000 };
2001 match index_status {
2002 StatusCode::Modified => {
2003 let mut content = index_content.clone().expect(
2004 "file cannot be both modified in index and created in working copy",
2005 );
2006 content.push_str(" (modified in index)");
2007 head_content = Some(content);
2008 }
2009 StatusCode::TypeChanged | StatusCode::Unmodified => {
                                head_content = Some(index_content.clone().expect(
                                    "file cannot be both unmodified in index and created in working copy",
                                ));
2011 }
2012 StatusCode::Added => {}
2013 StatusCode::Deleted => {
2014 head_content = Some("".into());
2015 }
2016 StatusCode::Renamed | StatusCode::Copied => {
2017 panic!("cannot create these statuses for an existing file");
2018 }
2019 };
2020 }
2021 };
2022
2023 if let Some(content) = index_content {
2024 state.index_contents.insert(repo_path.clone(), content);
2025 }
2026 if let Some(content) = head_content {
2027 state.head_contents.insert(repo_path.clone(), content);
2028 }
2029 }
2030 }).unwrap();
2031 }
2032
2033 pub fn set_error_message_for_index_write(&self, dot_git: &Path, message: Option<String>) {
2034 self.with_git_state(dot_git, true, |state| {
2035 state.simulated_index_write_error_message = message;
2036 })
2037 .unwrap();
2038 }
2039
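    /// Returns every path in the fake filesystem, optionally filtering out
    /// anything under a `.git` directory.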
2040 pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
2041 let mut result = Vec::new();
2042 let mut queue = collections::VecDeque::new();
2043 let state = &*self.state.lock();
2044 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2045 while let Some((path, entry)) = queue.pop_front() {
2046 if let FakeFsEntry::Dir { entries, .. } = entry {
2047 for (name, entry) in entries {
2048 queue.push_back((path.join(name), entry));
2049 }
2050 }
2051 if include_dot_git
2052 || !path
2053 .components()
2054 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2055 {
2056 result.push(path);
2057 }
2058 }
2059 result
2060 }
2061
2062 pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
2063 let mut result = Vec::new();
2064 let mut queue = collections::VecDeque::new();
2065 let state = &*self.state.lock();
2066 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2067 while let Some((path, entry)) = queue.pop_front() {
2068 if let FakeFsEntry::Dir { entries, .. } = entry {
2069 for (name, entry) in entries {
2070 queue.push_back((path.join(name), entry));
2071 }
2072 if include_dot_git
2073 || !path
2074 .components()
2075 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2076 {
2077 result.push(path);
2078 }
2079 }
2080 }
2081 result
2082 }
2083
2084 pub fn files(&self) -> Vec<PathBuf> {
2085 let mut result = Vec::new();
2086 let mut queue = collections::VecDeque::new();
2087 let state = &*self.state.lock();
2088 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2089 while let Some((path, entry)) = queue.pop_front() {
2090 match entry {
2091 FakeFsEntry::File { .. } => result.push(path),
2092 FakeFsEntry::Dir { entries, .. } => {
2093 for (name, entry) in entries {
2094 queue.push_back((path.join(name), entry));
2095 }
2096 }
2097 FakeFsEntry::Symlink { .. } => {}
2098 }
2099 }
2100 result
2101 }
2102
2103 pub fn files_with_contents(&self, prefix: &Path) -> Vec<(PathBuf, Vec<u8>)> {
2104 let mut result = Vec::new();
2105 let mut queue = collections::VecDeque::new();
2106 let state = &*self.state.lock();
2107 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2108 while let Some((path, entry)) = queue.pop_front() {
2109 match entry {
2110 FakeFsEntry::File { content, .. } => {
2111 if path.starts_with(prefix) {
2112 result.push((path, content.clone()));
2113 }
2114 }
2115 FakeFsEntry::Dir { entries, .. } => {
2116 for (name, entry) in entries {
2117 queue.push_back((path.join(name), entry));
2118 }
2119 }
2120 FakeFsEntry::Symlink { .. } => {}
2121 }
2122 }
2123 result
2124 }
2125
2126 /// How many `read_dir` calls have been issued.
2127 pub fn read_dir_call_count(&self) -> usize {
2128 self.state.lock().read_dir_call_count
2129 }
2130
2131 pub fn watched_paths(&self) -> Vec<PathBuf> {
2132 let state = self.state.lock();
2133 state
2134 .event_txs
2135 .iter()
2136 .filter_map(|(path, tx)| Some(path.clone()).filter(|_| !tx.is_closed()))
2137 .collect()
2138 }
2139
2140 /// How many `metadata` calls have been issued.
2141 pub fn metadata_call_count(&self) -> usize {
2142 self.state.lock().metadata_call_count
2143 }
2144
2145 /// How many write operations have been issued for a specific path.
2146 pub fn write_count_for_path(&self, path: impl AsRef<Path>) -> usize {
2147 let path = path.as_ref().to_path_buf();
2148 self.state
2149 .lock()
2150 .path_write_counts
2151 .get(&path)
2152 .copied()
2153 .unwrap_or(0)
2154 }
2155
2156 pub fn emit_fs_event(&self, path: impl Into<PathBuf>, event: Option<PathEventKind>) {
2157 self.state.lock().emit_event(std::iter::once((path, event)));
2158 }
2159
2160 fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
2161 self.executor.simulate_random_delay()
2162 }
2163}
2164
2165#[cfg(any(test, feature = "test-support"))]
2166impl FakeFsEntry {
2167 fn is_file(&self) -> bool {
2168 matches!(self, Self::File { .. })
2169 }
2170
2171 fn is_symlink(&self) -> bool {
2172 matches!(self, Self::Symlink { .. })
2173 }
2174
2175 fn file_content(&self, path: &Path) -> Result<&Vec<u8>> {
2176 if let Self::File { content, .. } = self {
2177 Ok(content)
2178 } else {
2179 anyhow::bail!("not a file: {path:?}");
2180 }
2181 }
2182
2183 fn dir_entries(&mut self, path: &Path) -> Result<&mut BTreeMap<String, FakeFsEntry>> {
2184 if let Self::Dir { entries, .. } = self {
2185 Ok(entries)
2186 } else {
2187 anyhow::bail!("not a directory: {path:?}");
2188 }
2189 }
2190}
2191
2192#[cfg(any(test, feature = "test-support"))]
2193struct FakeWatcher {
2194 tx: smol::channel::Sender<Vec<PathEvent>>,
2195 original_path: PathBuf,
2196 fs_state: Arc<Mutex<FakeFsState>>,
2197 prefixes: Mutex<Vec<PathBuf>>,
2198}
2199
2200#[cfg(any(test, feature = "test-support"))]
2201impl Watcher for FakeWatcher {
2202 fn add(&self, path: &Path) -> Result<()> {
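        // Everything under the originally watched path is already covered by
        // this watcher's channel, so only register additional prefixes.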
2203 if path.starts_with(&self.original_path) {
2204 return Ok(());
2205 }
2206 self.fs_state
2207 .try_lock()
2208 .unwrap()
2209 .event_txs
2210 .push((path.to_owned(), self.tx.clone()));
2211 self.prefixes.lock().push(path.to_owned());
2212 Ok(())
2213 }
2214
2215 fn remove(&self, _: &Path) -> Result<()> {
2216 Ok(())
2217 }
2218}
2219
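/// File handle used by the fake filesystem: it remembers the file's inode so
/// that `current_path` can report where the file was moved to (see the `moves`
/// map maintained by `rename`).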
2220#[cfg(any(test, feature = "test-support"))]
2221#[derive(Debug)]
2222struct FakeHandle {
2223 inode: u64,
2224}
2225
2226#[cfg(any(test, feature = "test-support"))]
2227impl FileHandle for FakeHandle {
2228 fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf> {
2229 let fs = fs.as_fake();
2230 let mut state = fs.state.lock();
2231 let Some(target) = state.moves.get(&self.inode).cloned() else {
2232 anyhow::bail!("fake fd not moved")
2233 };
2234
2235 if state.try_entry(&target, false).is_some() {
2236 return Ok(target);
2237 }
2238 anyhow::bail!("fake fd target not found")
2239 }
2240}
2241
2242#[cfg(any(test, feature = "test-support"))]
2243#[async_trait::async_trait]
2244impl Fs for FakeFs {
2245 async fn create_dir(&self, path: &Path) -> Result<()> {
2246 self.simulate_random_delay().await;
2247
2248 let mut created_dirs = Vec::new();
2249 let mut cur_path = PathBuf::new();
2250 for component in path.components() {
2251 let should_skip = matches!(component, Component::Prefix(..) | Component::RootDir);
2252 cur_path.push(component);
2253 if should_skip {
2254 continue;
2255 }
2256 let mut state = self.state.lock();
2257
2258 let inode = state.get_and_increment_inode();
2259 let mtime = state.get_and_increment_mtime();
2260 state.write_path(&cur_path, |entry| {
2261 entry.or_insert_with(|| {
2262 created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
2263 FakeFsEntry::Dir {
2264 inode,
2265 mtime,
2266 len: 0,
2267 entries: Default::default(),
2268 git_repo_state: None,
2269 }
2270 });
2271 Ok(())
2272 })?
2273 }
2274
2275 self.state.lock().emit_event(created_dirs);
2276 Ok(())
2277 }
2278
2279 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
2280 self.simulate_random_delay().await;
2281 let mut state = self.state.lock();
2282 let inode = state.get_and_increment_inode();
2283 let mtime = state.get_and_increment_mtime();
2284 let file = FakeFsEntry::File {
2285 inode,
2286 mtime,
2287 len: 0,
2288 content: Vec::new(),
2289 git_dir_path: None,
2290 };
2291 let mut kind = Some(PathEventKind::Created);
2292 state.write_path(path, |entry| {
2293 match entry {
2294 btree_map::Entry::Occupied(mut e) => {
2295 if options.overwrite {
2296 kind = Some(PathEventKind::Changed);
2297 *e.get_mut() = file;
2298 } else if !options.ignore_if_exists {
2299 anyhow::bail!("path already exists: {path:?}");
2300 }
2301 }
2302 btree_map::Entry::Vacant(e) => {
2303 e.insert(file);
2304 }
2305 }
2306 Ok(())
2307 })?;
2308 state.emit_event([(path, kind)]);
2309 Ok(())
2310 }
2311
2312 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
2313 let mut state = self.state.lock();
2314 let file = FakeFsEntry::Symlink { target };
2315 state
2316 .write_path(path.as_ref(), move |e| match e {
2317 btree_map::Entry::Vacant(e) => {
2318 e.insert(file);
2319 Ok(())
2320 }
2321 btree_map::Entry::Occupied(mut e) => {
2322 *e.get_mut() = file;
2323 Ok(())
2324 }
2325 })
2326 .unwrap();
2327 state.emit_event([(path, Some(PathEventKind::Created))]);
2328
2329 Ok(())
2330 }
2331
2332 async fn create_file_with(
2333 &self,
2334 path: &Path,
2335 mut content: Pin<&mut (dyn AsyncRead + Send)>,
2336 ) -> Result<()> {
2337 let mut bytes = Vec::new();
2338 content.read_to_end(&mut bytes).await?;
2339 self.write_file_internal(path, bytes, true)?;
2340 Ok(())
2341 }
2342
2343 async fn extract_tar_file(
2344 &self,
2345 path: &Path,
2346 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
2347 ) -> Result<()> {
2348 let mut entries = content.entries()?;
2349 while let Some(entry) = entries.next().await {
2350 let mut entry = entry?;
2351 if entry.header().entry_type().is_file() {
2352 let path = path.join(entry.path()?.as_ref());
2353 let mut bytes = Vec::new();
2354 entry.read_to_end(&mut bytes).await?;
2355 self.create_dir(path.parent().unwrap()).await?;
2356 self.write_file_internal(&path, bytes, true)?;
2357 }
2358 }
2359 Ok(())
2360 }
2361
2362 async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
2363 self.simulate_random_delay().await;
2364
2365 let old_path = normalize_path(old_path);
2366 let new_path = normalize_path(new_path);
2367
2368 if options.create_parents {
2369 if let Some(parent) = new_path.parent() {
2370 self.create_dir(parent).await?;
2371 }
2372 }
2373
2374 let mut state = self.state.lock();
2375 let moved_entry = state.write_path(&old_path, |e| {
2376 if let btree_map::Entry::Occupied(e) = e {
2377 Ok(e.get().clone())
2378 } else {
2379 anyhow::bail!("path does not exist: {old_path:?}")
2380 }
2381 })?;
2382
2383 let inode = match moved_entry {
2384 FakeFsEntry::File { inode, .. } => inode,
2385 FakeFsEntry::Dir { inode, .. } => inode,
2386 _ => 0,
2387 };
2388
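        // Record the move so that any open FakeHandle for this inode can
        // resolve the file's new location via `current_path`.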
2389 state.moves.insert(inode, new_path.clone());
2390
2391 state.write_path(&new_path, |e| {
2392 match e {
2393 btree_map::Entry::Occupied(mut e) => {
2394 if options.overwrite {
2395 *e.get_mut() = moved_entry;
2396 } else if !options.ignore_if_exists {
2397 anyhow::bail!("path already exists: {new_path:?}");
2398 }
2399 }
2400 btree_map::Entry::Vacant(e) => {
2401 e.insert(moved_entry);
2402 }
2403 }
2404 Ok(())
2405 })?;
2406
2407 state
2408 .write_path(&old_path, |e| {
2409 if let btree_map::Entry::Occupied(e) = e {
2410 Ok(e.remove())
2411 } else {
2412 unreachable!()
2413 }
2414 })
2415 .unwrap();
2416
2417 state.emit_event([
2418 (old_path, Some(PathEventKind::Removed)),
2419 (new_path, Some(PathEventKind::Created)),
2420 ]);
2421 Ok(())
2422 }
2423
2424 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
2425 self.simulate_random_delay().await;
2426
2427 let source = normalize_path(source);
2428 let target = normalize_path(target);
2429 let mut state = self.state.lock();
2430 let mtime = state.get_and_increment_mtime();
2431 let inode = state.get_and_increment_inode();
2432 let source_entry = state.entry(&source)?;
2433 let content = source_entry.file_content(&source)?.clone();
2434 let mut kind = Some(PathEventKind::Created);
2435 state.write_path(&target, |e| match e {
2436 btree_map::Entry::Occupied(e) => {
2437 if options.overwrite {
2438 kind = Some(PathEventKind::Changed);
2439 Ok(Some(e.get().clone()))
2440 } else if !options.ignore_if_exists {
2441 anyhow::bail!("{target:?} already exists");
2442 } else {
2443 Ok(None)
2444 }
2445 }
2446 btree_map::Entry::Vacant(e) => Ok(Some(
2447 e.insert(FakeFsEntry::File {
2448 inode,
2449 mtime,
2450 len: content.len() as u64,
2451 content,
2452 git_dir_path: None,
2453 })
2454 .clone(),
2455 )),
2456 })?;
2457 state.emit_event([(target, kind)]);
2458 Ok(())
2459 }
2460
2461 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2462 self.simulate_random_delay().await;
2463
2464 let path = normalize_path(path);
2465 let parent_path = path.parent().context("cannot remove the root")?;
2466 let base_name = path.file_name().context("cannot remove the root")?;
2467
2468 let mut state = self.state.lock();
2469 let parent_entry = state.entry(parent_path)?;
2470 let entry = parent_entry
2471 .dir_entries(parent_path)?
2472 .entry(base_name.to_str().unwrap().into());
2473
2474 match entry {
2475 btree_map::Entry::Vacant(_) => {
2476 if !options.ignore_if_not_exists {
2477 anyhow::bail!("{path:?} does not exist");
2478 }
2479 }
2480 btree_map::Entry::Occupied(mut entry) => {
2481 {
2482 let children = entry.get_mut().dir_entries(&path)?;
2483 if !options.recursive && !children.is_empty() {
2484 anyhow::bail!("{path:?} is not empty");
2485 }
2486 }
2487 entry.remove();
2488 }
2489 }
2490 state.emit_event([(path, Some(PathEventKind::Removed))]);
2491 Ok(())
2492 }
2493
2494 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2495 self.simulate_random_delay().await;
2496
2497 let path = normalize_path(path);
2498 let parent_path = path.parent().context("cannot remove the root")?;
2499 let base_name = path.file_name().unwrap();
2500 let mut state = self.state.lock();
2501 let parent_entry = state.entry(parent_path)?;
2502 let entry = parent_entry
2503 .dir_entries(parent_path)?
2504 .entry(base_name.to_str().unwrap().into());
2505 match entry {
2506 btree_map::Entry::Vacant(_) => {
2507 if !options.ignore_if_not_exists {
2508 anyhow::bail!("{path:?} does not exist");
2509 }
2510 }
2511 btree_map::Entry::Occupied(mut entry) => {
2512 entry.get_mut().file_content(&path)?;
2513 entry.remove();
2514 }
2515 }
2516 state.emit_event([(path, Some(PathEventKind::Removed))]);
2517 Ok(())
2518 }
2519
2520 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
2521 let bytes = self.load_internal(path).await?;
2522 Ok(Box::new(io::Cursor::new(bytes)))
2523 }
2524
2525 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
2526 self.simulate_random_delay().await;
2527 let mut state = self.state.lock();
2528 let inode = match state.entry(path)? {
2529 FakeFsEntry::File { inode, .. } => *inode,
2530 FakeFsEntry::Dir { inode, .. } => *inode,
2531 _ => unreachable!(),
2532 };
2533 Ok(Arc::new(FakeHandle { inode }))
2534 }
2535
2536 async fn load(&self, path: &Path) -> Result<String> {
2537 let content = self.load_internal(path).await?;
2538 Ok(String::from_utf8(content)?)
2539 }
2540
2541 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
2542 self.load_internal(path).await
2543 }
2544
2545 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
2546 self.simulate_random_delay().await;
2547 let path = normalize_path(path.as_path());
2548 if let Some(path) = path.parent() {
2549 self.create_dir(path).await?;
2550 }
2551 self.write_file_internal(path, data.into_bytes(), true)?;
2552 Ok(())
2553 }
2554
2555 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
2556 self.simulate_random_delay().await;
2557 let path = normalize_path(path);
2558 let content = chunks(text, line_ending).collect::<String>();
2559 if let Some(path) = path.parent() {
2560 self.create_dir(path).await?;
2561 }
2562 self.write_file_internal(path, content.into_bytes(), false)?;
2563 Ok(())
2564 }
2565
2566 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
2567 self.simulate_random_delay().await;
2568 let path = normalize_path(path);
2569 if let Some(path) = path.parent() {
2570 self.create_dir(path).await?;
2571 }
2572 self.write_file_internal(path, content.to_vec(), false)?;
2573 Ok(())
2574 }
2575
2576 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
2577 let path = normalize_path(path);
2578 self.simulate_random_delay().await;
2579 let state = self.state.lock();
2580 let canonical_path = state
2581 .canonicalize(&path, true)
2582 .with_context(|| format!("path does not exist: {path:?}"))?;
2583 Ok(canonical_path)
2584 }
2585
2586 async fn is_file(&self, path: &Path) -> bool {
2587 let path = normalize_path(path);
2588 self.simulate_random_delay().await;
2589 let mut state = self.state.lock();
2590 if let Some((entry, _)) = state.try_entry(&path, true) {
2591 entry.is_file()
2592 } else {
2593 false
2594 }
2595 }
2596
2597 async fn is_dir(&self, path: &Path) -> bool {
2598 self.metadata(path)
2599 .await
2600 .is_ok_and(|metadata| metadata.is_some_and(|metadata| metadata.is_dir))
2601 }
2602
2603 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
2604 self.simulate_random_delay().await;
2605 let path = normalize_path(path);
2606 let mut state = self.state.lock();
2607 state.metadata_call_count += 1;
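        // Look the path up without following symlinks first so that
        // `is_symlink` reflects the path itself, then follow the link for the
        // reported metadata.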
2608 if let Some((mut entry, _)) = state.try_entry(&path, false) {
2609 let is_symlink = entry.is_symlink();
2610 if is_symlink {
2611 if let Some(e) = state.try_entry(&path, true).map(|e| e.0) {
2612 entry = e;
2613 } else {
2614 return Ok(None);
2615 }
2616 }
2617
2618 Ok(Some(match &*entry {
2619 FakeFsEntry::File {
2620 inode, mtime, len, ..
2621 } => Metadata {
2622 inode: *inode,
2623 mtime: *mtime,
2624 len: *len,
2625 is_dir: false,
2626 is_symlink,
2627 is_fifo: false,
2628 is_executable: false,
2629 },
2630 FakeFsEntry::Dir {
2631 inode, mtime, len, ..
2632 } => Metadata {
2633 inode: *inode,
2634 mtime: *mtime,
2635 len: *len,
2636 is_dir: true,
2637 is_symlink,
2638 is_fifo: false,
2639 is_executable: false,
2640 },
2641 FakeFsEntry::Symlink { .. } => unreachable!(),
2642 }))
2643 } else {
2644 Ok(None)
2645 }
2646 }
2647
2648 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
2649 self.simulate_random_delay().await;
2650 let path = normalize_path(path);
2651 let mut state = self.state.lock();
2652 let (entry, _) = state
2653 .try_entry(&path, false)
2654 .with_context(|| format!("path does not exist: {path:?}"))?;
2655 if let FakeFsEntry::Symlink { target } = entry {
2656 Ok(target.clone())
2657 } else {
2658 anyhow::bail!("not a symlink: {path:?}")
2659 }
2660 }
2661
2662 async fn read_dir(
2663 &self,
2664 path: &Path,
2665 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
2666 self.simulate_random_delay().await;
2667 let path = normalize_path(path);
2668 let mut state = self.state.lock();
2669 state.read_dir_call_count += 1;
2670 let entry = state.entry(&path)?;
2671 let children = entry.dir_entries(&path)?;
2672 let paths = children
2673 .keys()
2674 .map(|file_name| Ok(path.join(file_name)))
2675 .collect::<Vec<_>>();
2676 Ok(Box::pin(futures::stream::iter(paths)))
2677 }
2678
2679 async fn watch(
2680 &self,
2681 path: &Path,
2682 _: Duration,
2683 ) -> (
2684 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
2685 Arc<dyn Watcher>,
2686 ) {
2687 self.simulate_random_delay().await;
2688 let (tx, rx) = smol::channel::unbounded();
2689 let path = path.to_path_buf();
2690 self.state.lock().event_txs.push((path.clone(), tx.clone()));
2691 let executor = self.executor.clone();
2692 let watcher = Arc::new(FakeWatcher {
2693 tx,
2694 original_path: path.to_owned(),
2695 fs_state: self.state.clone(),
2696 prefixes: Mutex::new(vec![path]),
2697 });
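        // Only forward event batches that touch one of the watcher's
        // registered prefixes, injecting a random delay to mimic real
        // filesystem watcher latency.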
2698 (
2699 Box::pin(futures::StreamExt::filter(rx, {
2700 let watcher = watcher.clone();
2701 move |events| {
2702 let result = events.iter().any(|evt_path| {
2703 watcher
2704 .prefixes
2705 .lock()
2706 .iter()
2707 .any(|prefix| evt_path.path.starts_with(prefix))
2708 });
2709 let executor = executor.clone();
2710 async move {
2711 executor.simulate_random_delay().await;
2712 result
2713 }
2714 }
2715 })),
2716 watcher,
2717 )
2718 }
2719
2720 fn open_repo(
2721 &self,
2722 abs_dot_git: &Path,
2723 _system_git_binary: Option<&Path>,
2724 ) -> Option<Arc<dyn GitRepository>> {
2725 use util::ResultExt as _;
2726
2727 self.with_git_state_and_paths(
2728 abs_dot_git,
2729 false,
2730 |_, repository_dir_path, common_dir_path| {
2731 Arc::new(fake_git_repo::FakeGitRepository {
2732 fs: self.this.upgrade().unwrap(),
2733 executor: self.executor.clone(),
2734 dot_git_path: abs_dot_git.to_path_buf(),
2735 repository_dir_path: repository_dir_path.to_owned(),
2736 common_dir_path: common_dir_path.to_owned(),
2737 checkpoints: Arc::default(),
2738 }) as _
2739 },
2740 )
2741 .log_err()
2742 }
2743
2744 async fn git_init(
2745 &self,
2746 abs_work_directory_path: &Path,
2747 _fallback_branch_name: String,
2748 ) -> Result<()> {
2749 self.create_dir(&abs_work_directory_path.join(".git")).await
2750 }
2751
2752 async fn git_clone(&self, _repo_url: &str, _abs_work_directory: &Path) -> Result<()> {
2753 anyhow::bail!("Git clone is not supported in fake Fs")
2754 }
2755
2756 fn is_fake(&self) -> bool {
2757 true
2758 }
2759
2760 async fn is_case_sensitive(&self) -> Result<bool> {
2761 Ok(true)
2762 }
2763
2764 fn subscribe_to_jobs(&self) -> JobEventReceiver {
2765 let (sender, receiver) = futures::channel::mpsc::unbounded();
2766 self.state.lock().job_event_subscribers.lock().push(sender);
2767 receiver
2768 }
2769
2770 #[cfg(any(test, feature = "test-support"))]
2771 fn as_fake(&self) -> Arc<FakeFs> {
2772 self.this.upgrade().unwrap()
2773 }
2774}
2775
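/// Yields the rope's text as string chunks with every line break normalized to
/// the requested [`LineEnding`].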
2776fn chunks(rope: &Rope, line_ending: LineEnding) -> impl Iterator<Item = &str> {
2777 rope.chunks().flat_map(move |chunk| {
2778 let mut newline = false;
2779 let end_with_newline = chunk.ends_with('\n').then_some(line_ending.as_str());
2780 chunk
2781 .lines()
2782 .flat_map(move |line| {
2783 let ending = if newline {
2784 Some(line_ending.as_str())
2785 } else {
2786 None
2787 };
2788 newline = true;
2789 ending.into_iter().chain([line])
2790 })
2791 .chain(end_with_newline)
2792 })
2793}
2794
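/// Lexically normalizes a path by removing `.` components and resolving `..`
/// against the preceding component. Unlike `canonicalize`, this never touches
/// the filesystem and does not resolve symlinks.
///
/// A small illustrative sketch:
///
/// ```ignore
/// assert_eq!(
///     normalize_path(Path::new("/a/b/../c/./d")),
///     PathBuf::from("/a/c/d")
/// );
/// ```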
2795pub fn normalize_path(path: &Path) -> PathBuf {
2796 let mut components = path.components().peekable();
2797 let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() {
2798 components.next();
2799 PathBuf::from(c.as_os_str())
2800 } else {
2801 PathBuf::new()
2802 };
2803
2804 for component in components {
2805 match component {
2806 Component::Prefix(..) => unreachable!(),
2807 Component::RootDir => {
2808 ret.push(component.as_os_str());
2809 }
2810 Component::CurDir => {}
2811 Component::ParentDir => {
2812 ret.pop();
2813 }
2814 Component::Normal(c) => {
2815 ret.push(c);
2816 }
2817 }
2818 }
2819 ret
2820}
2821
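/// Recursively copies `source` (a file or a directory tree) into `target`.
/// Directories are recreated at the destination and files are copied with
/// [`Fs::copy_file`]; the `overwrite` and `ignore_if_exists` options control
/// how existing destination entries are handled.
///
/// A minimal usage sketch (hypothetical paths, mirroring the tests below):
///
/// ```ignore
/// copy_recursive(fs.as_ref(), Path::new("/src"), Path::new("/dst"), CopyOptions::default())
///     .await?;
/// ```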
2822pub async fn copy_recursive<'a>(
2823 fs: &'a dyn Fs,
2824 source: &'a Path,
2825 target: &'a Path,
2826 options: CopyOptions,
2827) -> Result<()> {
2828 for (item, is_dir) in read_dir_items(fs, source).await? {
2829 let Ok(item_relative_path) = item.strip_prefix(source) else {
2830 continue;
2831 };
2832 let target_item = if item_relative_path == Path::new("") {
2833 target.to_path_buf()
2834 } else {
2835 target.join(item_relative_path)
2836 };
2837 if is_dir {
2838 if !options.overwrite && fs.metadata(&target_item).await.is_ok_and(|m| m.is_some()) {
2839 if options.ignore_if_exists {
2840 continue;
2841 } else {
2842 anyhow::bail!("{target_item:?} already exists");
2843 }
2844 }
2845 let _ = fs
2846 .remove_dir(
2847 &target_item,
2848 RemoveOptions {
2849 recursive: true,
2850 ignore_if_not_exists: true,
2851 },
2852 )
2853 .await;
2854 fs.create_dir(&target_item).await?;
2855 } else {
2856 fs.copy_file(&item, &target_item, options).await?;
2857 }
2858 }
2859 Ok(())
2860}
2861
2862/// Recursively reads all of the paths in the given directory.
2863///
2864/// Returns a vector of tuples of (path, is_dir).
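///
/// The returned list includes `source` itself as its first entry.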
2865pub async fn read_dir_items<'a>(fs: &'a dyn Fs, source: &'a Path) -> Result<Vec<(PathBuf, bool)>> {
2866 let mut items = Vec::new();
2867 read_recursive(fs, source, &mut items).await?;
2868 Ok(items)
2869}
2870
2871fn read_recursive<'a>(
2872 fs: &'a dyn Fs,
2873 source: &'a Path,
2874 output: &'a mut Vec<(PathBuf, bool)>,
2875) -> BoxFuture<'a, Result<()>> {
2876 use futures::future::FutureExt;
2877
2878 async move {
2879 let metadata = fs
2880 .metadata(source)
2881 .await?
2882 .with_context(|| format!("path does not exist: {source:?}"))?;
2883
2884 if metadata.is_dir {
2885 output.push((source.to_path_buf(), true));
2886 let mut children = fs.read_dir(source).await?;
2887 while let Some(child_path) = children.next().await {
2888 if let Ok(child_path) = child_path {
2889 read_recursive(fs, &child_path, output).await?;
2890 }
2891 }
2892 } else {
2893 output.push((source.to_path_buf(), false));
2894 }
2895 Ok(())
2896 }
2897 .boxed()
2898}
2899
2900// todo(windows)
// Can we get the file id without opening the file twice?
2902// https://github.com/rust-lang/rust/issues/63010
2903#[cfg(target_os = "windows")]
2904async fn file_id(path: impl AsRef<Path>) -> Result<u64> {
2905 use std::os::windows::io::AsRawHandle;
2906
2907 use smol::fs::windows::OpenOptionsExt;
2908 use windows::Win32::{
2909 Foundation::HANDLE,
2910 Storage::FileSystem::{
2911 BY_HANDLE_FILE_INFORMATION, FILE_FLAG_BACKUP_SEMANTICS, GetFileInformationByHandle,
2912 },
2913 };
2914
2915 let file = smol::fs::OpenOptions::new()
2916 .read(true)
2917 .custom_flags(FILE_FLAG_BACKUP_SEMANTICS.0)
2918 .open(path)
2919 .await?;
2920
2921 let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() };
2922 // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle
2923 // This function supports Windows XP+
2924 smol::unblock(move || {
2925 unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? };
2926
2927 Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64))
2928 })
2929 .await
2930}
2931
2932#[cfg(target_os = "windows")]
2933fn atomic_replace<P: AsRef<Path>>(
2934 replaced_file: P,
2935 replacement_file: P,
2936) -> windows::core::Result<()> {
2937 use windows::{
2938 Win32::Storage::FileSystem::{REPLACE_FILE_FLAGS, ReplaceFileW},
2939 core::HSTRING,
2940 };
2941
    // ReplaceFileW requires the destination file to exist, so create it if it doesn't.
2943 let _ = std::fs::File::create_new(replaced_file.as_ref());
2944
2945 unsafe {
2946 ReplaceFileW(
2947 &HSTRING::from(replaced_file.as_ref().to_string_lossy().into_owned()),
2948 &HSTRING::from(replacement_file.as_ref().to_string_lossy().into_owned()),
2949 None,
2950 REPLACE_FILE_FLAGS::default(),
2951 None,
2952 None,
2953 )
2954 }
2955}
2956
2957#[cfg(test)]
2958mod tests {
2959 use super::*;
2960 use gpui::BackgroundExecutor;
2961 use serde_json::json;
2962 use util::path;
2963
2964 #[gpui::test]
2965 async fn test_fake_fs(executor: BackgroundExecutor) {
2966 let fs = FakeFs::new(executor.clone());
2967 fs.insert_tree(
2968 path!("/root"),
2969 json!({
2970 "dir1": {
2971 "a": "A",
2972 "b": "B"
2973 },
2974 "dir2": {
2975 "c": "C",
2976 "dir3": {
2977 "d": "D"
2978 }
2979 }
2980 }),
2981 )
2982 .await;
2983
2984 assert_eq!(
2985 fs.files(),
2986 vec![
2987 PathBuf::from(path!("/root/dir1/a")),
2988 PathBuf::from(path!("/root/dir1/b")),
2989 PathBuf::from(path!("/root/dir2/c")),
2990 PathBuf::from(path!("/root/dir2/dir3/d")),
2991 ]
2992 );
2993
2994 fs.create_symlink(path!("/root/dir2/link-to-dir3").as_ref(), "./dir3".into())
2995 .await
2996 .unwrap();
2997
2998 assert_eq!(
2999 fs.canonicalize(path!("/root/dir2/link-to-dir3").as_ref())
3000 .await
3001 .unwrap(),
3002 PathBuf::from(path!("/root/dir2/dir3")),
3003 );
3004 assert_eq!(
3005 fs.canonicalize(path!("/root/dir2/link-to-dir3/d").as_ref())
3006 .await
3007 .unwrap(),
3008 PathBuf::from(path!("/root/dir2/dir3/d")),
3009 );
3010 assert_eq!(
3011 fs.load(path!("/root/dir2/link-to-dir3/d").as_ref())
3012 .await
3013 .unwrap(),
3014 "D",
3015 );
3016 }
3017
3018 #[gpui::test]
3019 async fn test_copy_recursive_with_single_file(executor: BackgroundExecutor) {
3020 let fs = FakeFs::new(executor.clone());
3021 fs.insert_tree(
3022 path!("/outer"),
3023 json!({
3024 "a": "A",
3025 "b": "B",
3026 "inner": {}
3027 }),
3028 )
3029 .await;
3030
3031 assert_eq!(
3032 fs.files(),
3033 vec![
3034 PathBuf::from(path!("/outer/a")),
3035 PathBuf::from(path!("/outer/b")),
3036 ]
3037 );
3038
3039 let source = Path::new(path!("/outer/a"));
3040 let target = Path::new(path!("/outer/a copy"));
3041 copy_recursive(fs.as_ref(), source, target, Default::default())
3042 .await
3043 .unwrap();
3044
3045 assert_eq!(
3046 fs.files(),
3047 vec![
3048 PathBuf::from(path!("/outer/a")),
3049 PathBuf::from(path!("/outer/a copy")),
3050 PathBuf::from(path!("/outer/b")),
3051 ]
3052 );
3053
3054 let source = Path::new(path!("/outer/a"));
3055 let target = Path::new(path!("/outer/inner/a copy"));
3056 copy_recursive(fs.as_ref(), source, target, Default::default())
3057 .await
3058 .unwrap();
3059
3060 assert_eq!(
3061 fs.files(),
3062 vec![
3063 PathBuf::from(path!("/outer/a")),
3064 PathBuf::from(path!("/outer/a copy")),
3065 PathBuf::from(path!("/outer/b")),
3066 PathBuf::from(path!("/outer/inner/a copy")),
3067 ]
3068 );
3069 }
3070
3071 #[gpui::test]
3072 async fn test_copy_recursive_with_single_dir(executor: BackgroundExecutor) {
3073 let fs = FakeFs::new(executor.clone());
3074 fs.insert_tree(
3075 path!("/outer"),
3076 json!({
3077 "a": "A",
3078 "empty": {},
3079 "non-empty": {
3080 "b": "B",
3081 }
3082 }),
3083 )
3084 .await;
3085
3086 assert_eq!(
3087 fs.files(),
3088 vec![
3089 PathBuf::from(path!("/outer/a")),
3090 PathBuf::from(path!("/outer/non-empty/b")),
3091 ]
3092 );
3093 assert_eq!(
3094 fs.directories(false),
3095 vec![
3096 PathBuf::from(path!("/")),
3097 PathBuf::from(path!("/outer")),
3098 PathBuf::from(path!("/outer/empty")),
3099 PathBuf::from(path!("/outer/non-empty")),
3100 ]
3101 );
3102
3103 let source = Path::new(path!("/outer/empty"));
3104 let target = Path::new(path!("/outer/empty copy"));
3105 copy_recursive(fs.as_ref(), source, target, Default::default())
3106 .await
3107 .unwrap();
3108
3109 assert_eq!(
3110 fs.files(),
3111 vec![
3112 PathBuf::from(path!("/outer/a")),
3113 PathBuf::from(path!("/outer/non-empty/b")),
3114 ]
3115 );
3116 assert_eq!(
3117 fs.directories(false),
3118 vec![
3119 PathBuf::from(path!("/")),
3120 PathBuf::from(path!("/outer")),
3121 PathBuf::from(path!("/outer/empty")),
3122 PathBuf::from(path!("/outer/empty copy")),
3123 PathBuf::from(path!("/outer/non-empty")),
3124 ]
3125 );
3126
3127 let source = Path::new(path!("/outer/non-empty"));
3128 let target = Path::new(path!("/outer/non-empty copy"));
3129 copy_recursive(fs.as_ref(), source, target, Default::default())
3130 .await
3131 .unwrap();
3132
3133 assert_eq!(
3134 fs.files(),
3135 vec![
3136 PathBuf::from(path!("/outer/a")),
3137 PathBuf::from(path!("/outer/non-empty/b")),
3138 PathBuf::from(path!("/outer/non-empty copy/b")),
3139 ]
3140 );
3141 assert_eq!(
3142 fs.directories(false),
3143 vec![
3144 PathBuf::from(path!("/")),
3145 PathBuf::from(path!("/outer")),
3146 PathBuf::from(path!("/outer/empty")),
3147 PathBuf::from(path!("/outer/empty copy")),
3148 PathBuf::from(path!("/outer/non-empty")),
3149 PathBuf::from(path!("/outer/non-empty copy")),
3150 ]
3151 );
3152 }
3153
3154 #[gpui::test]
3155 async fn test_copy_recursive(executor: BackgroundExecutor) {
3156 let fs = FakeFs::new(executor.clone());
3157 fs.insert_tree(
3158 path!("/outer"),
3159 json!({
3160 "inner1": {
3161 "a": "A",
3162 "b": "B",
3163 "inner3": {
3164 "d": "D",
3165 },
3166 "inner4": {}
3167 },
3168 "inner2": {
3169 "c": "C",
3170 }
3171 }),
3172 )
3173 .await;
3174
3175 assert_eq!(
3176 fs.files(),
3177 vec![
3178 PathBuf::from(path!("/outer/inner1/a")),
3179 PathBuf::from(path!("/outer/inner1/b")),
3180 PathBuf::from(path!("/outer/inner2/c")),
3181 PathBuf::from(path!("/outer/inner1/inner3/d")),
3182 ]
3183 );
3184 assert_eq!(
3185 fs.directories(false),
3186 vec![
3187 PathBuf::from(path!("/")),
3188 PathBuf::from(path!("/outer")),
3189 PathBuf::from(path!("/outer/inner1")),
3190 PathBuf::from(path!("/outer/inner2")),
3191 PathBuf::from(path!("/outer/inner1/inner3")),
3192 PathBuf::from(path!("/outer/inner1/inner4")),
3193 ]
3194 );
3195
3196 let source = Path::new(path!("/outer"));
3197 let target = Path::new(path!("/outer/inner1/outer"));
3198 copy_recursive(fs.as_ref(), source, target, Default::default())
3199 .await
3200 .unwrap();
3201
3202 assert_eq!(
3203 fs.files(),
3204 vec![
3205 PathBuf::from(path!("/outer/inner1/a")),
3206 PathBuf::from(path!("/outer/inner1/b")),
3207 PathBuf::from(path!("/outer/inner2/c")),
3208 PathBuf::from(path!("/outer/inner1/inner3/d")),
3209 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3210 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3211 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3212 PathBuf::from(path!("/outer/inner1/outer/inner1/inner3/d")),
3213 ]
3214 );
3215 assert_eq!(
3216 fs.directories(false),
3217 vec![
3218 PathBuf::from(path!("/")),
3219 PathBuf::from(path!("/outer")),
3220 PathBuf::from(path!("/outer/inner1")),
3221 PathBuf::from(path!("/outer/inner2")),
3222 PathBuf::from(path!("/outer/inner1/inner3")),
3223 PathBuf::from(path!("/outer/inner1/inner4")),
3224 PathBuf::from(path!("/outer/inner1/outer")),
3225 PathBuf::from(path!("/outer/inner1/outer/inner1")),
3226 PathBuf::from(path!("/outer/inner1/outer/inner2")),
3227 PathBuf::from(path!("/outer/inner1/outer/inner1/inner3")),
3228 PathBuf::from(path!("/outer/inner1/outer/inner1/inner4")),
3229 ]
3230 );
3231 }
3232
3233 #[gpui::test]
3234 async fn test_copy_recursive_with_overwriting(executor: BackgroundExecutor) {
3235 let fs = FakeFs::new(executor.clone());
3236 fs.insert_tree(
3237 path!("/outer"),
3238 json!({
3239 "inner1": {
3240 "a": "A",
3241 "b": "B",
3242 "outer": {
3243 "inner1": {
3244 "a": "B"
3245 }
3246 }
3247 },
3248 "inner2": {
3249 "c": "C",
3250 }
3251 }),
3252 )
3253 .await;
3254
3255 assert_eq!(
3256 fs.files(),
3257 vec![
3258 PathBuf::from(path!("/outer/inner1/a")),
3259 PathBuf::from(path!("/outer/inner1/b")),
3260 PathBuf::from(path!("/outer/inner2/c")),
3261 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3262 ]
3263 );
3264 assert_eq!(
3265 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3266 .await
3267 .unwrap(),
3268 "B",
3269 );
3270
3271 let source = Path::new(path!("/outer"));
3272 let target = Path::new(path!("/outer/inner1/outer"));
3273 copy_recursive(
3274 fs.as_ref(),
3275 source,
3276 target,
3277 CopyOptions {
3278 overwrite: true,
3279 ..Default::default()
3280 },
3281 )
3282 .await
3283 .unwrap();
3284
3285 assert_eq!(
3286 fs.files(),
3287 vec![
3288 PathBuf::from(path!("/outer/inner1/a")),
3289 PathBuf::from(path!("/outer/inner1/b")),
3290 PathBuf::from(path!("/outer/inner2/c")),
3291 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3292 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3293 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3294 PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
3295 ]
3296 );
3297 assert_eq!(
3298 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3299 .await
3300 .unwrap(),
3301 "A"
3302 );
3303 }
3304
3305 #[gpui::test]
3306 async fn test_copy_recursive_with_ignoring(executor: BackgroundExecutor) {
3307 let fs = FakeFs::new(executor.clone());
3308 fs.insert_tree(
3309 path!("/outer"),
3310 json!({
3311 "inner1": {
3312 "a": "A",
3313 "b": "B",
3314 "outer": {
3315 "inner1": {
3316 "a": "B"
3317 }
3318 }
3319 },
3320 "inner2": {
3321 "c": "C",
3322 }
3323 }),
3324 )
3325 .await;
3326
3327 assert_eq!(
3328 fs.files(),
3329 vec![
3330 PathBuf::from(path!("/outer/inner1/a")),
3331 PathBuf::from(path!("/outer/inner1/b")),
3332 PathBuf::from(path!("/outer/inner2/c")),
3333 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3334 ]
3335 );
3336 assert_eq!(
3337 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3338 .await
3339 .unwrap(),
3340 "B",
3341 );
3342
3343 let source = Path::new(path!("/outer"));
3344 let target = Path::new(path!("/outer/inner1/outer"));
3345 copy_recursive(
3346 fs.as_ref(),
3347 source,
3348 target,
3349 CopyOptions {
3350 ignore_if_exists: true,
3351 ..Default::default()
3352 },
3353 )
3354 .await
3355 .unwrap();
3356
3357 assert_eq!(
3358 fs.files(),
3359 vec![
3360 PathBuf::from(path!("/outer/inner1/a")),
3361 PathBuf::from(path!("/outer/inner1/b")),
3362 PathBuf::from(path!("/outer/inner2/c")),
3363 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3364 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3365 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3366 PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
3367 ]
3368 );
3369 assert_eq!(
3370 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3371 .await
3372 .unwrap(),
3373 "B"
3374 );
3375 }
3376
3377 #[gpui::test]
3378 async fn test_realfs_atomic_write(executor: BackgroundExecutor) {
3379 // With the file handle still open, the file should be replaced
3380 // https://github.com/zed-industries/zed/issues/30054
3381 let fs = RealFs {
3382 bundled_git_binary_path: None,
3383 executor,
3384 next_job_id: Arc::new(AtomicUsize::new(0)),
3385 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3386 };
3387 let temp_dir = TempDir::new().unwrap();
3388 let file_to_be_replaced = temp_dir.path().join("file.txt");
3389 let mut file = std::fs::File::create_new(&file_to_be_replaced).unwrap();
3390 file.write_all(b"Hello").unwrap();
3391 // drop(file); // We still hold the file handle here
3392 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3393 assert_eq!(content, "Hello");
3394 smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "World".into())).unwrap();
3395 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3396 assert_eq!(content, "World");
3397 }
3398
3399 #[gpui::test]
3400 async fn test_realfs_atomic_write_non_existing_file(executor: BackgroundExecutor) {
3401 let fs = RealFs {
3402 bundled_git_binary_path: None,
3403 executor,
3404 next_job_id: Arc::new(AtomicUsize::new(0)),
3405 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3406 };
3407 let temp_dir = TempDir::new().unwrap();
3408 let file_to_be_replaced = temp_dir.path().join("file.txt");
3409 smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "Hello".into())).unwrap();
3410 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3411 assert_eq!(content, "Hello");
3412 }
3413
3414 #[gpui::test]
3415 async fn test_rename(executor: BackgroundExecutor) {
3416 let fs = FakeFs::new(executor.clone());
3417 fs.insert_tree(
3418 path!("/root"),
3419 json!({
3420 "src": {
3421 "file_a.txt": "content a",
3422 "file_b.txt": "content b"
3423 }
3424 }),
3425 )
3426 .await;
3427
3428 fs.rename(
3429 Path::new(path!("/root/src/file_a.txt")),
3430 Path::new(path!("/root/src/new/renamed_a.txt")),
3431 RenameOptions {
3432 create_parents: true,
3433 ..Default::default()
3434 },
3435 )
3436 .await
3437 .unwrap();
3438
        // Assert that `file_a.txt` was renamed and moved into a directory that
        // did not exist before.
3441 assert_eq!(
3442 fs.files(),
3443 vec![
3444 PathBuf::from(path!("/root/src/file_b.txt")),
3445 PathBuf::from(path!("/root/src/new/renamed_a.txt")),
3446 ]
3447 );
3448
3449 let result = fs
3450 .rename(
3451 Path::new(path!("/root/src/file_b.txt")),
3452 Path::new(path!("/root/src/old/renamed_b.txt")),
3453 RenameOptions {
3454 create_parents: false,
3455 ..Default::default()
3456 },
3457 )
3458 .await;
3459
        // Assert that `file_b.txt` was neither renamed nor moved, since
        // `create_parents` was set to `false` and the target directory did not
        // exist.
3463 assert!(result.is_err());
3464 assert_eq!(
3465 fs.files(),
3466 vec![
3467 PathBuf::from(path!("/root/src/file_b.txt")),
3468 PathBuf::from(path!("/root/src/new/renamed_a.txt")),
3469 ]
3470 );
3471 }
3472}