1#[cfg(target_os = "macos")]
2mod mac_watcher;
3
4#[cfg(not(target_os = "macos"))]
5pub mod fs_watcher;
6
7use parking_lot::Mutex;
8use std::sync::atomic::{AtomicUsize, Ordering};
9use std::time::Instant;
10use uuid::Uuid;
11
12use anyhow::{Context as _, Result, anyhow};
13#[cfg(any(target_os = "linux", target_os = "freebsd"))]
14use ashpd::desktop::trash;
15use futures::stream::iter;
16use gpui::App;
17use gpui::BackgroundExecutor;
18use gpui::Global;
19use gpui::ReadGlobal as _;
20use gpui::SharedString;
21use std::borrow::Cow;
22use util::command::new_smol_command;
23
24#[cfg(unix)]
25use std::os::fd::{AsFd, AsRawFd};
26
27#[cfg(unix)]
28use std::os::unix::fs::{FileTypeExt, MetadataExt};
29
30#[cfg(any(target_os = "macos", target_os = "freebsd"))]
31use std::mem::MaybeUninit;
32
33use async_tar::Archive;
34use collections::HashMap;
35use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture};
36use git::repository::{GitRepository, RealGitRepository};
37use is_executable::IsExecutable;
38use rope::Rope;
39use serde::{Deserialize, Serialize};
40use smol::io::AsyncWriteExt;
41use std::{
42 io::{self, Write},
43 path::{Component, Path, PathBuf},
44 pin::Pin,
45 sync::Arc,
46 time::{Duration, SystemTime, UNIX_EPOCH},
47};
48use tempfile::TempDir;
49use text::LineEnding;
50
51#[cfg(any(test, feature = "test-support"))]
52mod fake_git_repo;
53#[cfg(any(test, feature = "test-support"))]
54use collections::{BTreeMap, btree_map};
55#[cfg(any(test, feature = "test-support"))]
56use fake_git_repo::FakeGitRepositoryState;
57#[cfg(any(test, feature = "test-support"))]
58use git::{
59 repository::{RepoPath, repo_path},
60 status::{FileStatus, StatusCode, TrackedStatus, UnmergedStatus},
61};
62
63#[cfg(any(test, feature = "test-support"))]
64use smol::io::AsyncReadExt;
65#[cfg(any(test, feature = "test-support"))]
66use std::ffi::OsStr;
67
68#[cfg(any(test, feature = "test-support"))]
69pub use fake_git_repo::{LOAD_HEAD_TEXT_TASK, LOAD_INDEX_TEXT_TASK};
70
/// Maximum size in bytes of a file for which trash & restore via the temp dir is supported.
const TRASH_LIMIT: u64 = 8 * 1024 * 1024; // 8 MiB
73
74pub trait Watcher: Send + Sync {
75 fn add(&self, path: &Path) -> Result<()>;
76 fn remove(&self, path: &Path) -> Result<()>;
77}
78
79#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
80pub enum PathEventKind {
81 Removed,
82 Created,
83 Changed,
84}
85
86#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
87pub struct PathEvent {
88 pub path: PathBuf,
89 pub kind: Option<PathEventKind>,
90}
91
92impl From<PathEvent> for PathBuf {
93 fn from(event: PathEvent) -> Self {
94 event.path
95 }
96}
97
98#[derive(Debug, Default)]
99struct TrashCache {
100 trashed_items: HashMap<TrashedItem, TrashedItemInfo>,
101}
102impl TrashCache {
    /// Adds an item to the trash cache.
    ///
    /// The caller is expected to then move or copy the item to the `path_in_trash` of the
    /// returned [`TrashedItemInfo`].
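    ///
    /// A minimal sketch of the intended flow (the path below is hypothetical):
    ///
    /// ```ignore
    /// let (id, info) = cache.add_item(Path::new("/home/user/notes.txt"));
    /// // The caller copies or moves the file to `info.path_in_trash`, and later:
    /// let info = cache.remove(id).expect("item is still cached");
    /// // ...moves it back to `info.original_path`.
    /// ```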
106 fn add_item(&mut self, original_path: &Path) -> (TrashedItem, TrashedItemInfo) {
107 let uuid = Uuid::new_v4();
108 let path_in_trash = paths::temp_dir()
109 .join("trashed_files")
110 .join(uuid.to_string());
111 let id = TrashedItem(uuid);
112 let info = TrashedItemInfo {
113 path_in_trash,
114 original_path: original_path.to_path_buf(),
115 };
116 self.trashed_items.insert(id, info.clone());
117 (id, info)
118 }
119 fn remove(&mut self, id: TrashedItem) -> Option<TrashedItemInfo> {
120 self.trashed_items.remove(&id)
121 }
122}
123/// Info needed to restore an item from the trash.
124///
125/// In the future, this can be made OS-specific.
126#[derive(Debug, Clone)]
127struct TrashedItemInfo {
128 path_in_trash: PathBuf,
129 original_path: PathBuf,
130}
131
132/// Handle to a trashed item.
133#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
134pub struct TrashedItem(Uuid);
135
136#[async_trait::async_trait]
137pub trait Fs: Send + Sync {
138 async fn create_dir(&self, path: &Path) -> Result<()>;
139 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()>;
140 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
141 async fn create_file_with(
142 &self,
143 path: &Path,
144 content: Pin<&mut (dyn AsyncRead + Send)>,
145 ) -> Result<()>;
146 async fn extract_tar_file(
147 &self,
148 path: &Path,
149 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
150 ) -> Result<()>;
151 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
152 async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;
153 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;
154 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<Option<TrashedItem>>;
155 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;
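    /// Moves a file to the operating system's trash.
    ///
    /// When a handle is returned, it can later be passed to [`Fs::restore_from_trash`].
    /// A minimal usage sketch (not taken from this crate's tests; `fs` is assumed to be an
    /// `Arc<dyn Fs>` and the path is hypothetical):
    ///
    /// ```ignore
    /// let trashed = fs
    ///     .trash_file(Path::new("/tmp/scratch.txt"), RemoveOptions::default())
    ///     .await?;
    /// if let Some(item) = trashed {
    ///     // Undo the deletion by moving the file back to where it came from.
    ///     let restored = fs.restore_from_trash(item).await?;
    /// }
    /// ```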
156 async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<Option<TrashedItem>>;
157 async fn restore_from_trash(&self, trashed_item: TrashedItem) -> Result<PathBuf>;
158 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>>;
159 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>>;
160 async fn load(&self, path: &Path) -> Result<String> {
161 Ok(String::from_utf8(self.load_bytes(path).await?)?)
162 }
163 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>>;
164 async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
165 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()>;
166 async fn write(&self, path: &Path, content: &[u8]) -> Result<()>;
167 async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
168 async fn is_file(&self, path: &Path) -> bool;
169 async fn is_dir(&self, path: &Path) -> bool;
170 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
171 async fn read_link(&self, path: &Path) -> Result<PathBuf>;
172 async fn read_dir(
173 &self,
174 path: &Path,
175 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;
176
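    /// Watches `path` for changes, batching events with the given `latency`.
    ///
    /// A minimal usage sketch (not taken from this crate's tests; `fs` is assumed to be an
    /// `Arc<dyn Fs>` and the path is illustrative):
    ///
    /// ```ignore
    /// let (mut events, _watcher) = fs
    ///     .watch(Path::new("/tmp/project"), Duration::from_millis(100))
    ///     .await;
    /// while let Some(batch) = events.next().await {
    ///     for event in batch {
    ///         println!("{:?}: {:?}", event.kind, event.path);
    ///     }
    /// }
    /// ```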
177 async fn watch(
178 &self,
179 path: &Path,
180 latency: Duration,
181 ) -> (
182 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
183 Arc<dyn Watcher>,
184 );
185
186 fn open_repo(
187 &self,
188 abs_dot_git: &Path,
189 system_git_binary_path: Option<&Path>,
190 ) -> Option<Arc<dyn GitRepository>>;
191 async fn git_init(&self, abs_work_directory: &Path, fallback_branch_name: String)
192 -> Result<()>;
193 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()>;
194 fn is_fake(&self) -> bool;
195 async fn is_case_sensitive(&self) -> Result<bool>;
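    /// Returns a receiver for events about long-running filesystem jobs; for example,
    /// [`Fs::git_clone`] on [`RealFs`] reports its progress this way.
    ///
    /// A minimal sketch (assumes `fs` is an `Arc<dyn Fs>`):
    ///
    /// ```ignore
    /// let mut jobs = fs.subscribe_to_jobs();
    /// while let Some(event) = jobs.next().await {
    ///     match event {
    ///         JobEvent::Started { info } => println!("started: {}", info.message),
    ///         JobEvent::Completed { id } => println!("completed job {id}"),
    ///     }
    /// }
    /// ```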
196 fn subscribe_to_jobs(&self) -> JobEventReceiver;
197
198 #[cfg(any(test, feature = "test-support"))]
199 fn as_fake(&self) -> Arc<FakeFs> {
200 panic!("called as_fake on a real fs");
201 }
202}
203
204struct GlobalFs(Arc<dyn Fs>);
205
206impl Global for GlobalFs {}
207
208impl dyn Fs {
209 /// Returns the global [`Fs`].
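    ///
    /// A minimal sketch of the intended setup (assumes `cx: &mut App` during startup; the
    /// executor argument shown is illustrative):
    ///
    /// ```ignore
    /// let fs: Arc<dyn Fs> = Arc::new(RealFs::new(None, cx.background_executor().clone()));
    /// <dyn Fs>::set_global(fs.clone(), cx);
    /// // Later, anywhere with access to the `App`:
    /// let fs = <dyn Fs>::global(cx);
    /// ```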
210 pub fn global(cx: &App) -> Arc<Self> {
211 GlobalFs::global(cx).0.clone()
212 }
213
214 /// Sets the global [`Fs`].
215 pub fn set_global(fs: Arc<Self>, cx: &mut App) {
216 cx.set_global(GlobalFs(fs));
217 }
218}
219
220#[derive(Copy, Clone, Default)]
221pub struct CreateOptions {
222 pub overwrite: bool,
223 pub ignore_if_exists: bool,
224}
225
226#[derive(Copy, Clone, Default)]
227pub struct CopyOptions {
228 pub overwrite: bool,
229 pub ignore_if_exists: bool,
230}
231
232#[derive(Copy, Clone, Default)]
233pub struct RenameOptions {
234 pub overwrite: bool,
235 pub ignore_if_exists: bool,
236 /// Whether to create parent directories if they do not exist.
237 pub create_parents: bool,
238}
239
240#[derive(Copy, Clone, Default)]
241pub struct RemoveOptions {
242 pub recursive: bool,
243 pub ignore_if_not_exists: bool,
244}
245
246#[derive(Copy, Clone, Debug)]
247pub struct Metadata {
248 pub inode: u64,
249 pub mtime: MTime,
250 pub is_symlink: bool,
251 pub is_dir: bool,
252 pub len: u64,
253 pub is_fifo: bool,
254 pub is_executable: bool,
255}
256
257/// Filesystem modification time. The purpose of this newtype is to discourage use of operations
258/// that do not make sense for mtimes. In particular, it is not always valid to compare mtimes using
259/// `<` or `>`, as there are many things that can cause the mtime of a file to be earlier than it
260/// was. See ["mtime comparison considered harmful" - apenwarr](https://apenwarr.ca/log/20181113).
261///
262/// Do not derive Ord, PartialOrd, or arithmetic operation traits.
263#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
264#[serde(transparent)]
265pub struct MTime(SystemTime);
266
267pub type JobId = usize;
268
269#[derive(Clone, Debug)]
270pub struct JobInfo {
271 pub start: Instant,
272 pub message: SharedString,
273 pub id: JobId,
274}
275
276#[derive(Debug, Clone)]
277pub enum JobEvent {
278 Started { info: JobInfo },
279 Completed { id: JobId },
280}
281
282pub type JobEventSender = futures::channel::mpsc::UnboundedSender<JobEvent>;
283pub type JobEventReceiver = futures::channel::mpsc::UnboundedReceiver<JobEvent>;
284
285struct JobTracker {
286 id: JobId,
287 subscribers: Arc<Mutex<Vec<JobEventSender>>>,
288}
289
290impl JobTracker {
291 fn new(info: JobInfo, subscribers: Arc<Mutex<Vec<JobEventSender>>>) -> Self {
292 let id = info.id;
293 {
294 let mut subs = subscribers.lock();
295 subs.retain(|sender| {
296 sender
297 .unbounded_send(JobEvent::Started { info: info.clone() })
298 .is_ok()
299 });
300 }
301 Self { id, subscribers }
302 }
303}
304
305impl Drop for JobTracker {
306 fn drop(&mut self) {
307 let mut subs = self.subscribers.lock();
308 subs.retain(|sender| {
309 sender
310 .unbounded_send(JobEvent::Completed { id: self.id })
311 .is_ok()
312 });
313 }
314}
315
316impl MTime {
317 /// Conversion intended for persistence and testing.
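    ///
    /// A small round-trip sketch with [`MTime::to_seconds_and_nanos_for_persistence`]:
    ///
    /// ```ignore
    /// let mtime = MTime::from_seconds_and_nanos(1_700_000_000, 500);
    /// assert_eq!(
    ///     mtime.to_seconds_and_nanos_for_persistence(),
    ///     Some((1_700_000_000, 500))
    /// );
    /// ```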
318 pub fn from_seconds_and_nanos(secs: u64, nanos: u32) -> Self {
319 MTime(UNIX_EPOCH + Duration::new(secs, nanos))
320 }
321
322 /// Conversion intended for persistence.
323 pub fn to_seconds_and_nanos_for_persistence(self) -> Option<(u64, u32)> {
324 self.0
325 .duration_since(UNIX_EPOCH)
326 .ok()
327 .map(|duration| (duration.as_secs(), duration.subsec_nanos()))
328 }
329
330 /// Returns the value wrapped by this `MTime`, for presentation to the user. The name including
331 /// "_for_user" is to discourage misuse - this method should not be used when making decisions
332 /// about file dirtiness.
333 pub fn timestamp_for_user(self) -> SystemTime {
334 self.0
335 }
336
    /// Temporary method to split out the behavior changes from the introduction of this newtype.
338 pub fn bad_is_greater_than(self, other: MTime) -> bool {
339 self.0 > other.0
340 }
341}
342
343impl From<proto::Timestamp> for MTime {
344 fn from(timestamp: proto::Timestamp) -> Self {
345 MTime(timestamp.into())
346 }
347}
348
349impl From<MTime> for proto::Timestamp {
350 fn from(mtime: MTime) -> Self {
351 mtime.0.into()
352 }
353}
354
355pub struct RealFs {
356 bundled_git_binary_path: Option<PathBuf>,
357 executor: BackgroundExecutor,
358 next_job_id: Arc<AtomicUsize>,
359 job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
360 trash_cache: Arc<Mutex<TrashCache>>,
361}
362
363pub trait FileHandle: Send + Sync + std::fmt::Debug {
364 fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf>;
365}
366
367impl FileHandle for std::fs::File {
368 #[cfg(target_os = "macos")]
369 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
370 use std::{
371 ffi::{CStr, OsStr},
372 os::unix::ffi::OsStrExt,
373 };
374
375 let fd = self.as_fd();
376 let mut path_buf = MaybeUninit::<[u8; libc::PATH_MAX as usize]>::uninit();
377
378 let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_GETPATH, path_buf.as_mut_ptr()) };
379 if result == -1 {
            anyhow::bail!("fcntl returned -1");
381 }
382
383 // SAFETY: `fcntl` will initialize the path buffer.
384 let c_str = unsafe { CStr::from_ptr(path_buf.as_ptr().cast()) };
385 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
386 Ok(path)
387 }
388
389 #[cfg(target_os = "linux")]
390 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
391 let fd = self.as_fd();
392 let fd_path = format!("/proc/self/fd/{}", fd.as_raw_fd());
393 let new_path = std::fs::read_link(fd_path)?;
394 if new_path
395 .file_name()
396 .is_some_and(|f| f.to_string_lossy().ends_with(" (deleted)"))
397 {
398 anyhow::bail!("file was deleted")
399 };
400
401 Ok(new_path)
402 }
403
404 #[cfg(target_os = "freebsd")]
405 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
406 use std::{
407 ffi::{CStr, OsStr},
408 os::unix::ffi::OsStrExt,
409 };
410
411 let fd = self.as_fd();
        let mut kif = MaybeUninit::<libc::kinfo_file>::uninit();
        // `F_KINFO` requires `kf_structsize` to be set before the call; write it through the
        // raw pointer, since `MaybeUninit` itself has no such field.
        unsafe { (*kif.as_mut_ptr()).kf_structsize = libc::KINFO_FILE_SIZE };

        let result = unsafe { libc::fcntl(fd.as_raw_fd(), libc::F_KINFO, kif.as_mut_ptr()) };
        if result == -1 {
            anyhow::bail!("fcntl returned -1");
        }

        // SAFETY: a successful `fcntl` call has initialized `kif`.
        let kif = unsafe { kif.assume_init() };
        let c_str = unsafe { CStr::from_ptr(kif.kf_path.as_ptr()) };
422 let path = PathBuf::from(OsStr::from_bytes(c_str.to_bytes()));
423 Ok(path)
424 }
425
426 #[cfg(target_os = "windows")]
427 fn current_path(&self, _: &Arc<dyn Fs>) -> Result<PathBuf> {
428 use std::ffi::OsString;
429 use std::os::windows::ffi::OsStringExt;
430 use std::os::windows::io::AsRawHandle;
431
432 use windows::Win32::Foundation::HANDLE;
433 use windows::Win32::Storage::FileSystem::{
434 FILE_NAME_NORMALIZED, GetFinalPathNameByHandleW,
435 };
436
437 let handle = HANDLE(self.as_raw_handle() as _);
438
439 // Query required buffer size (in wide chars)
440 let required_len =
441 unsafe { GetFinalPathNameByHandleW(handle, &mut [], FILE_NAME_NORMALIZED) };
442 if required_len == 0 {
443 anyhow::bail!("GetFinalPathNameByHandleW returned 0 length");
444 }
445
446 // Allocate buffer and retrieve the path
447 let mut buf: Vec<u16> = vec![0u16; required_len as usize + 1];
448 let written = unsafe { GetFinalPathNameByHandleW(handle, &mut buf, FILE_NAME_NORMALIZED) };
449 if written == 0 {
450 anyhow::bail!("GetFinalPathNameByHandleW failed to write path");
451 }
452
453 let os_str: OsString = OsString::from_wide(&buf[..written as usize]);
454 Ok(PathBuf::from(os_str))
455 }
456}
457
458pub struct RealWatcher {}
459
460impl RealFs {
461 pub fn new(git_binary_path: Option<PathBuf>, executor: BackgroundExecutor) -> Self {
462 Self {
463 bundled_git_binary_path: git_binary_path,
464 executor,
465 next_job_id: Arc::new(AtomicUsize::new(0)),
466 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
467 trash_cache: Arc::new(Mutex::new(TrashCache::default())),
468 }
469 }
470
471 #[cfg(target_os = "windows")]
472 fn canonicalize(path: &Path) -> Result<PathBuf> {
473 let mut strip_prefix = None;
474
475 let mut new_path = PathBuf::new();
476 for component in path.components() {
477 match component {
478 std::path::Component::Prefix(_) => {
479 let component = component.as_os_str();
480 let canonicalized = if component
481 .to_str()
482 .map(|e| e.ends_with("\\"))
483 .unwrap_or(false)
484 {
485 std::fs::canonicalize(component)
486 } else {
487 let mut component = component.to_os_string();
488 component.push("\\");
489 std::fs::canonicalize(component)
490 }?;
491
492 let mut strip = PathBuf::new();
493 for component in canonicalized.components() {
494 match component {
495 Component::Prefix(prefix_component) => {
496 match prefix_component.kind() {
497 std::path::Prefix::Verbatim(os_str) => {
498 strip.push(os_str);
499 }
500 std::path::Prefix::VerbatimUNC(host, share) => {
501 strip.push("\\\\");
502 strip.push(host);
503 strip.push(share);
504 }
505 std::path::Prefix::VerbatimDisk(disk) => {
506 strip.push(format!("{}:", disk as char));
507 }
508 _ => strip.push(component),
509 };
510 }
511 _ => strip.push(component),
512 }
513 }
514 strip_prefix = Some(strip);
515 new_path.push(component);
516 }
517 std::path::Component::RootDir => {
518 new_path.push(component);
519 }
520 std::path::Component::CurDir => {
521 if strip_prefix.is_none() {
522 // unrooted path
523 new_path.push(component);
524 }
525 }
526 std::path::Component::ParentDir => {
527 if strip_prefix.is_some() {
528 // rooted path
529 new_path.pop();
530 } else {
531 new_path.push(component);
532 }
533 }
534 std::path::Component::Normal(_) => {
535 if let Ok(link) = std::fs::read_link(new_path.join(component)) {
536 let link = match &strip_prefix {
537 Some(e) => link.strip_prefix(e).unwrap_or(&link),
538 None => &link,
539 };
540 new_path.extend(link);
541 } else {
542 new_path.push(component);
543 }
544 }
545 }
546 }
547
548 Ok(new_path)
549 }
550}
551
552#[async_trait::async_trait]
553impl Fs for RealFs {
554 async fn create_dir(&self, path: &Path) -> Result<()> {
555 Ok(smol::fs::create_dir_all(path).await?)
556 }
557
558 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
559 #[cfg(unix)]
560 smol::fs::unix::symlink(target, path).await?;
561
562 #[cfg(windows)]
563 if smol::fs::metadata(&target).await?.is_dir() {
564 let status = new_smol_command("cmd")
565 .args(["/C", "mklink", "/J"])
566 .args([path, target.as_path()])
567 .status()
568 .await?;
569
570 if !status.success() {
571 return Err(anyhow::anyhow!(
572 "Failed to create junction from {:?} to {:?}",
573 path,
574 target
575 ));
576 }
577 } else {
578 smol::fs::windows::symlink_file(target, path).await?
579 }
580
581 Ok(())
582 }
583
584 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
585 let mut open_options = smol::fs::OpenOptions::new();
586 open_options.write(true).create(true);
587 if options.overwrite {
588 open_options.truncate(true);
589 } else if !options.ignore_if_exists {
590 open_options.create_new(true);
591 }
592 open_options.open(path).await?;
593 Ok(())
594 }
595
596 async fn create_file_with(
597 &self,
598 path: &Path,
599 content: Pin<&mut (dyn AsyncRead + Send)>,
600 ) -> Result<()> {
601 let mut file = smol::fs::File::create(&path).await?;
602 futures::io::copy(content, &mut file).await?;
603 Ok(())
604 }
605
606 async fn extract_tar_file(
607 &self,
608 path: &Path,
609 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
610 ) -> Result<()> {
611 content.unpack(path).await?;
612 Ok(())
613 }
614
615 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
616 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
617 if options.ignore_if_exists {
618 return Ok(());
619 } else {
620 anyhow::bail!("{target:?} already exists");
621 }
622 }
623
624 smol::fs::copy(source, target).await?;
625 Ok(())
626 }
627
628 async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
629 if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
630 if options.ignore_if_exists {
631 return Ok(());
632 } else {
633 anyhow::bail!("{target:?} already exists");
634 }
635 }
636
637 if options.create_parents {
638 if let Some(parent) = target.parent() {
639 self.create_dir(parent).await?;
640 }
641 }
642
643 smol::fs::rename(source, target).await?;
644 Ok(())
645 }
646
647 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
648 let result = if options.recursive {
649 smol::fs::remove_dir_all(path).await
650 } else {
651 smol::fs::remove_dir(path).await
652 };
653 match result {
654 Ok(()) => Ok(()),
655 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
656 Ok(())
657 }
658 Err(err) => Err(err)?,
659 }
660 }
661
662 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
663 #[cfg(windows)]
664 if let Ok(Some(metadata)) = self.metadata(path).await
665 && metadata.is_symlink
666 && metadata.is_dir
667 {
668 self.remove_dir(
669 path,
670 RemoveOptions {
671 recursive: false,
672 ignore_if_not_exists: true,
673 },
674 )
675 .await?;
676 return Ok(());
677 }
678
679 match smol::fs::remove_file(path).await {
680 Ok(()) => Ok(()),
681 Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
682 Ok(())
683 }
684 Err(err) => Err(err)?,
685 }
686 }
687
688 #[cfg(target_os = "macos")]
689 async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<Option<TrashedItem>> {
690 let trashed_item = copy_to_trash_cache(self, path, &self.trash_cache, options).await?;
691
692 use cocoa::{
693 base::{id, nil},
694 foundation::{NSAutoreleasePool, NSString},
695 };
696 use objc::{class, msg_send, sel, sel_impl};
697
698 unsafe {
699 /// Allow NSString::alloc use here because it sets autorelease
700 #[allow(clippy::disallowed_methods)]
701 unsafe fn ns_string(string: &str) -> id {
702 unsafe { NSString::alloc(nil).init_str(string).autorelease() }
703 }
704
705 let url: id = msg_send![class!(NSURL), fileURLWithPath: ns_string(path.to_string_lossy().as_ref())];
706 let array: id = msg_send![class!(NSArray), arrayWithObject: url];
707 let workspace: id = msg_send![class!(NSWorkspace), sharedWorkspace];
708
709 let _: id = msg_send![workspace, recycleURLs: array completionHandler: nil];
710 }
711 Ok(trashed_item)
712 }
713
    #[cfg(any(target_os = "linux", target_os = "freebsd"))]
    async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<Option<TrashedItem>> {
        let trashed_item = copy_to_trash_cache(self, path, &self.trash_cache, options).await?;

        if let Ok(Some(metadata)) = self.metadata(path).await
            && metadata.is_symlink
        {
            // TODO: trash_file does not support trashing symlinks yet - https://github.com/bilelmoussaoui/ashpd/issues/255
            self.remove_file(path, RemoveOptions::default()).await?;
            return Ok(trashed_item);
        }
        let file = smol::fs::File::open(path).await?;
        if let Err(err) = trash::trash_file(&file.as_fd()).await {
            log::error!("Failed to trash file: {}", err);
            // Trashing files can fail if you don't have a trashing dbus service configured.
            // In that case, delete the file directly instead.
            self.remove_file(path, RemoveOptions::default()).await?;
        }

        Ok(trashed_item)
    }
737
738 #[cfg(target_os = "windows")]
739 async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<Option<TrashedItem>> {
740 let trashed_item = copy_to_trash_cache(self, path, &self.trash_cache, options).await?;
741
742 use util::paths::SanitizedPath;
743 use windows::{
744 Storage::{StorageDeleteOption, StorageFile},
745 core::HSTRING,
746 };
        // todo(windows)
        // When a new version of `windows-rs` is released, make this operation `async`.
749 let path = path.canonicalize()?;
750 let path = SanitizedPath::new(&path);
751 let path_string = path.to_string();
752 let file = StorageFile::GetFileFromPathAsync(&HSTRING::from(path_string))?.get()?;
753 file.DeleteAsync(StorageDeleteOption::Default)?.get()?;
754 Ok(trashed_item)
755 }
756
757 #[cfg(target_os = "macos")]
758 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<Option<TrashedItem>> {
759 self.trash_file(path, options).await
760 }
761
762 #[cfg(any(target_os = "linux", target_os = "freebsd"))]
763 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<Option<TrashedItem>> {
764 self.trash_file(path, options).await
765 }
766
767 #[cfg(target_os = "windows")]
768 async fn trash_dir(&self, path: &Path, _options: RemoveOptions) -> Result<Option<TrashedItem>> {
769 let trashed_item = copy_to_trash_cache(self, path, &self.trash_cache, options).await?;
770
771 use util::paths::SanitizedPath;
772 use windows::{
773 Storage::{StorageDeleteOption, StorageFolder},
774 core::HSTRING,
775 };
776
        // todo(windows)
        // When a new version of `windows-rs` is released, make this operation `async`.
779 let path = path.canonicalize()?;
780 let path = SanitizedPath::new(&path);
781 let path_string = path.to_string();
782 let folder = StorageFolder::GetFolderFromPathAsync(&HSTRING::from(path_string))?.get()?;
783 folder.DeleteAsync(StorageDeleteOption::Default)?.get()?;
784 Ok(trashed_item)
785 }
786
787 async fn restore_from_trash(&self, trashed_item: TrashedItem) -> Result<PathBuf> {
788 let trash_info = self
789 .trash_cache
790 .lock()
791 .remove(trashed_item)
792 .context("no item in trash")?;
793 self.rename(
794 &trash_info.path_in_trash,
795 &trash_info.original_path,
796 RenameOptions {
797 overwrite: false,
798 ignore_if_exists: false,
799 create_parents: true,
800 },
801 )
802 .await?;
803 Ok(trash_info.original_path)
804 }
805
806 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
807 Ok(Box::new(std::fs::File::open(path)?))
808 }
809
810 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
811 let mut options = std::fs::OpenOptions::new();
812 options.read(true);
813 #[cfg(windows)]
814 {
815 use std::os::windows::fs::OpenOptionsExt;
816 options.custom_flags(windows::Win32::Storage::FileSystem::FILE_FLAG_BACKUP_SEMANTICS.0);
817 }
818 Ok(Arc::new(options.open(path)?))
819 }
820
821 async fn load(&self, path: &Path) -> Result<String> {
822 let path = path.to_path_buf();
823 self.executor
824 .spawn(async move { Ok(std::fs::read_to_string(path)?) })
825 .await
826 }
827
828 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
829 let path = path.to_path_buf();
830 let bytes = self
831 .executor
832 .spawn(async move { std::fs::read(path) })
833 .await?;
834 Ok(bytes)
835 }
836
837 #[cfg(not(target_os = "windows"))]
838 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
839 smol::unblock(move || {
840 // Use the directory of the destination as temp dir to avoid
841 // invalid cross-device link error, and XDG_CACHE_DIR for fallback.
842 // See https://github.com/zed-industries/zed/pull/8437 for more details.
843 let mut tmp_file =
844 tempfile::NamedTempFile::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
845 tmp_file.write_all(data.as_bytes())?;
846 tmp_file.persist(path)?;
847 anyhow::Ok(())
848 })
849 .await?;
850
851 Ok(())
852 }
853
854 #[cfg(target_os = "windows")]
855 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
856 smol::unblock(move || {
857 // If temp dir is set to a different drive than the destination,
858 // we receive error:
859 //
860 // failed to persist temporary file:
861 // The system cannot move the file to a different disk drive. (os error 17)
862 //
863 // This is because `ReplaceFileW` does not support cross volume moves.
864 // See the remark section: "The backup file, replaced file, and replacement file must all reside on the same volume."
865 // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-replacefilew#remarks
866 //
867 // So we use the directory of the destination as a temp dir to avoid it.
868 // https://github.com/zed-industries/zed/issues/16571
869 let temp_dir = TempDir::new_in(path.parent().unwrap_or(paths::temp_dir()))?;
870 let temp_file = {
871 let temp_file_path = temp_dir.path().join("temp_file");
872 let mut file = std::fs::File::create_new(&temp_file_path)?;
873 file.write_all(data.as_bytes())?;
874 temp_file_path
875 };
876 atomic_replace(path.as_path(), temp_file.as_path())?;
877 anyhow::Ok(())
878 })
879 .await?;
880 Ok(())
881 }
882
883 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
884 let buffer_size = text.summary().len.min(10 * 1024);
885 if let Some(path) = path.parent() {
886 self.create_dir(path).await?;
887 }
888 let file = smol::fs::File::create(path).await?;
889 let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
890 for chunk in text::chunks_with_line_ending(text, line_ending) {
891 writer.write_all(chunk.as_bytes()).await?;
892 }
893 writer.flush().await?;
894 Ok(())
895 }
896
897 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
898 if let Some(path) = path.parent() {
899 self.create_dir(path).await?;
900 }
901 let path = path.to_owned();
902 let contents = content.to_owned();
903 self.executor
904 .spawn(async move {
905 std::fs::write(path, contents)?;
906 Ok(())
907 })
908 .await
909 }
910
911 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
912 let path = path.to_owned();
913 self.executor
914 .spawn(async move {
915 #[cfg(target_os = "windows")]
916 let result = Self::canonicalize(&path);
917
918 #[cfg(not(target_os = "windows"))]
919 let result = std::fs::canonicalize(&path);
920
921 result.with_context(|| format!("canonicalizing {path:?}"))
922 })
923 .await
924 }
925
926 async fn is_file(&self, path: &Path) -> bool {
927 let path = path.to_owned();
928 self.executor
929 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) })
930 .await
931 }
932
933 async fn is_dir(&self, path: &Path) -> bool {
934 let path = path.to_owned();
935 self.executor
936 .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) })
937 .await
938 }
939
940 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
941 let path_buf = path.to_owned();
942 let symlink_metadata = match self
943 .executor
944 .spawn(async move { std::fs::symlink_metadata(&path_buf) })
945 .await
946 {
947 Ok(metadata) => metadata,
948 Err(err) => {
949 return match err.kind() {
950 io::ErrorKind::NotFound | io::ErrorKind::NotADirectory => Ok(None),
951 _ => Err(anyhow::Error::new(err)),
952 };
953 }
954 };
955
956 let is_symlink = symlink_metadata.file_type().is_symlink();
957 let metadata = if is_symlink {
958 let path_buf = path.to_path_buf();
959 let path_exists = self
960 .executor
961 .spawn(async move {
962 path_buf
963 .try_exists()
964 .with_context(|| format!("checking existence for path {path_buf:?}"))
965 })
966 .await?;
967 if path_exists {
968 let path_buf = path.to_path_buf();
969 self.executor
970 .spawn(async move { std::fs::metadata(path_buf) })
971 .await
                    .with_context(|| format!("accessing symlink target metadata for path {path:?}"))?
973 } else {
974 symlink_metadata
975 }
976 } else {
977 symlink_metadata
978 };
979
980 #[cfg(unix)]
981 let inode = metadata.ino();
982
983 #[cfg(windows)]
984 let inode = file_id(path).await?;
985
986 #[cfg(windows)]
987 let is_fifo = false;
988
989 #[cfg(unix)]
990 let is_fifo = metadata.file_type().is_fifo();
991
992 let path_buf = path.to_path_buf();
993 let is_executable = self
994 .executor
995 .spawn(async move { path_buf.is_executable() })
996 .await;
997
998 Ok(Some(Metadata {
999 inode,
1000 mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)),
1001 len: metadata.len(),
1002 is_symlink,
1003 is_dir: metadata.file_type().is_dir(),
1004 is_fifo,
1005 is_executable,
1006 }))
1007 }
1008
1009 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
1010 let path = path.to_owned();
1011 let path = self
1012 .executor
1013 .spawn(async move { std::fs::read_link(&path) })
1014 .await?;
1015 Ok(path)
1016 }
1017
1018 async fn read_dir(
1019 &self,
1020 path: &Path,
1021 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
1022 let path = path.to_owned();
1023 let result = iter(
1024 self.executor
1025 .spawn(async move { std::fs::read_dir(path) })
1026 .await?,
1027 )
1028 .map(|entry| match entry {
1029 Ok(entry) => Ok(entry.path()),
1030 Err(error) => Err(anyhow!("failed to read dir entry {error:?}")),
1031 });
1032 Ok(Box::pin(result))
1033 }
1034
1035 #[cfg(target_os = "macos")]
1036 async fn watch(
1037 &self,
1038 path: &Path,
1039 latency: Duration,
1040 ) -> (
1041 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
1042 Arc<dyn Watcher>,
1043 ) {
1044 use fsevent::StreamFlags;
1045
1046 let (events_tx, events_rx) = smol::channel::unbounded();
1047 let handles = Arc::new(parking_lot::Mutex::new(collections::BTreeMap::default()));
1048 let watcher = Arc::new(mac_watcher::MacWatcher::new(
1049 events_tx,
1050 Arc::downgrade(&handles),
1051 latency,
1052 ));
1053 watcher.add(path).expect("handles can't be dropped");
1054
1055 (
1056 Box::pin(
1057 events_rx
1058 .map(|events| {
1059 events
1060 .into_iter()
1061 .map(|event| {
1062 log::trace!("fs path event: {event:?}");
1063 let kind = if event.flags.contains(StreamFlags::ITEM_REMOVED) {
1064 Some(PathEventKind::Removed)
1065 } else if event.flags.contains(StreamFlags::ITEM_CREATED) {
1066 Some(PathEventKind::Created)
1067 } else if event.flags.contains(StreamFlags::ITEM_MODIFIED)
1068 | event.flags.contains(StreamFlags::ITEM_RENAMED)
1069 {
1070 Some(PathEventKind::Changed)
1071 } else {
1072 None
1073 };
1074 PathEvent {
1075 path: event.path,
1076 kind,
1077 }
1078 })
1079 .collect()
1080 })
1081 .chain(futures::stream::once(async move {
1082 drop(handles);
1083 vec![]
1084 })),
1085 ),
1086 watcher,
1087 )
1088 }
1089
1090 #[cfg(not(target_os = "macos"))]
1091 async fn watch(
1092 &self,
1093 path: &Path,
1094 latency: Duration,
1095 ) -> (
1096 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
1097 Arc<dyn Watcher>,
1098 ) {
1099 use util::{ResultExt as _, paths::SanitizedPath};
1100
1101 let (tx, rx) = smol::channel::unbounded();
1102 let pending_paths: Arc<Mutex<Vec<PathEvent>>> = Default::default();
1103 let watcher = Arc::new(fs_watcher::FsWatcher::new(tx, pending_paths.clone()));
1104
1105 // If the path doesn't exist yet (e.g. settings.json), watch the parent dir to learn when it's created.
1106 if let Err(e) = watcher.add(path)
1107 && let Some(parent) = path.parent()
1108 && let Err(parent_e) = watcher.add(parent)
1109 {
1110 log::warn!(
1111 "Failed to watch {} and its parent directory {}:\n{e}\n{parent_e}",
1112 path.display(),
1113 parent.display()
1114 );
1115 }
1116
1117 // Check if path is a symlink and follow the target parent
        if let Ok(mut target) = self.read_link(path).await {
1119 log::trace!("watch symlink {path:?} -> {target:?}");
1120 // Check if symlink target is relative path, if so make it absolute
1121 if target.is_relative()
1122 && let Some(parent) = path.parent()
1123 {
1124 target = parent.join(target);
1125 if let Ok(canonical) = self.canonicalize(&target).await {
1126 target = SanitizedPath::new(&canonical).as_path().to_path_buf();
1127 }
1128 }
1129 watcher.add(&target).ok();
1130 if let Some(parent) = target.parent() {
1131 watcher.add(parent).log_err();
1132 }
1133 }
1134
1135 (
1136 Box::pin(rx.filter_map({
1137 let watcher = watcher.clone();
1138 move |_| {
1139 let _ = watcher.clone();
1140 let pending_paths = pending_paths.clone();
1141 async move {
1142 smol::Timer::after(latency).await;
1143 let paths = std::mem::take(&mut *pending_paths.lock());
1144 (!paths.is_empty()).then_some(paths)
1145 }
1146 }
1147 })),
1148 watcher,
1149 )
1150 }
1151
1152 fn open_repo(
1153 &self,
1154 dotgit_path: &Path,
1155 system_git_binary_path: Option<&Path>,
1156 ) -> Option<Arc<dyn GitRepository>> {
1157 Some(Arc::new(RealGitRepository::new(
1158 dotgit_path,
1159 self.bundled_git_binary_path.clone(),
1160 system_git_binary_path.map(|path| path.to_path_buf()),
1161 self.executor.clone(),
1162 )?))
1163 }
1164
1165 async fn git_init(
1166 &self,
1167 abs_work_directory_path: &Path,
1168 fallback_branch_name: String,
1169 ) -> Result<()> {
1170 let config = new_smol_command("git")
1171 .current_dir(abs_work_directory_path)
1172 .args(&["config", "--global", "--get", "init.defaultBranch"])
1173 .output()
1174 .await?;
1175
        let branch_name = if config.status.success() && !config.stdout.is_empty() {
            String::from_utf8_lossy(&config.stdout)
        } else {
            Cow::Borrowed(fallback_branch_name.as_str())
        };
1183
1184 new_smol_command("git")
1185 .current_dir(abs_work_directory_path)
1186 .args(&["init", "-b"])
1187 .arg(branch_name.trim())
1188 .output()
1189 .await?;
1190
1191 Ok(())
1192 }
1193
1194 async fn git_clone(&self, repo_url: &str, abs_work_directory: &Path) -> Result<()> {
1195 let job_id = self.next_job_id.fetch_add(1, Ordering::SeqCst);
1196 let job_info = JobInfo {
1197 id: job_id,
1198 start: Instant::now(),
1199 message: SharedString::from(format!("Cloning {}", repo_url)),
1200 };
1201
1202 let _job_tracker = JobTracker::new(job_info, self.job_event_subscribers.clone());
1203
1204 let output = new_smol_command("git")
1205 .current_dir(abs_work_directory)
1206 .args(&["clone", repo_url])
1207 .output()
1208 .await?;
1209
1210 if !output.status.success() {
1211 anyhow::bail!(
1212 "git clone failed: {}",
1213 String::from_utf8_lossy(&output.stderr)
1214 );
1215 }
1216
1217 Ok(())
1218 }
1219
1220 fn is_fake(&self) -> bool {
1221 false
1222 }
1223
1224 fn subscribe_to_jobs(&self) -> JobEventReceiver {
1225 let (sender, receiver) = futures::channel::mpsc::unbounded();
1226 self.job_event_subscribers.lock().push(sender);
1227 receiver
1228 }
1229
    /// Checks whether the file system is case-sensitive by attempting to create two files
    /// whose names differ only in casing.
    ///
    /// Both files are created in a temporary directory that is removed at the end.
1234 async fn is_case_sensitive(&self) -> Result<bool> {
1235 let temp_dir = TempDir::new()?;
1236 let test_file_1 = temp_dir.path().join("case_sensitivity_test.tmp");
1237 let test_file_2 = temp_dir.path().join("CASE_SENSITIVITY_TEST.TMP");
1238
1239 let create_opts = CreateOptions {
1240 overwrite: false,
1241 ignore_if_exists: false,
1242 };
1243
1244 // Create file1
1245 self.create_file(&test_file_1, create_opts).await?;
1246
1247 // Now check whether it's possible to create file2
1248 let case_sensitive = match self.create_file(&test_file_2, create_opts).await {
1249 Ok(_) => Ok(true),
1250 Err(e) => {
1251 if let Some(io_error) = e.downcast_ref::<io::Error>() {
1252 if io_error.kind() == io::ErrorKind::AlreadyExists {
1253 Ok(false)
1254 } else {
1255 Err(e)
1256 }
1257 } else {
1258 Err(e)
1259 }
1260 }
1261 };
1262
1263 temp_dir.close()?;
1264 case_sensitive
1265 }
1266}
1267
1268#[cfg(not(any(target_os = "linux", target_os = "freebsd")))]
1269impl Watcher for RealWatcher {
1270 fn add(&self, _: &Path) -> Result<()> {
1271 Ok(())
1272 }
1273
1274 fn remove(&self, _: &Path) -> Result<()> {
1275 Ok(())
1276 }
1277}
1278
1279#[cfg(any(test, feature = "test-support"))]
1280pub struct FakeFs {
1281 this: std::sync::Weak<Self>,
1282 // Use an unfair lock to ensure tests are deterministic.
1283 state: Arc<Mutex<FakeFsState>>,
1284 executor: gpui::BackgroundExecutor,
1285 trash_cache: Arc<Mutex<TrashCache>>,
1286}
1287
1288#[cfg(any(test, feature = "test-support"))]
1289struct FakeFsState {
1290 root: FakeFsEntry,
1291 next_inode: u64,
1292 next_mtime: SystemTime,
1293 git_event_tx: smol::channel::Sender<PathBuf>,
1294 event_txs: Vec<(PathBuf, smol::channel::Sender<Vec<PathEvent>>)>,
1295 events_paused: bool,
1296 buffered_events: Vec<PathEvent>,
1297 metadata_call_count: usize,
1298 read_dir_call_count: usize,
1299 path_write_counts: std::collections::HashMap<PathBuf, usize>,
1300 moves: std::collections::HashMap<u64, PathBuf>,
1301 job_event_subscribers: Arc<Mutex<Vec<JobEventSender>>>,
1302}
1303
1304#[cfg(any(test, feature = "test-support"))]
1305#[derive(Clone, Debug)]
1306enum FakeFsEntry {
1307 File {
1308 inode: u64,
1309 mtime: MTime,
1310 len: u64,
1311 content: Vec<u8>,
1312 // The path to the repository state directory, if this is a gitfile.
1313 git_dir_path: Option<PathBuf>,
1314 },
1315 Dir {
1316 inode: u64,
1317 mtime: MTime,
1318 len: u64,
1319 entries: BTreeMap<String, FakeFsEntry>,
1320 git_repo_state: Option<Arc<Mutex<FakeGitRepositoryState>>>,
1321 },
1322 Symlink {
1323 target: PathBuf,
1324 },
1325}
1326
1327#[cfg(any(test, feature = "test-support"))]
1328impl PartialEq for FakeFsEntry {
1329 fn eq(&self, other: &Self) -> bool {
1330 match (self, other) {
1331 (
1332 Self::File {
1333 inode: l_inode,
1334 mtime: l_mtime,
1335 len: l_len,
1336 content: l_content,
1337 git_dir_path: l_git_dir_path,
1338 },
1339 Self::File {
1340 inode: r_inode,
1341 mtime: r_mtime,
1342 len: r_len,
1343 content: r_content,
1344 git_dir_path: r_git_dir_path,
1345 },
1346 ) => {
1347 l_inode == r_inode
1348 && l_mtime == r_mtime
1349 && l_len == r_len
1350 && l_content == r_content
1351 && l_git_dir_path == r_git_dir_path
1352 }
1353 (
1354 Self::Dir {
1355 inode: l_inode,
1356 mtime: l_mtime,
1357 len: l_len,
1358 entries: l_entries,
1359 git_repo_state: l_git_repo_state,
1360 },
1361 Self::Dir {
1362 inode: r_inode,
1363 mtime: r_mtime,
1364 len: r_len,
1365 entries: r_entries,
1366 git_repo_state: r_git_repo_state,
1367 },
1368 ) => {
1369 let same_repo_state = match (l_git_repo_state.as_ref(), r_git_repo_state.as_ref()) {
1370 (Some(l), Some(r)) => Arc::ptr_eq(l, r),
1371 (None, None) => true,
1372 _ => false,
1373 };
1374 l_inode == r_inode
1375 && l_mtime == r_mtime
1376 && l_len == r_len
1377 && l_entries == r_entries
1378 && same_repo_state
1379 }
1380 (Self::Symlink { target: l_target }, Self::Symlink { target: r_target }) => {
1381 l_target == r_target
1382 }
1383 _ => false,
1384 }
1385 }
1386}
1387
1388#[cfg(any(test, feature = "test-support"))]
1389impl FakeFsState {
1390 fn get_and_increment_mtime(&mut self) -> MTime {
1391 let mtime = self.next_mtime;
1392 self.next_mtime += FakeFs::SYSTEMTIME_INTERVAL;
1393 MTime(mtime)
1394 }
1395
1396 fn get_and_increment_inode(&mut self) -> u64 {
1397 let inode = self.next_inode;
1398 self.next_inode += 1;
1399 inode
1400 }
1401
1402 fn canonicalize(&self, target: &Path, follow_symlink: bool) -> Option<PathBuf> {
1403 let mut canonical_path = PathBuf::new();
1404 let mut path = target.to_path_buf();
1405 let mut entry_stack = Vec::new();
1406 'outer: loop {
1407 let mut path_components = path.components().peekable();
1408 let mut prefix = None;
1409 while let Some(component) = path_components.next() {
1410 match component {
1411 Component::Prefix(prefix_component) => prefix = Some(prefix_component),
1412 Component::RootDir => {
1413 entry_stack.clear();
1414 entry_stack.push(&self.root);
1415 canonical_path.clear();
1416 match prefix {
1417 Some(prefix_component) => {
1418 canonical_path = PathBuf::from(prefix_component.as_os_str());
1419 // Prefixes like `C:\\` are represented without their trailing slash, so we have to re-add it.
1420 canonical_path.push(std::path::MAIN_SEPARATOR_STR);
1421 }
1422 None => canonical_path = PathBuf::from(std::path::MAIN_SEPARATOR_STR),
1423 }
1424 }
1425 Component::CurDir => {}
1426 Component::ParentDir => {
1427 entry_stack.pop()?;
1428 canonical_path.pop();
1429 }
1430 Component::Normal(name) => {
1431 let current_entry = *entry_stack.last()?;
1432 if let FakeFsEntry::Dir { entries, .. } = current_entry {
1433 let entry = entries.get(name.to_str().unwrap())?;
1434 if (path_components.peek().is_some() || follow_symlink)
1435 && let FakeFsEntry::Symlink { target, .. } = entry
1436 {
1437 let mut target = target.clone();
1438 target.extend(path_components);
1439 path = target;
1440 continue 'outer;
1441 }
1442 entry_stack.push(entry);
1443 canonical_path = canonical_path.join(name);
1444 } else {
1445 return None;
1446 }
1447 }
1448 }
1449 }
1450 break;
1451 }
1452
1453 if entry_stack.is_empty() {
1454 None
1455 } else {
1456 Some(canonical_path)
1457 }
1458 }
1459
1460 fn try_entry(
1461 &mut self,
1462 target: &Path,
1463 follow_symlink: bool,
1464 ) -> Option<(&mut FakeFsEntry, PathBuf)> {
1465 let canonical_path = self.canonicalize(target, follow_symlink)?;
1466
1467 let mut components = canonical_path
1468 .components()
1469 .skip_while(|component| matches!(component, Component::Prefix(_)));
1470 let Some(Component::RootDir) = components.next() else {
1471 panic!(
1472 "the path {:?} was not canonicalized properly {:?}",
1473 target, canonical_path
1474 )
1475 };
1476
1477 let mut entry = &mut self.root;
1478 for component in components {
1479 match component {
1480 Component::Normal(name) => {
1481 if let FakeFsEntry::Dir { entries, .. } = entry {
1482 entry = entries.get_mut(name.to_str().unwrap())?;
1483 } else {
1484 return None;
1485 }
1486 }
1487 _ => {
1488 panic!(
1489 "the path {:?} was not canonicalized properly {:?}",
1490 target, canonical_path
1491 )
1492 }
1493 }
1494 }
1495
1496 Some((entry, canonical_path))
1497 }
1498
1499 fn entry(&mut self, target: &Path) -> Result<&mut FakeFsEntry> {
1500 Ok(self
1501 .try_entry(target, true)
1502 .ok_or_else(|| {
1503 anyhow!(io::Error::new(
1504 io::ErrorKind::NotFound,
1505 format!("not found: {target:?}")
1506 ))
1507 })?
1508 .0)
1509 }
1510
1511 fn write_path<Fn, T>(&mut self, path: &Path, callback: Fn) -> Result<T>
1512 where
1513 Fn: FnOnce(btree_map::Entry<String, FakeFsEntry>) -> Result<T>,
1514 {
1515 let path = normalize_path(path);
1516 let filename = path.file_name().context("cannot overwrite the root")?;
1517 let parent_path = path.parent().unwrap();
1518
1519 let parent = self.entry(parent_path)?;
1520 let new_entry = parent
1521 .dir_entries(parent_path)?
1522 .entry(filename.to_str().unwrap().into());
1523 callback(new_entry)
1524 }
1525
1526 fn emit_event<I, T>(&mut self, paths: I)
1527 where
1528 I: IntoIterator<Item = (T, Option<PathEventKind>)>,
1529 T: Into<PathBuf>,
1530 {
1531 self.buffered_events
1532 .extend(paths.into_iter().map(|(path, kind)| PathEvent {
1533 path: path.into(),
1534 kind,
1535 }));
1536
1537 if !self.events_paused {
1538 self.flush_events(self.buffered_events.len());
1539 }
1540 }
1541
1542 fn flush_events(&mut self, mut count: usize) {
1543 count = count.min(self.buffered_events.len());
1544 let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
1545 self.event_txs.retain(|(_, tx)| {
1546 let _ = tx.try_send(events.clone());
1547 !tx.is_closed()
1548 });
1549 }
1550}
1551
1552#[cfg(any(test, feature = "test-support"))]
1553pub static FS_DOT_GIT: std::sync::LazyLock<&'static OsStr> =
1554 std::sync::LazyLock::new(|| OsStr::new(".git"));
1555
1556#[cfg(any(test, feature = "test-support"))]
1557impl FakeFs {
1558 /// We need to use something large enough for Windows and Unix to consider this a new file.
1559 /// https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#platform-specific-behavior
1560 const SYSTEMTIME_INTERVAL: Duration = Duration::from_nanos(100);
1561
1562 pub fn new(executor: gpui::BackgroundExecutor) -> Arc<Self> {
1563 let (tx, rx) = smol::channel::bounded::<PathBuf>(10);
1564
1565 let this = Arc::new_cyclic(|this| Self {
1566 this: this.clone(),
1567 executor: executor.clone(),
1568 state: Arc::new(Mutex::new(FakeFsState {
1569 root: FakeFsEntry::Dir {
1570 inode: 0,
1571 mtime: MTime(UNIX_EPOCH),
1572 len: 0,
1573 entries: Default::default(),
1574 git_repo_state: None,
1575 },
1576 git_event_tx: tx,
1577 next_mtime: UNIX_EPOCH + Self::SYSTEMTIME_INTERVAL,
1578 next_inode: 1,
1579 event_txs: Default::default(),
1580 buffered_events: Vec::new(),
1581 events_paused: false,
1582 read_dir_call_count: 0,
1583 metadata_call_count: 0,
1584 path_write_counts: Default::default(),
1585 moves: Default::default(),
1586 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
1587 })),
1588 trash_cache: Arc::new(Mutex::new(TrashCache::default())),
1589 });
1590
1591 executor.spawn({
1592 let this = this.clone();
1593 async move {
1594 while let Ok(git_event) = rx.recv().await {
1595 if let Some(mut state) = this.state.try_lock() {
1596 state.emit_event([(git_event, Some(PathEventKind::Changed))]);
1597 } else {
1598 panic!("Failed to lock file system state, this execution would have caused a test hang");
1599 }
1600 }
1601 }
1602 }).detach();
1603
1604 this
1605 }
1606
1607 pub fn set_next_mtime(&self, next_mtime: SystemTime) {
1608 let mut state = self.state.lock();
1609 state.next_mtime = next_mtime;
1610 }
1611
1612 pub fn get_and_increment_mtime(&self) -> MTime {
1613 let mut state = self.state.lock();
1614 state.get_and_increment_mtime()
1615 }
1616
1617 pub async fn touch_path(&self, path: impl AsRef<Path>) {
1618 let mut state = self.state.lock();
1619 let path = path.as_ref();
1620 let new_mtime = state.get_and_increment_mtime();
1621 let new_inode = state.get_and_increment_inode();
1622 state
1623 .write_path(path, move |entry| {
1624 match entry {
1625 btree_map::Entry::Vacant(e) => {
1626 e.insert(FakeFsEntry::File {
1627 inode: new_inode,
1628 mtime: new_mtime,
1629 content: Vec::new(),
1630 len: 0,
1631 git_dir_path: None,
1632 });
1633 }
1634 btree_map::Entry::Occupied(mut e) => match &mut *e.get_mut() {
1635 FakeFsEntry::File { mtime, .. } => *mtime = new_mtime,
1636 FakeFsEntry::Dir { mtime, .. } => *mtime = new_mtime,
1637 FakeFsEntry::Symlink { .. } => {}
1638 },
1639 }
1640 Ok(())
1641 })
1642 .unwrap();
1643 state.emit_event([(path.to_path_buf(), Some(PathEventKind::Changed))]);
1644 }
1645
1646 pub async fn insert_file(&self, path: impl AsRef<Path>, content: Vec<u8>) {
1647 self.write_file_internal(path, content, true).unwrap()
1648 }
1649
1650 pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
1651 let mut state = self.state.lock();
1652 let path = path.as_ref();
1653 let file = FakeFsEntry::Symlink { target };
1654 state
1655 .write_path(path.as_ref(), move |e| match e {
1656 btree_map::Entry::Vacant(e) => {
1657 e.insert(file);
1658 Ok(())
1659 }
1660 btree_map::Entry::Occupied(mut e) => {
1661 *e.get_mut() = file;
1662 Ok(())
1663 }
1664 })
1665 .unwrap();
1666 state.emit_event([(path, Some(PathEventKind::Created))]);
1667 }
1668
1669 fn write_file_internal(
1670 &self,
1671 path: impl AsRef<Path>,
1672 new_content: Vec<u8>,
1673 recreate_inode: bool,
1674 ) -> Result<()> {
1675 let mut state = self.state.lock();
1676 let path_buf = path.as_ref().to_path_buf();
1677 *state.path_write_counts.entry(path_buf).or_insert(0) += 1;
1678 let new_inode = state.get_and_increment_inode();
1679 let new_mtime = state.get_and_increment_mtime();
1680 let new_len = new_content.len() as u64;
1681 let mut kind = None;
1682 state.write_path(path.as_ref(), |entry| {
1683 match entry {
1684 btree_map::Entry::Vacant(e) => {
1685 kind = Some(PathEventKind::Created);
1686 e.insert(FakeFsEntry::File {
1687 inode: new_inode,
1688 mtime: new_mtime,
1689 len: new_len,
1690 content: new_content,
1691 git_dir_path: None,
1692 });
1693 }
1694 btree_map::Entry::Occupied(mut e) => {
1695 kind = Some(PathEventKind::Changed);
1696 if let FakeFsEntry::File {
1697 inode,
1698 mtime,
1699 len,
1700 content,
1701 ..
1702 } = e.get_mut()
1703 {
1704 *mtime = new_mtime;
1705 *content = new_content;
1706 *len = new_len;
1707 if recreate_inode {
1708 *inode = new_inode;
1709 }
1710 } else {
1711 anyhow::bail!("not a file")
1712 }
1713 }
1714 }
1715 Ok(())
1716 })?;
1717 state.emit_event([(path.as_ref(), kind)]);
1718 Ok(())
1719 }
1720
1721 pub fn read_file_sync(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1722 let path = path.as_ref();
1723 let path = normalize_path(path);
1724 let mut state = self.state.lock();
1725 let entry = state.entry(&path)?;
1726 entry.file_content(&path).cloned()
1727 }
1728
1729 async fn load_internal(&self, path: impl AsRef<Path>) -> Result<Vec<u8>> {
1730 let path = path.as_ref();
1731 let path = normalize_path(path);
1732 self.simulate_random_delay().await;
1733 let mut state = self.state.lock();
1734 let entry = state.entry(&path)?;
1735 entry.file_content(&path).cloned()
1736 }
1737
1738 pub fn pause_events(&self) {
1739 self.state.lock().events_paused = true;
1740 }
1741
1742 pub fn unpause_events_and_flush(&self) {
1743 self.state.lock().events_paused = false;
1744 self.flush_events(usize::MAX);
1745 }
1746
1747 pub fn buffered_event_count(&self) -> usize {
1748 self.state.lock().buffered_events.len()
1749 }
1750
1751 pub fn flush_events(&self, count: usize) {
1752 self.state.lock().flush_events(count);
1753 }
1754
1755 pub(crate) fn entry(&self, target: &Path) -> Result<FakeFsEntry> {
1756 self.state.lock().entry(target).cloned()
1757 }
1758
1759 pub(crate) fn insert_entry(&self, target: &Path, new_entry: FakeFsEntry) -> Result<()> {
1760 let mut state = self.state.lock();
1761 state.write_path(target, |entry| {
1762 match entry {
1763 btree_map::Entry::Vacant(vacant_entry) => {
1764 vacant_entry.insert(new_entry);
1765 }
1766 btree_map::Entry::Occupied(mut occupied_entry) => {
1767 occupied_entry.insert(new_entry);
1768 }
1769 }
1770 Ok(())
1771 })
1772 }
1773
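    /// Inserts a directory tree described by a JSON value: objects become directories,
    /// strings become file contents, and `null` becomes an empty directory.
    ///
    /// A typical test-only sketch (the paths are illustrative):
    ///
    /// ```ignore
    /// fs.insert_tree(
    ///     "/root",
    ///     serde_json::json!({
    ///         "src": { "main.rs": "fn main() {}" },
    ///         "README.md": "hello",
    ///     }),
    /// )
    /// .await;
    /// ```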
1774 #[must_use]
1775 pub fn insert_tree<'a>(
1776 &'a self,
1777 path: impl 'a + AsRef<Path> + Send,
1778 tree: serde_json::Value,
1779 ) -> futures::future::BoxFuture<'a, ()> {
1780 use futures::FutureExt as _;
1781 use serde_json::Value::*;
1782
1783 async move {
1784 let path = path.as_ref();
1785
1786 match tree {
1787 Object(map) => {
1788 self.create_dir(path).await.unwrap();
1789 for (name, contents) in map {
1790 let mut path = PathBuf::from(path);
1791 path.push(name);
1792 self.insert_tree(&path, contents).await;
1793 }
1794 }
1795 Null => {
1796 self.create_dir(path).await.unwrap();
1797 }
1798 String(contents) => {
1799 self.insert_file(&path, contents.into_bytes()).await;
1800 }
1801 _ => {
1802 panic!("JSON object must contain only objects, strings, or null");
1803 }
1804 }
1805 }
1806 .boxed()
1807 }
1808
1809 pub fn insert_tree_from_real_fs<'a>(
1810 &'a self,
1811 path: impl 'a + AsRef<Path> + Send,
1812 src_path: impl 'a + AsRef<Path> + Send,
1813 ) -> futures::future::BoxFuture<'a, ()> {
1814 use futures::FutureExt as _;
1815
1816 async move {
1817 let path = path.as_ref();
1818 if std::fs::metadata(&src_path).unwrap().is_file() {
1819 let contents = std::fs::read(src_path).unwrap();
1820 self.insert_file(path, contents).await;
1821 } else {
1822 self.create_dir(path).await.unwrap();
1823 for entry in std::fs::read_dir(&src_path).unwrap() {
1824 let entry = entry.unwrap();
1825 self.insert_tree_from_real_fs(path.join(entry.file_name()), entry.path())
1826 .await;
1827 }
1828 }
1829 }
1830 .boxed()
1831 }
1832
1833 pub fn with_git_state_and_paths<T, F>(
1834 &self,
1835 dot_git: &Path,
1836 emit_git_event: bool,
1837 f: F,
1838 ) -> Result<T>
1839 where
1840 F: FnOnce(&mut FakeGitRepositoryState, &Path, &Path) -> T,
1841 {
1842 let mut state = self.state.lock();
1843 let git_event_tx = state.git_event_tx.clone();
1844 let entry = state.entry(dot_git).context("open .git")?;
1845
1846 if let FakeFsEntry::Dir { git_repo_state, .. } = entry {
1847 let repo_state = git_repo_state.get_or_insert_with(|| {
1848 log::debug!("insert git state for {dot_git:?}");
1849 Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
1850 });
1851 let mut repo_state = repo_state.lock();
1852
1853 let result = f(&mut repo_state, dot_git, dot_git);
1854
1855 drop(repo_state);
1856 if emit_git_event {
1857 state.emit_event([(dot_git, Some(PathEventKind::Changed))]);
1858 }
1859
1860 Ok(result)
1861 } else if let FakeFsEntry::File {
1862 content,
1863 git_dir_path,
1864 ..
1865 } = &mut *entry
1866 {
1867 let path = match git_dir_path {
1868 Some(path) => path,
1869 None => {
1870 let path = std::str::from_utf8(content)
1871 .ok()
1872 .and_then(|content| content.strip_prefix("gitdir:"))
1873 .context("not a valid gitfile")?
1874 .trim();
1875 git_dir_path.insert(normalize_path(&dot_git.parent().unwrap().join(path)))
1876 }
1877 }
1878 .clone();
1879 let Some((git_dir_entry, canonical_path)) = state.try_entry(&path, true) else {
1880 anyhow::bail!("pointed-to git dir {path:?} not found")
1881 };
1882 let FakeFsEntry::Dir {
1883 git_repo_state,
1884 entries,
1885 ..
1886 } = git_dir_entry
1887 else {
1888 anyhow::bail!("gitfile points to a non-directory")
1889 };
1890 let common_dir = if let Some(child) = entries.get("commondir") {
1891 Path::new(
1892 std::str::from_utf8(child.file_content("commondir".as_ref())?)
1893 .context("commondir content")?,
1894 )
1895 .to_owned()
1896 } else {
1897 canonical_path.clone()
1898 };
1899 let repo_state = git_repo_state.get_or_insert_with(|| {
1900 Arc::new(Mutex::new(FakeGitRepositoryState::new(git_event_tx)))
1901 });
1902 let mut repo_state = repo_state.lock();
1903
1904 let result = f(&mut repo_state, &canonical_path, &common_dir);
1905
1906 if emit_git_event {
1907 drop(repo_state);
1908 state.emit_event([(canonical_path, Some(PathEventKind::Changed))]);
1909 }
1910
1911 Ok(result)
1912 } else {
1913 anyhow::bail!("not a valid git repository");
1914 }
1915 }
1916
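    /// Runs `f` against the fake git repository state rooted at `dot_git`, optionally
    /// emitting a change event for that path afterwards.
    ///
    /// A test-only sketch (assumes the fake filesystem already contains a "/repo/.git" directory):
    ///
    /// ```ignore
    /// fs.with_git_state(Path::new("/repo/.git"), true, |state| {
    ///     state.current_branch_name = Some("main".to_string());
    /// })
    /// .unwrap();
    /// ```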
1917 pub fn with_git_state<T, F>(&self, dot_git: &Path, emit_git_event: bool, f: F) -> Result<T>
1918 where
1919 F: FnOnce(&mut FakeGitRepositoryState) -> T,
1920 {
1921 self.with_git_state_and_paths(dot_git, emit_git_event, |state, _, _| f(state))
1922 }
1923
1924 pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
1925 self.with_git_state(dot_git, true, |state| {
1926 let branch = branch.map(Into::into);
1927 state.branches.extend(branch.clone());
1928 state.current_branch_name = branch
1929 })
1930 .unwrap();
1931 }
1932
1933 pub fn insert_branches(&self, dot_git: &Path, branches: &[&str]) {
1934 self.with_git_state(dot_git, true, |state| {
1935 if let Some(first) = branches.first()
1936 && state.current_branch_name.is_none()
1937 {
1938 state.current_branch_name = Some(first.to_string())
1939 }
1940 state
1941 .branches
1942 .extend(branches.iter().map(ToString::to_string));
1943 })
1944 .unwrap();
1945 }
1946
1947 pub fn set_unmerged_paths_for_repo(
1948 &self,
1949 dot_git: &Path,
1950 unmerged_state: &[(RepoPath, UnmergedStatus)],
1951 ) {
1952 self.with_git_state(dot_git, true, |state| {
1953 state.unmerged_paths.clear();
1954 state.unmerged_paths.extend(
1955 unmerged_state
1956 .iter()
1957 .map(|(path, content)| (path.clone(), *content)),
1958 );
1959 })
1960 .unwrap();
1961 }
1962
1963 pub fn set_index_for_repo(&self, dot_git: &Path, index_state: &[(&str, String)]) {
1964 self.with_git_state(dot_git, true, |state| {
1965 state.index_contents.clear();
1966 state.index_contents.extend(
1967 index_state
1968 .iter()
1969 .map(|(path, content)| (repo_path(path), content.clone())),
1970 );
1971 })
1972 .unwrap();
1973 }
1974
1975 pub fn set_head_for_repo(
1976 &self,
1977 dot_git: &Path,
1978 head_state: &[(&str, String)],
1979 sha: impl Into<String>,
1980 ) {
1981 self.with_git_state(dot_git, true, |state| {
1982 state.head_contents.clear();
1983 state.head_contents.extend(
1984 head_state
1985 .iter()
1986 .map(|(path, content)| (repo_path(path), content.clone())),
1987 );
1988 state.refs.insert("HEAD".into(), sha.into());
1989 })
1990 .unwrap();
1991 }
1992
1993 pub fn set_head_and_index_for_repo(&self, dot_git: &Path, contents_by_path: &[(&str, String)]) {
1994 self.with_git_state(dot_git, true, |state| {
1995 state.head_contents.clear();
1996 state.head_contents.extend(
1997 contents_by_path
1998 .iter()
1999 .map(|(path, contents)| (repo_path(path), contents.clone())),
2000 );
2001 state.index_contents = state.head_contents.clone();
2002 })
2003 .unwrap();
2004 }
2005
2006 pub fn set_merge_base_content_for_repo(
2007 &self,
2008 dot_git: &Path,
2009 contents_by_path: &[(&str, String)],
2010 ) {
2011 self.with_git_state(dot_git, true, |state| {
2012 use git::Oid;
2013
2014 state.merge_base_contents.clear();
2015 let oids = (1..)
2016 .map(|n| n.to_string())
2017 .map(|n| Oid::from_bytes(n.repeat(20).as_bytes()).unwrap());
2018 for ((path, content), oid) in contents_by_path.iter().zip(oids) {
2019 state.merge_base_contents.insert(repo_path(path), oid);
2020 state.oids.insert(oid, content.clone());
2021 }
2022 })
2023 .unwrap();
2024 }
2025
2026 pub fn set_blame_for_repo(&self, dot_git: &Path, blames: Vec<(RepoPath, git::blame::Blame)>) {
2027 self.with_git_state(dot_git, true, |state| {
2028 state.blames.clear();
2029 state.blames.extend(blames);
2030 })
2031 .unwrap();
2032 }
2033
2034 /// Put the given git repository into a state with the given status,
2035 /// by mutating the head, index, and unmerged state.
2036 pub fn set_status_for_repo(&self, dot_git: &Path, statuses: &[(&str, FileStatus)]) {
2037 let workdir_path = dot_git.parent().unwrap();
2038 let workdir_contents = self.files_with_contents(workdir_path);
2039 self.with_git_state(dot_git, true, |state| {
2040 state.index_contents.clear();
2041 state.head_contents.clear();
2042 state.unmerged_paths.clear();
2043 for (path, content) in workdir_contents {
2044 use util::{paths::PathStyle, rel_path::RelPath};
2045
2046 let repo_path = RelPath::new(path.strip_prefix(&workdir_path).unwrap(), PathStyle::local()).unwrap();
2047 let repo_path = RepoPath::from_rel_path(&repo_path);
2048 let status = statuses
2049 .iter()
2050 .find_map(|(p, status)| (*p == repo_path.as_unix_str()).then_some(status));
2051 let mut content = String::from_utf8_lossy(&content).to_string();
2052
2053 let mut index_content = None;
2054 let mut head_content = None;
2055 match status {
2056 None => {
2057 index_content = Some(content.clone());
2058 head_content = Some(content);
2059 }
2060 Some(FileStatus::Untracked | FileStatus::Ignored) => {}
2061 Some(FileStatus::Unmerged(unmerged_status)) => {
2062 state
2063 .unmerged_paths
2064 .insert(repo_path.clone(), *unmerged_status);
2065 content.push_str(" (unmerged)");
2066 index_content = Some(content.clone());
2067 head_content = Some(content);
2068 }
2069 Some(FileStatus::Tracked(TrackedStatus {
2070 index_status,
2071 worktree_status,
2072 })) => {
2073 match worktree_status {
2074 StatusCode::Modified => {
2075 let mut content = content.clone();
2076 content.push_str(" (modified in working copy)");
2077 index_content = Some(content);
2078 }
2079 StatusCode::TypeChanged | StatusCode::Unmodified => {
2080 index_content = Some(content.clone());
2081 }
2082 StatusCode::Added => {}
2083 StatusCode::Deleted | StatusCode::Renamed | StatusCode::Copied => {
2084 panic!("cannot create these statuses for an existing file");
2085 }
2086 };
2087 match index_status {
2088 StatusCode::Modified => {
2089 let mut content = index_content.clone().expect(
2090 "file cannot be both modified in index and created in working copy",
2091 );
2092 content.push_str(" (modified in index)");
2093 head_content = Some(content);
2094 }
2095 StatusCode::TypeChanged | StatusCode::Unmodified => {
2096 head_content = Some(index_content.clone().expect("file cannot be both unmodified in index and created in working copy"));
2097 }
2098 StatusCode::Added => {}
2099 StatusCode::Deleted => {
2100 head_content = Some("".into());
2101 }
2102 StatusCode::Renamed | StatusCode::Copied => {
2103 panic!("cannot create these statuses for an existing file");
2104 }
2105 };
2106 }
2107 };
2108
2109 if let Some(content) = index_content {
2110 state.index_contents.insert(repo_path.clone(), content);
2111 }
2112 if let Some(content) = head_content {
2113 state.head_contents.insert(repo_path.clone(), content);
2114 }
2115 }
2116 }).unwrap();
2117 }
2118
2119 pub fn set_error_message_for_index_write(&self, dot_git: &Path, message: Option<String>) {
2120 self.with_git_state(dot_git, true, |state| {
2121 state.simulated_index_write_error_message = message;
2122 })
2123 .unwrap();
2124 }
2125
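/// Returns every path in the fake filesystem in breadth-first order,
/// skipping `.git` directories and their contents unless `include_dot_git`
/// is set.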
2126 pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
2127 let mut result = Vec::new();
2128 let mut queue = collections::VecDeque::new();
2129 let state = &*self.state.lock();
2130 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2131 while let Some((path, entry)) = queue.pop_front() {
2132 if let FakeFsEntry::Dir { entries, .. } = entry {
2133 for (name, entry) in entries {
2134 queue.push_back((path.join(name), entry));
2135 }
2136 }
2137 if include_dot_git
2138 || !path
2139 .components()
2140 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2141 {
2142 result.push(path);
2143 }
2144 }
2145 result
2146 }
2147
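/// Returns every directory path in the fake filesystem in breadth-first
/// order, skipping `.git` directories and their contents unless
/// `include_dot_git` is set.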
2148 pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
2149 let mut result = Vec::new();
2150 let mut queue = collections::VecDeque::new();
2151 let state = &*self.state.lock();
2152 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2153 while let Some((path, entry)) = queue.pop_front() {
2154 if let FakeFsEntry::Dir { entries, .. } = entry {
2155 for (name, entry) in entries {
2156 queue.push_back((path.join(name), entry));
2157 }
2158 if include_dot_git
2159 || !path
2160 .components()
2161 .any(|component| component.as_os_str() == *FS_DOT_GIT)
2162 {
2163 result.push(path);
2164 }
2165 }
2166 }
2167 result
2168 }
2169
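/// Returns the paths of all regular files in the fake filesystem, in
/// breadth-first order.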
2170 pub fn files(&self) -> Vec<PathBuf> {
2171 let mut result = Vec::new();
2172 let mut queue = collections::VecDeque::new();
2173 let state = &*self.state.lock();
2174 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2175 while let Some((path, entry)) = queue.pop_front() {
2176 match entry {
2177 FakeFsEntry::File { .. } => result.push(path),
2178 FakeFsEntry::Dir { entries, .. } => {
2179 for (name, entry) in entries {
2180 queue.push_back((path.join(name), entry));
2181 }
2182 }
2183 FakeFsEntry::Symlink { .. } => {}
2184 }
2185 }
2186 result
2187 }
2188
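/// Returns the path and contents of every file whose path starts with
/// `prefix`, in breadth-first order.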
2189 pub fn files_with_contents(&self, prefix: &Path) -> Vec<(PathBuf, Vec<u8>)> {
2190 let mut result = Vec::new();
2191 let mut queue = collections::VecDeque::new();
2192 let state = &*self.state.lock();
2193 queue.push_back((PathBuf::from(util::path!("/")), &state.root));
2194 while let Some((path, entry)) = queue.pop_front() {
2195 match entry {
2196 FakeFsEntry::File { content, .. } => {
2197 if path.starts_with(prefix) {
2198 result.push((path, content.clone()));
2199 }
2200 }
2201 FakeFsEntry::Dir { entries, .. } => {
2202 for (name, entry) in entries {
2203 queue.push_back((path.join(name), entry));
2204 }
2205 }
2206 FakeFsEntry::Symlink { .. } => {}
2207 }
2208 }
2209 result
2210 }
2211
2212 /// How many `read_dir` calls have been issued.
2213 pub fn read_dir_call_count(&self) -> usize {
2214 self.state.lock().read_dir_call_count
2215 }
2216
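/// Returns the watched paths whose event channels are still open.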
2217 pub fn watched_paths(&self) -> Vec<PathBuf> {
2218 let state = self.state.lock();
2219 state
2220 .event_txs
2221 .iter()
2222 .filter_map(|(path, tx)| Some(path.clone()).filter(|_| !tx.is_closed()))
2223 .collect()
2224 }
2225
2226 /// How many `metadata` calls have been issued.
2227 pub fn metadata_call_count(&self) -> usize {
2228 self.state.lock().metadata_call_count
2229 }
2230
2231 /// How many write operations have been issued for a specific path.
2232 pub fn write_count_for_path(&self, path: impl AsRef<Path>) -> usize {
2233 let path = path.as_ref().to_path_buf();
2234 self.state
2235 .lock()
2236 .path_write_counts
2237 .get(&path)
2238 .copied()
2239 .unwrap_or(0)
2240 }
2241
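/// Emits a synthetic filesystem event for `path` without changing any state.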
2242 pub fn emit_fs_event(&self, path: impl Into<PathBuf>, event: Option<PathEventKind>) {
2243 self.state.lock().emit_event(std::iter::once((path, event)));
2244 }
2245
2246 fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
2247 self.executor.simulate_random_delay()
2248 }
2249}
2250
2251#[cfg(any(test, feature = "test-support"))]
2252impl FakeFsEntry {
2253 fn is_file(&self) -> bool {
2254 matches!(self, Self::File { .. })
2255 }
2256
2257 fn is_symlink(&self) -> bool {
2258 matches!(self, Self::Symlink { .. })
2259 }
2260
2261 fn file_content(&self, path: &Path) -> Result<&Vec<u8>> {
2262 if let Self::File { content, .. } = self {
2263 Ok(content)
2264 } else {
2265 anyhow::bail!("not a file: {path:?}");
2266 }
2267 }
2268
2269 fn dir_entries(&mut self, path: &Path) -> Result<&mut BTreeMap<String, FakeFsEntry>> {
2270 if let Self::Dir { entries, .. } = self {
2271 Ok(entries)
2272 } else {
2273 anyhow::bail!("not a directory: {path:?}");
2274 }
2275 }
2276}
2277
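/// A [`Watcher`] backed by [`FakeFs`]: added paths are registered as event
/// prefixes so that events beneath them are forwarded to the watch stream.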
2278#[cfg(any(test, feature = "test-support"))]
2279struct FakeWatcher {
2280 tx: smol::channel::Sender<Vec<PathEvent>>,
2281 original_path: PathBuf,
2282 fs_state: Arc<Mutex<FakeFsState>>,
2283 prefixes: Mutex<Vec<PathBuf>>,
2284}
2285
2286#[cfg(any(test, feature = "test-support"))]
2287impl Watcher for FakeWatcher {
2288 fn add(&self, path: &Path) -> Result<()> {
2289 if path.starts_with(&self.original_path) {
2290 return Ok(());
2291 }
2292 self.fs_state
2293 .try_lock()
2294 .unwrap()
2295 .event_txs
2296 .push((path.to_owned(), self.tx.clone()));
2297 self.prefixes.lock().push(path.to_owned());
2298 Ok(())
2299 }
2300
2301 fn remove(&self, _: &Path) -> Result<()> {
2302 Ok(())
2303 }
2304}
2305
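/// A [`FileHandle`] for [`FakeFs`] that identifies a file by inode so its
/// current path can be looked up after a rename.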
2306#[cfg(any(test, feature = "test-support"))]
2307#[derive(Debug)]
2308struct FakeHandle {
2309 inode: u64,
2310}
2311
2312#[cfg(any(test, feature = "test-support"))]
2313impl FileHandle for FakeHandle {
2314 fn current_path(&self, fs: &Arc<dyn Fs>) -> Result<PathBuf> {
2315 let fs = fs.as_fake();
2316 let mut state = fs.state.lock();
2317 let Some(target) = state.moves.get(&self.inode).cloned() else {
2318 anyhow::bail!("fake fd not moved")
2319 };
2320
2321 if state.try_entry(&target, false).is_some() {
2322 return Ok(target);
2323 }
2324 anyhow::bail!("fake fd target not found")
2325 }
2326}
2327
2328#[cfg(any(test, feature = "test-support"))]
2329#[async_trait::async_trait]
2330impl Fs for FakeFs {
2331 async fn create_dir(&self, path: &Path) -> Result<()> {
2332 self.simulate_random_delay().await;
2333
2334 let mut created_dirs = Vec::new();
2335 let mut cur_path = PathBuf::new();
2336 for component in path.components() {
2337 let should_skip = matches!(component, Component::Prefix(..) | Component::RootDir);
2338 cur_path.push(component);
2339 if should_skip {
2340 continue;
2341 }
2342 let mut state = self.state.lock();
2343
2344 let inode = state.get_and_increment_inode();
2345 let mtime = state.get_and_increment_mtime();
2346 state.write_path(&cur_path, |entry| {
2347 entry.or_insert_with(|| {
2348 created_dirs.push((cur_path.clone(), Some(PathEventKind::Created)));
2349 FakeFsEntry::Dir {
2350 inode,
2351 mtime,
2352 len: 0,
2353 entries: Default::default(),
2354 git_repo_state: None,
2355 }
2356 });
2357 Ok(())
2358 })?
2359 }
2360
2361 self.state.lock().emit_event(created_dirs);
2362 Ok(())
2363 }
2364
2365 async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
2366 self.simulate_random_delay().await;
2367 let mut state = self.state.lock();
2368 let inode = state.get_and_increment_inode();
2369 let mtime = state.get_and_increment_mtime();
2370 let file = FakeFsEntry::File {
2371 inode,
2372 mtime,
2373 len: 0,
2374 content: Vec::new(),
2375 git_dir_path: None,
2376 };
2377 let mut kind = Some(PathEventKind::Created);
2378 state.write_path(path, |entry| {
2379 match entry {
2380 btree_map::Entry::Occupied(mut e) => {
2381 if options.overwrite {
2382 kind = Some(PathEventKind::Changed);
2383 *e.get_mut() = file;
2384 } else if !options.ignore_if_exists {
2385 anyhow::bail!("path already exists: {path:?}");
2386 }
2387 }
2388 btree_map::Entry::Vacant(e) => {
2389 e.insert(file);
2390 }
2391 }
2392 Ok(())
2393 })?;
2394 state.emit_event([(path, kind)]);
2395 Ok(())
2396 }
2397
2398 async fn create_symlink(&self, path: &Path, target: PathBuf) -> Result<()> {
2399 let mut state = self.state.lock();
2400 let file = FakeFsEntry::Symlink { target };
2401 state
2402 .write_path(path.as_ref(), move |e| match e {
2403 btree_map::Entry::Vacant(e) => {
2404 e.insert(file);
2405 Ok(())
2406 }
2407 btree_map::Entry::Occupied(mut e) => {
2408 *e.get_mut() = file;
2409 Ok(())
2410 }
2411 })
2412 .unwrap();
2413 state.emit_event([(path, Some(PathEventKind::Created))]);
2414
2415 Ok(())
2416 }
2417
2418 async fn create_file_with(
2419 &self,
2420 path: &Path,
2421 mut content: Pin<&mut (dyn AsyncRead + Send)>,
2422 ) -> Result<()> {
2423 let mut bytes = Vec::new();
2424 content.read_to_end(&mut bytes).await?;
2425 self.write_file_internal(path, bytes, true)?;
2426 Ok(())
2427 }
2428
2429 async fn extract_tar_file(
2430 &self,
2431 path: &Path,
2432 content: Archive<Pin<&mut (dyn AsyncRead + Send)>>,
2433 ) -> Result<()> {
2434 let mut entries = content.entries()?;
2435 while let Some(entry) = entries.next().await {
2436 let mut entry = entry?;
2437 if entry.header().entry_type().is_file() {
2438 let path = path.join(entry.path()?.as_ref());
2439 let mut bytes = Vec::new();
2440 entry.read_to_end(&mut bytes).await?;
2441 self.create_dir(path.parent().unwrap()).await?;
2442 self.write_file_internal(&path, bytes, true)?;
2443 }
2444 }
2445 Ok(())
2446 }
2447
2448 async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
2449 self.simulate_random_delay().await;
2450
2451 let old_path = normalize_path(old_path);
2452 let new_path = normalize_path(new_path);
2453
2454 if options.create_parents {
2455 if let Some(parent) = new_path.parent() {
2456 self.create_dir(parent).await?;
2457 }
2458 }
2459
2460 let mut state = self.state.lock();
2461 let moved_entry = state.write_path(&old_path, |e| {
2462 if let btree_map::Entry::Occupied(e) = e {
2463 Ok(e.get().clone())
2464 } else {
2465 anyhow::bail!("path does not exist: {old_path:?}")
2466 }
2467 })?;
2468
2469 let inode = match moved_entry {
2470 FakeFsEntry::File { inode, .. } => inode,
2471 FakeFsEntry::Dir { inode, .. } => inode,
2472 _ => 0,
2473 };
2474
2475 state.moves.insert(inode, new_path.clone());
2476
2477 state.write_path(&new_path, |e| {
2478 match e {
2479 btree_map::Entry::Occupied(mut e) => {
2480 if options.overwrite {
2481 *e.get_mut() = moved_entry;
2482 } else if !options.ignore_if_exists {
2483 anyhow::bail!("path already exists: {new_path:?}");
2484 }
2485 }
2486 btree_map::Entry::Vacant(e) => {
2487 e.insert(moved_entry);
2488 }
2489 }
2490 Ok(())
2491 })?;
2492
2493 state
2494 .write_path(&old_path, |e| {
2495 if let btree_map::Entry::Occupied(e) = e {
2496 Ok(e.remove())
2497 } else {
2498 unreachable!()
2499 }
2500 })
2501 .unwrap();
2502
2503 state.emit_event([
2504 (old_path, Some(PathEventKind::Removed)),
2505 (new_path, Some(PathEventKind::Created)),
2506 ]);
2507 Ok(())
2508 }
2509
2510 async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
2511 self.simulate_random_delay().await;
2512
2513 let source = normalize_path(source);
2514 let target = normalize_path(target);
2515 let mut state = self.state.lock();
2516 let mtime = state.get_and_increment_mtime();
2517 let inode = state.get_and_increment_inode();
2518 let source_entry = state.entry(&source)?;
2519 let content = source_entry.file_content(&source)?.clone();
2520 let mut kind = Some(PathEventKind::Created);
2521 state.write_path(&target, |e| match e {
2522 btree_map::Entry::Occupied(e) => {
2523 if options.overwrite {
2524 kind = Some(PathEventKind::Changed);
2525 Ok(Some(e.get().clone()))
2526 } else if !options.ignore_if_exists {
2527 anyhow::bail!("{target:?} already exists");
2528 } else {
2529 Ok(None)
2530 }
2531 }
2532 btree_map::Entry::Vacant(e) => Ok(Some(
2533 e.insert(FakeFsEntry::File {
2534 inode,
2535 mtime,
2536 len: content.len() as u64,
2537 content,
2538 git_dir_path: None,
2539 })
2540 .clone(),
2541 )),
2542 })?;
2543 state.emit_event([(target, kind)]);
2544 Ok(())
2545 }
2546
2547 async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2548 self.simulate_random_delay().await;
2549
2550 let path = normalize_path(path);
2551 let parent_path = path.parent().context("cannot remove the root")?;
2552 let base_name = path.file_name().context("cannot remove the root")?;
2553
2554 let mut state = self.state.lock();
2555 let parent_entry = state.entry(parent_path)?;
2556 let entry = parent_entry
2557 .dir_entries(parent_path)?
2558 .entry(base_name.to_str().unwrap().into());
2559
2560 match entry {
2561 btree_map::Entry::Vacant(_) => {
2562 if !options.ignore_if_not_exists {
2563 anyhow::bail!("{path:?} does not exist");
2564 }
2565 }
2566 btree_map::Entry::Occupied(mut entry) => {
2567 {
2568 let children = entry.get_mut().dir_entries(&path)?;
2569 if !options.recursive && !children.is_empty() {
2570 anyhow::bail!("{path:?} is not empty");
2571 }
2572 }
2573 entry.remove();
2574 }
2575 }
2576 state.emit_event([(path, Some(PathEventKind::Removed))]);
2577 Ok(())
2578 }
2579
2580 async fn trash_dir(&self, path: &Path, options: RemoveOptions) -> Result<Option<TrashedItem>> {
2581 let trashed_item = copy_to_trash_cache(self, path, &self.trash_cache, options).await?;
2582 self.remove_dir(path, options).await?;
2583 Ok(trashed_item)
2584 }
2585
2586 async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
2587 self.simulate_random_delay().await;
2588
2589 let path = normalize_path(path);
2590 let parent_path = path.parent().context("cannot remove the root")?;
2591 let base_name = path.file_name().unwrap();
2592 let mut state = self.state.lock();
2593 let parent_entry = state.entry(parent_path)?;
2594 let entry = parent_entry
2595 .dir_entries(parent_path)?
2596 .entry(base_name.to_str().unwrap().into());
2597 match entry {
2598 btree_map::Entry::Vacant(_) => {
2599 if !options.ignore_if_not_exists {
2600 anyhow::bail!("{path:?} does not exist");
2601 }
2602 }
2603 btree_map::Entry::Occupied(mut entry) => {
2604 entry.get_mut().file_content(&path)?;
2605 entry.remove();
2606 }
2607 }
2608 state.emit_event([(path, Some(PathEventKind::Removed))]);
2609 Ok(())
2610 }
2611
2612 async fn trash_file(&self, path: &Path, options: RemoveOptions) -> Result<Option<TrashedItem>> {
2613 let trashed_item = copy_to_trash_cache(self, path, &self.trash_cache, options).await?;
2614 self.remove_file(path, options).await?;
2615 Ok(trashed_item)
2616 }
2617
2618 async fn restore_from_trash(&self, trashed_item: TrashedItem) -> Result<PathBuf> {
2619 let trash_info = self
2620 .trash_cache
2621 .lock()
2622 .remove(trashed_item)
2623 .context("no item in trash")?;
2624 self.rename(
2625 &trash_info.path_in_trash,
2626 &trash_info.original_path,
2627 RenameOptions {
2628 overwrite: false,
2629 ignore_if_exists: false,
2630 create_parents: true,
2631 },
2632 )
2633 .await?;
2634 Ok(trash_info.original_path)
2635 }
2636
2637 async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read + Send + Sync>> {
2638 let bytes = self.load_internal(path).await?;
2639 Ok(Box::new(io::Cursor::new(bytes)))
2640 }
2641
2642 async fn open_handle(&self, path: &Path) -> Result<Arc<dyn FileHandle>> {
2643 self.simulate_random_delay().await;
2644 let mut state = self.state.lock();
2645 let inode = match state.entry(path)? {
2646 FakeFsEntry::File { inode, .. } => *inode,
2647 FakeFsEntry::Dir { inode, .. } => *inode,
2648 _ => unreachable!(),
2649 };
2650 Ok(Arc::new(FakeHandle { inode }))
2651 }
2652
2653 async fn load(&self, path: &Path) -> Result<String> {
2654 let content = self.load_internal(path).await?;
2655 Ok(String::from_utf8(content)?)
2656 }
2657
2658 async fn load_bytes(&self, path: &Path) -> Result<Vec<u8>> {
2659 self.load_internal(path).await
2660 }
2661
2662 async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
2663 self.simulate_random_delay().await;
2664 let path = normalize_path(path.as_path());
2665 if let Some(path) = path.parent() {
2666 self.create_dir(path).await?;
2667 }
2668 self.write_file_internal(path, data.into_bytes(), true)?;
2669 Ok(())
2670 }
2671
2672 async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
2673 self.simulate_random_delay().await;
2674 let path = normalize_path(path);
2675 let content = text::chunks_with_line_ending(text, line_ending).collect::<String>();
2676 if let Some(path) = path.parent() {
2677 self.create_dir(path).await?;
2678 }
2679 self.write_file_internal(path, content.into_bytes(), false)?;
2680 Ok(())
2681 }
2682
2683 async fn write(&self, path: &Path, content: &[u8]) -> Result<()> {
2684 self.simulate_random_delay().await;
2685 let path = normalize_path(path);
2686 if let Some(path) = path.parent() {
2687 self.create_dir(path).await?;
2688 }
2689 self.write_file_internal(path, content.to_vec(), false)?;
2690 Ok(())
2691 }
2692
2693 async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
2694 let path = normalize_path(path);
2695 self.simulate_random_delay().await;
2696 let state = self.state.lock();
2697 let canonical_path = state
2698 .canonicalize(&path, true)
2699 .with_context(|| format!("path does not exist: {path:?}"))?;
2700 Ok(canonical_path)
2701 }
2702
2703 async fn is_file(&self, path: &Path) -> bool {
2704 let path = normalize_path(path);
2705 self.simulate_random_delay().await;
2706 let mut state = self.state.lock();
2707 if let Some((entry, _)) = state.try_entry(&path, true) {
2708 entry.is_file()
2709 } else {
2710 false
2711 }
2712 }
2713
2714 async fn is_dir(&self, path: &Path) -> bool {
2715 self.metadata(path)
2716 .await
2717 .is_ok_and(|metadata| metadata.is_some_and(|metadata| metadata.is_dir))
2718 }
2719
2720 async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
2721 self.simulate_random_delay().await;
2722 let path = normalize_path(path);
2723 let mut state = self.state.lock();
2724 state.metadata_call_count += 1;
2725 if let Some((mut entry, _)) = state.try_entry(&path, false) {
2726 let is_symlink = entry.is_symlink();
2727 if is_symlink {
2728 if let Some(e) = state.try_entry(&path, true).map(|e| e.0) {
2729 entry = e;
2730 } else {
2731 return Ok(None);
2732 }
2733 }
2734
2735 Ok(Some(match &*entry {
2736 FakeFsEntry::File {
2737 inode, mtime, len, ..
2738 } => Metadata {
2739 inode: *inode,
2740 mtime: *mtime,
2741 len: *len,
2742 is_dir: false,
2743 is_symlink,
2744 is_fifo: false,
2745 is_executable: false,
2746 },
2747 FakeFsEntry::Dir {
2748 inode, mtime, len, ..
2749 } => Metadata {
2750 inode: *inode,
2751 mtime: *mtime,
2752 len: *len,
2753 is_dir: true,
2754 is_symlink,
2755 is_fifo: false,
2756 is_executable: false,
2757 },
2758 FakeFsEntry::Symlink { .. } => unreachable!(),
2759 }))
2760 } else {
2761 Ok(None)
2762 }
2763 }
2764
2765 async fn read_link(&self, path: &Path) -> Result<PathBuf> {
2766 self.simulate_random_delay().await;
2767 let path = normalize_path(path);
2768 let mut state = self.state.lock();
2769 let (entry, _) = state
2770 .try_entry(&path, false)
2771 .with_context(|| format!("path does not exist: {path:?}"))?;
2772 if let FakeFsEntry::Symlink { target } = entry {
2773 Ok(target.clone())
2774 } else {
2775 anyhow::bail!("not a symlink: {path:?}")
2776 }
2777 }
2778
2779 async fn read_dir(
2780 &self,
2781 path: &Path,
2782 ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
2783 self.simulate_random_delay().await;
2784 let path = normalize_path(path);
2785 let mut state = self.state.lock();
2786 state.read_dir_call_count += 1;
2787 let entry = state.entry(&path)?;
2788 let children = entry.dir_entries(&path)?;
2789 let paths = children
2790 .keys()
2791 .map(|file_name| Ok(path.join(file_name)))
2792 .collect::<Vec<_>>();
2793 Ok(Box::pin(futures::stream::iter(paths)))
2794 }
2795
2796 async fn watch(
2797 &self,
2798 path: &Path,
2799 _: Duration,
2800 ) -> (
2801 Pin<Box<dyn Send + Stream<Item = Vec<PathEvent>>>>,
2802 Arc<dyn Watcher>,
2803 ) {
2804 self.simulate_random_delay().await;
2805 let (tx, rx) = smol::channel::unbounded();
2806 let path = path.to_path_buf();
2807 self.state.lock().event_txs.push((path.clone(), tx.clone()));
2808 let executor = self.executor.clone();
2809 let watcher = Arc::new(FakeWatcher {
2810 tx,
2811 original_path: path.to_owned(),
2812 fs_state: self.state.clone(),
2813 prefixes: Mutex::new(vec![path]),
2814 });
2815 (
2816 Box::pin(futures::StreamExt::filter(rx, {
2817 let watcher = watcher.clone();
2818 move |events| {
2819 let result = events.iter().any(|evt_path| {
2820 watcher
2821 .prefixes
2822 .lock()
2823 .iter()
2824 .any(|prefix| evt_path.path.starts_with(prefix))
2825 });
2826 let executor = executor.clone();
2827 async move {
2828 executor.simulate_random_delay().await;
2829 result
2830 }
2831 }
2832 })),
2833 watcher,
2834 )
2835 }
2836
2837 fn open_repo(
2838 &self,
2839 abs_dot_git: &Path,
2840 _system_git_binary: Option<&Path>,
2841 ) -> Option<Arc<dyn GitRepository>> {
2842 use util::ResultExt as _;
2843
2844 self.with_git_state_and_paths(
2845 abs_dot_git,
2846 false,
2847 |_, repository_dir_path, common_dir_path| {
2848 Arc::new(fake_git_repo::FakeGitRepository {
2849 fs: self.this.upgrade().unwrap(),
2850 executor: self.executor.clone(),
2851 dot_git_path: abs_dot_git.to_path_buf(),
2852 repository_dir_path: repository_dir_path.to_owned(),
2853 common_dir_path: common_dir_path.to_owned(),
2854 checkpoints: Arc::default(),
2855 }) as _
2856 },
2857 )
2858 .log_err()
2859 }
2860
2861 async fn git_init(
2862 &self,
2863 abs_work_directory_path: &Path,
2864 _fallback_branch_name: String,
2865 ) -> Result<()> {
2866 self.create_dir(&abs_work_directory_path.join(".git")).await
2867 }
2868
2869 async fn git_clone(&self, _repo_url: &str, _abs_work_directory: &Path) -> Result<()> {
2870 anyhow::bail!("Git clone is not supported in fake Fs")
2871 }
2872
2873 fn is_fake(&self) -> bool {
2874 true
2875 }
2876
2877 async fn is_case_sensitive(&self) -> Result<bool> {
2878 Ok(true)
2879 }
2880
2881 fn subscribe_to_jobs(&self) -> JobEventReceiver {
2882 let (sender, receiver) = futures::channel::mpsc::unbounded();
2883 self.state.lock().job_event_subscribers.lock().push(sender);
2884 receiver
2885 }
2886
2887 #[cfg(any(test, feature = "test-support"))]
2888 fn as_fake(&self) -> Arc<FakeFs> {
2889 self.this.upgrade().unwrap()
2890 }
2891}
2892
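/// Lexically normalizes `path`, dropping `.` components and resolving `..`
/// against the preceding component, without touching the filesystem or
/// resolving symlinks.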
2893pub fn normalize_path(path: &Path) -> PathBuf {
2894 let mut components = path.components().peekable();
2895 let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() {
2896 components.next();
2897 PathBuf::from(c.as_os_str())
2898 } else {
2899 PathBuf::new()
2900 };
2901
2902 for component in components {
2903 match component {
2904 Component::Prefix(..) => unreachable!(),
2905 Component::RootDir => {
2906 ret.push(component.as_os_str());
2907 }
2908 Component::CurDir => {}
2909 Component::ParentDir => {
2910 ret.pop();
2911 }
2912 Component::Normal(c) => {
2913 ret.push(c);
2914 }
2915 }
2916 }
2917 ret
2918}
2919
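/// Recursively copies `source` into `target` on the given [`Fs`], creating
/// directories as needed and copying files according to `options`.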
2920pub async fn copy_recursive<'a>(
2921 fs: &'a dyn Fs,
2922 source: &'a Path,
2923 target: &'a Path,
2924 options: CopyOptions,
2925) -> Result<()> {
2926 for (item, metadata) in read_dir_items(fs, source).await? {
2927 let Ok(item_relative_path) = item.strip_prefix(source) else {
2928 continue;
2929 };
2930 let target_item = if item_relative_path == Path::new("") {
2931 target.to_path_buf()
2932 } else {
2933 target.join(item_relative_path)
2934 };
2935 if metadata.is_dir {
2936 if !options.overwrite && fs.metadata(&target_item).await.is_ok_and(|m| m.is_some()) {
2937 if options.ignore_if_exists {
2938 continue;
2939 } else {
2940 anyhow::bail!("{target_item:?} already exists");
2941 }
2942 }
2943 let _ = fs
2944 .remove_dir(
2945 &target_item,
2946 RemoveOptions {
2947 recursive: true,
2948 ignore_if_not_exists: true,
2949 },
2950 )
2951 .await;
2952 fs.create_dir(&target_item).await?;
2953 } else {
2954 fs.copy_file(&item, &target_item, options).await?;
2955 }
2956 }
2957 Ok(())
2958}
2959
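/// Returns the total size in bytes of all files beneath `source`;
/// directories themselves contribute nothing to the total.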
2960pub async fn dir_total_len<'a>(fs: &'a dyn Fs, source: &'a Path) -> Result<u64> {
2961 Ok(read_dir_items(fs, source)
2962 .await?
2963 .into_iter()
2964 .filter(|(_path, metadata)| !metadata.is_dir)
2965 .map(|(_path, metadata)| metadata.len)
2966 .sum())
2967}
2968
2969/// Recursively reads all of the paths in the given directory.
2970///
/// Returns a vector of `(path, metadata)` tuples, including `source` itself.
2972pub async fn read_dir_items<'a>(
2973 fs: &'a dyn Fs,
2974 source: &'a Path,
2975) -> Result<Vec<(PathBuf, Metadata)>> {
2976 let mut items = Vec::new();
2977 read_recursive(fs, source, &mut items).await?;
2978 Ok(items)
2979}
2980
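/// Appends `source` and, if it is a directory, all of its descendants (with
/// their metadata) to `output`.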
2981fn read_recursive<'a>(
2982 fs: &'a dyn Fs,
2983 source: &'a Path,
2984 output: &'a mut Vec<(PathBuf, Metadata)>,
2985) -> BoxFuture<'a, Result<()>> {
2986 use futures::future::FutureExt;
2987
2988 async move {
2989 let metadata = fs
2990 .metadata(source)
2991 .await?
2992 .with_context(|| format!("path does not exist: {source:?}"))?;
2993
2994 if metadata.is_dir {
2995 output.push((source.to_path_buf(), metadata));
2996 let mut children = fs.read_dir(source).await?;
2997 while let Some(child_path) = children.next().await {
2998 if let Ok(child_path) = child_path {
2999 read_recursive(fs, &child_path, output).await?;
3000 }
3001 }
3002 } else {
3003 output.push((source.to_path_buf(), metadata));
3004 }
3005 Ok(())
3006 }
3007 .boxed()
3008}
3009
/// Copies `path` into the in-memory trash cache so that it can be restored
/// later, returning `None` if the item does not exist or is larger than
/// `TRASH_LIMIT`.
///
/// If OS-specific restore-from-trash functionality is implemented, use
/// `#[cfg(...)]` to exclude this function or adjust its implementation.
3012async fn copy_to_trash_cache<F: Fs>(
3013 fs: &F,
3014 path: &Path,
3015 trash_cache: &Mutex<TrashCache>,
3016 options: RemoveOptions,
3017) -> Result<Option<TrashedItem>> {
3018 // if path doesn't exist, we'll return `None` and let the caller handle the error case
3019 let Some(metadata) = fs.metadata(path).await? else {
3020 return Ok(None);
3021 };
3022
3023 let len = if metadata.is_dir {
3024 dir_total_len(fs, path).await?
3025 } else {
3026 metadata.len
3027 };
3028 if len <= TRASH_LIMIT {
3029 let (id, trash_info) = trash_cache.lock().add_item(path);
3030 if let Some(parent) = trash_info.path_in_trash.parent() {
3031 fs.create_dir(parent).await?;
3032 }
3033 if metadata.is_dir {
3034 if options.recursive {
3035 copy_recursive(fs, path, &trash_info.path_in_trash, CopyOptions::default()).await?;
} else {
// A non-recursive removal only applies to an empty directory, so recording
// an empty directory in the trash is enough to restore it later.
fs.create_dir(&trash_info.path_in_trash).await?;
}
3039 } else {
3040 fs.copy_file(path, &trash_info.path_in_trash, CopyOptions::default())
3041 .await?;
3042 }
3043 Ok(Some(id))
3044 } else {
Ok(None) // item exceeds TRASH_LIMIT, so it is removed without a trash copy
3046 }
3047}
3048
3049// todo(windows)
// can we get the file id without opening the file twice?
3051// https://github.com/rust-lang/rust/issues/63010
3052#[cfg(target_os = "windows")]
3053async fn file_id(path: impl AsRef<Path>) -> Result<u64> {
3054 use std::os::windows::io::AsRawHandle;
3055
3056 use smol::fs::windows::OpenOptionsExt;
3057 use windows::Win32::{
3058 Foundation::HANDLE,
3059 Storage::FileSystem::{
3060 BY_HANDLE_FILE_INFORMATION, FILE_FLAG_BACKUP_SEMANTICS, GetFileInformationByHandle,
3061 },
3062 };
3063
3064 let file = smol::fs::OpenOptions::new()
3065 .read(true)
3066 .custom_flags(FILE_FLAG_BACKUP_SEMANTICS.0)
3067 .open(path)
3068 .await?;
3069
3070 let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() };
3071 // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle
3072 // This function supports Windows XP+
3073 smol::unblock(move || {
3074 unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? };
3075
3076 Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64))
3077 })
3078 .await
3079}
3080
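/// Atomically replaces `replaced_file` with `replacement_file` via the Win32
/// `ReplaceFileW` API, creating the destination first if it does not exist.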
3081#[cfg(target_os = "windows")]
3082fn atomic_replace<P: AsRef<Path>>(
3083 replaced_file: P,
3084 replacement_file: P,
3085) -> windows::core::Result<()> {
3086 use windows::{
3087 Win32::Storage::FileSystem::{REPLACE_FILE_FLAGS, ReplaceFileW},
3088 core::HSTRING,
3089 };
3090
3091 // If the file does not exist, create it.
3092 let _ = std::fs::File::create_new(replaced_file.as_ref());
3093
3094 unsafe {
3095 ReplaceFileW(
3096 &HSTRING::from(replaced_file.as_ref().to_string_lossy().into_owned()),
3097 &HSTRING::from(replacement_file.as_ref().to_string_lossy().into_owned()),
3098 None,
3099 REPLACE_FILE_FLAGS::default(),
3100 None,
3101 None,
3102 )
3103 }
3104}
3105
3106#[cfg(test)]
3107mod tests {
3108 use super::*;
3109 use gpui::BackgroundExecutor;
3110 use serde_json::json;
3111 use util::path;
3112
3113 #[gpui::test]
3114 async fn test_fake_fs(executor: BackgroundExecutor) {
3115 let fs = FakeFs::new(executor.clone());
3116 fs.insert_tree(
3117 path!("/root"),
3118 json!({
3119 "dir1": {
3120 "a": "A",
3121 "b": "B"
3122 },
3123 "dir2": {
3124 "c": "C",
3125 "dir3": {
3126 "d": "D"
3127 }
3128 }
3129 }),
3130 )
3131 .await;
3132
3133 assert_eq!(
3134 fs.files(),
3135 vec![
3136 PathBuf::from(path!("/root/dir1/a")),
3137 PathBuf::from(path!("/root/dir1/b")),
3138 PathBuf::from(path!("/root/dir2/c")),
3139 PathBuf::from(path!("/root/dir2/dir3/d")),
3140 ]
3141 );
3142
3143 fs.create_symlink(path!("/root/dir2/link-to-dir3").as_ref(), "./dir3".into())
3144 .await
3145 .unwrap();
3146
3147 assert_eq!(
3148 fs.canonicalize(path!("/root/dir2/link-to-dir3").as_ref())
3149 .await
3150 .unwrap(),
3151 PathBuf::from(path!("/root/dir2/dir3")),
3152 );
3153 assert_eq!(
3154 fs.canonicalize(path!("/root/dir2/link-to-dir3/d").as_ref())
3155 .await
3156 .unwrap(),
3157 PathBuf::from(path!("/root/dir2/dir3/d")),
3158 );
3159 assert_eq!(
3160 fs.load(path!("/root/dir2/link-to-dir3/d").as_ref())
3161 .await
3162 .unwrap(),
3163 "D",
3164 );
3165 }
3166
3167 #[gpui::test]
3168 async fn test_copy_recursive_with_single_file(executor: BackgroundExecutor) {
3169 let fs = FakeFs::new(executor.clone());
3170 fs.insert_tree(
3171 path!("/outer"),
3172 json!({
3173 "a": "A",
3174 "b": "B",
3175 "inner": {}
3176 }),
3177 )
3178 .await;
3179
3180 assert_eq!(
3181 fs.files(),
3182 vec![
3183 PathBuf::from(path!("/outer/a")),
3184 PathBuf::from(path!("/outer/b")),
3185 ]
3186 );
3187
3188 let source = Path::new(path!("/outer/a"));
3189 let target = Path::new(path!("/outer/a copy"));
3190 copy_recursive(fs.as_ref(), source, target, Default::default())
3191 .await
3192 .unwrap();
3193
3194 assert_eq!(
3195 fs.files(),
3196 vec![
3197 PathBuf::from(path!("/outer/a")),
3198 PathBuf::from(path!("/outer/a copy")),
3199 PathBuf::from(path!("/outer/b")),
3200 ]
3201 );
3202
3203 let source = Path::new(path!("/outer/a"));
3204 let target = Path::new(path!("/outer/inner/a copy"));
3205 copy_recursive(fs.as_ref(), source, target, Default::default())
3206 .await
3207 .unwrap();
3208
3209 assert_eq!(
3210 fs.files(),
3211 vec![
3212 PathBuf::from(path!("/outer/a")),
3213 PathBuf::from(path!("/outer/a copy")),
3214 PathBuf::from(path!("/outer/b")),
3215 PathBuf::from(path!("/outer/inner/a copy")),
3216 ]
3217 );
3218 }
3219
3220 #[gpui::test]
3221 async fn test_copy_recursive_with_single_dir(executor: BackgroundExecutor) {
3222 let fs = FakeFs::new(executor.clone());
3223 fs.insert_tree(
3224 path!("/outer"),
3225 json!({
3226 "a": "A",
3227 "empty": {},
3228 "non-empty": {
3229 "b": "B",
3230 }
3231 }),
3232 )
3233 .await;
3234
3235 assert_eq!(
3236 fs.files(),
3237 vec![
3238 PathBuf::from(path!("/outer/a")),
3239 PathBuf::from(path!("/outer/non-empty/b")),
3240 ]
3241 );
3242 assert_eq!(
3243 fs.directories(false),
3244 vec![
3245 PathBuf::from(path!("/")),
3246 PathBuf::from(path!("/outer")),
3247 PathBuf::from(path!("/outer/empty")),
3248 PathBuf::from(path!("/outer/non-empty")),
3249 ]
3250 );
3251
3252 let source = Path::new(path!("/outer/empty"));
3253 let target = Path::new(path!("/outer/empty copy"));
3254 copy_recursive(fs.as_ref(), source, target, Default::default())
3255 .await
3256 .unwrap();
3257
3258 assert_eq!(
3259 fs.files(),
3260 vec![
3261 PathBuf::from(path!("/outer/a")),
3262 PathBuf::from(path!("/outer/non-empty/b")),
3263 ]
3264 );
3265 assert_eq!(
3266 fs.directories(false),
3267 vec![
3268 PathBuf::from(path!("/")),
3269 PathBuf::from(path!("/outer")),
3270 PathBuf::from(path!("/outer/empty")),
3271 PathBuf::from(path!("/outer/empty copy")),
3272 PathBuf::from(path!("/outer/non-empty")),
3273 ]
3274 );
3275
3276 let source = Path::new(path!("/outer/non-empty"));
3277 let target = Path::new(path!("/outer/non-empty copy"));
3278 copy_recursive(fs.as_ref(), source, target, Default::default())
3279 .await
3280 .unwrap();
3281
3282 assert_eq!(
3283 fs.files(),
3284 vec![
3285 PathBuf::from(path!("/outer/a")),
3286 PathBuf::from(path!("/outer/non-empty/b")),
3287 PathBuf::from(path!("/outer/non-empty copy/b")),
3288 ]
3289 );
3290 assert_eq!(
3291 fs.directories(false),
3292 vec![
3293 PathBuf::from(path!("/")),
3294 PathBuf::from(path!("/outer")),
3295 PathBuf::from(path!("/outer/empty")),
3296 PathBuf::from(path!("/outer/empty copy")),
3297 PathBuf::from(path!("/outer/non-empty")),
3298 PathBuf::from(path!("/outer/non-empty copy")),
3299 ]
3300 );
3301 }
3302
3303 #[gpui::test]
3304 async fn test_copy_recursive(executor: BackgroundExecutor) {
3305 let fs = FakeFs::new(executor.clone());
3306 fs.insert_tree(
3307 path!("/outer"),
3308 json!({
3309 "inner1": {
3310 "a": "A",
3311 "b": "B",
3312 "inner3": {
3313 "d": "D",
3314 },
3315 "inner4": {}
3316 },
3317 "inner2": {
3318 "c": "C",
3319 }
3320 }),
3321 )
3322 .await;
3323
3324 assert_eq!(
3325 fs.files(),
3326 vec![
3327 PathBuf::from(path!("/outer/inner1/a")),
3328 PathBuf::from(path!("/outer/inner1/b")),
3329 PathBuf::from(path!("/outer/inner2/c")),
3330 PathBuf::from(path!("/outer/inner1/inner3/d")),
3331 ]
3332 );
3333 assert_eq!(
3334 fs.directories(false),
3335 vec![
3336 PathBuf::from(path!("/")),
3337 PathBuf::from(path!("/outer")),
3338 PathBuf::from(path!("/outer/inner1")),
3339 PathBuf::from(path!("/outer/inner2")),
3340 PathBuf::from(path!("/outer/inner1/inner3")),
3341 PathBuf::from(path!("/outer/inner1/inner4")),
3342 ]
3343 );
3344
3345 let source = Path::new(path!("/outer"));
3346 let target = Path::new(path!("/outer/inner1/outer"));
3347 copy_recursive(fs.as_ref(), source, target, Default::default())
3348 .await
3349 .unwrap();
3350
3351 assert_eq!(
3352 fs.files(),
3353 vec![
3354 PathBuf::from(path!("/outer/inner1/a")),
3355 PathBuf::from(path!("/outer/inner1/b")),
3356 PathBuf::from(path!("/outer/inner2/c")),
3357 PathBuf::from(path!("/outer/inner1/inner3/d")),
3358 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3359 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3360 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3361 PathBuf::from(path!("/outer/inner1/outer/inner1/inner3/d")),
3362 ]
3363 );
3364 assert_eq!(
3365 fs.directories(false),
3366 vec![
3367 PathBuf::from(path!("/")),
3368 PathBuf::from(path!("/outer")),
3369 PathBuf::from(path!("/outer/inner1")),
3370 PathBuf::from(path!("/outer/inner2")),
3371 PathBuf::from(path!("/outer/inner1/inner3")),
3372 PathBuf::from(path!("/outer/inner1/inner4")),
3373 PathBuf::from(path!("/outer/inner1/outer")),
3374 PathBuf::from(path!("/outer/inner1/outer/inner1")),
3375 PathBuf::from(path!("/outer/inner1/outer/inner2")),
3376 PathBuf::from(path!("/outer/inner1/outer/inner1/inner3")),
3377 PathBuf::from(path!("/outer/inner1/outer/inner1/inner4")),
3378 ]
3379 );
3380 }
3381
3382 #[gpui::test]
3383 async fn test_copy_recursive_with_overwriting(executor: BackgroundExecutor) {
3384 let fs = FakeFs::new(executor.clone());
3385 fs.insert_tree(
3386 path!("/outer"),
3387 json!({
3388 "inner1": {
3389 "a": "A",
3390 "b": "B",
3391 "outer": {
3392 "inner1": {
3393 "a": "B"
3394 }
3395 }
3396 },
3397 "inner2": {
3398 "c": "C",
3399 }
3400 }),
3401 )
3402 .await;
3403
3404 assert_eq!(
3405 fs.files(),
3406 vec![
3407 PathBuf::from(path!("/outer/inner1/a")),
3408 PathBuf::from(path!("/outer/inner1/b")),
3409 PathBuf::from(path!("/outer/inner2/c")),
3410 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3411 ]
3412 );
3413 assert_eq!(
3414 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3415 .await
3416 .unwrap(),
3417 "B",
3418 );
3419
3420 let source = Path::new(path!("/outer"));
3421 let target = Path::new(path!("/outer/inner1/outer"));
3422 copy_recursive(
3423 fs.as_ref(),
3424 source,
3425 target,
3426 CopyOptions {
3427 overwrite: true,
3428 ..Default::default()
3429 },
3430 )
3431 .await
3432 .unwrap();
3433
3434 assert_eq!(
3435 fs.files(),
3436 vec![
3437 PathBuf::from(path!("/outer/inner1/a")),
3438 PathBuf::from(path!("/outer/inner1/b")),
3439 PathBuf::from(path!("/outer/inner2/c")),
3440 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3441 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3442 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3443 PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
3444 ]
3445 );
3446 assert_eq!(
3447 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3448 .await
3449 .unwrap(),
3450 "A"
3451 );
3452 }
3453
3454 #[gpui::test]
3455 async fn test_copy_recursive_with_ignoring(executor: BackgroundExecutor) {
3456 let fs = FakeFs::new(executor.clone());
3457 fs.insert_tree(
3458 path!("/outer"),
3459 json!({
3460 "inner1": {
3461 "a": "A",
3462 "b": "B",
3463 "outer": {
3464 "inner1": {
3465 "a": "B"
3466 }
3467 }
3468 },
3469 "inner2": {
3470 "c": "C",
3471 }
3472 }),
3473 )
3474 .await;
3475
3476 assert_eq!(
3477 fs.files(),
3478 vec![
3479 PathBuf::from(path!("/outer/inner1/a")),
3480 PathBuf::from(path!("/outer/inner1/b")),
3481 PathBuf::from(path!("/outer/inner2/c")),
3482 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3483 ]
3484 );
3485 assert_eq!(
3486 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3487 .await
3488 .unwrap(),
3489 "B",
3490 );
3491
3492 let source = Path::new(path!("/outer"));
3493 let target = Path::new(path!("/outer/inner1/outer"));
3494 copy_recursive(
3495 fs.as_ref(),
3496 source,
3497 target,
3498 CopyOptions {
3499 ignore_if_exists: true,
3500 ..Default::default()
3501 },
3502 )
3503 .await
3504 .unwrap();
3505
3506 assert_eq!(
3507 fs.files(),
3508 vec![
3509 PathBuf::from(path!("/outer/inner1/a")),
3510 PathBuf::from(path!("/outer/inner1/b")),
3511 PathBuf::from(path!("/outer/inner2/c")),
3512 PathBuf::from(path!("/outer/inner1/outer/inner1/a")),
3513 PathBuf::from(path!("/outer/inner1/outer/inner1/b")),
3514 PathBuf::from(path!("/outer/inner1/outer/inner2/c")),
3515 PathBuf::from(path!("/outer/inner1/outer/inner1/outer/inner1/a")),
3516 ]
3517 );
3518 assert_eq!(
3519 fs.load(path!("/outer/inner1/outer/inner1/a").as_ref())
3520 .await
3521 .unwrap(),
3522 "B"
3523 );
3524 }
3525
3526 #[gpui::test]
3527 async fn test_realfs_atomic_write(executor: BackgroundExecutor) {
3528 // With the file handle still open, the file should be replaced
3529 // https://github.com/zed-industries/zed/issues/30054
3530 let fs = RealFs {
3531 bundled_git_binary_path: None,
3532 executor,
3533 next_job_id: Arc::new(AtomicUsize::new(0)),
3534 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3535 trash_cache: Arc::new(Mutex::new(TrashCache::default())),
3536 };
3537 let temp_dir = TempDir::new().unwrap();
3538 let file_to_be_replaced = temp_dir.path().join("file.txt");
3539 let mut file = std::fs::File::create_new(&file_to_be_replaced).unwrap();
3540 file.write_all(b"Hello").unwrap();
3541 // drop(file); // We still hold the file handle here
3542 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3543 assert_eq!(content, "Hello");
3544 smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "World".into())).unwrap();
3545 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3546 assert_eq!(content, "World");
3547 }
3548
3549 #[gpui::test]
3550 async fn test_realfs_atomic_write_non_existing_file(executor: BackgroundExecutor) {
3551 let fs = RealFs {
3552 bundled_git_binary_path: None,
3553 executor,
3554 next_job_id: Arc::new(AtomicUsize::new(0)),
3555 job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
3556 trash_cache: Arc::new(Mutex::new(TrashCache::default())),
3557 };
3558 let temp_dir = TempDir::new().unwrap();
3559 let file_to_be_replaced = temp_dir.path().join("file.txt");
3560 smol::block_on(fs.atomic_write(file_to_be_replaced.clone(), "Hello".into())).unwrap();
3561 let content = std::fs::read_to_string(&file_to_be_replaced).unwrap();
3562 assert_eq!(content, "Hello");
3563 }
3564
3565 #[gpui::test]
3566 #[cfg(target_os = "windows")]
3567 async fn test_realfs_canonicalize(executor: BackgroundExecutor) {
3568 use util::paths::SanitizedPath;
3569
let fs = RealFs {
bundled_git_binary_path: None,
executor,
next_job_id: Arc::new(AtomicUsize::new(0)),
job_event_subscribers: Arc::new(Mutex::new(Vec::new())),
trash_cache: Arc::new(Mutex::new(TrashCache::default())),
};
3576 let temp_dir = TempDir::new().unwrap();
3577 let file = temp_dir.path().join("test (1).txt");
3578 let file = SanitizedPath::new(&file);
3579 std::fs::write(&file, "test").unwrap();
3580
3581 let canonicalized = fs.canonicalize(file.as_path()).await;
3582 assert!(canonicalized.is_ok());
3583 }
3584
3585 #[gpui::test]
3586 async fn test_rename(executor: BackgroundExecutor) {
3587 let fs = FakeFs::new(executor.clone());
3588 fs.insert_tree(
3589 path!("/root"),
3590 json!({
3591 "src": {
3592 "file_a.txt": "content a",
3593 "file_b.txt": "content b"
3594 }
3595 }),
3596 )
3597 .await;
3598
3599 fs.rename(
3600 Path::new(path!("/root/src/file_a.txt")),
3601 Path::new(path!("/root/src/new/renamed_a.txt")),
3602 RenameOptions {
3603 create_parents: true,
3604 ..Default::default()
3605 },
3606 )
3607 .await
3608 .unwrap();
3609
// Assert that `file_a.txt` was renamed and moved to a directory that did
// not exist before.
3612 assert_eq!(
3613 fs.files(),
3614 vec![
3615 PathBuf::from(path!("/root/src/file_b.txt")),
3616 PathBuf::from(path!("/root/src/new/renamed_a.txt")),
3617 ]
3618 );
3619
3620 let result = fs
3621 .rename(
3622 Path::new(path!("/root/src/file_b.txt")),
3623 Path::new(path!("/root/src/old/renamed_b.txt")),
3624 RenameOptions {
3625 create_parents: false,
3626 ..Default::default()
3627 },
3628 )
3629 .await;
3630
// Assert that `file_b.txt` was neither renamed nor moved, because
// `create_parents` was set to `false` and the target directory did not exist.
3634 assert!(result.is_err());
3635 assert_eq!(
3636 fs.files(),
3637 vec![
3638 PathBuf::from(path!("/root/src/file_b.txt")),
3639 PathBuf::from(path!("/root/src/new/renamed_a.txt")),
3640 ]
3641 );
3642 }
3643
3644 #[gpui::test]
3645 async fn test_trash_and_restore(executor: BackgroundExecutor) {
3646 let fs = FakeFs::new(executor.clone());
3647 fs.insert_tree(
3648 path!("/root"),
3649 json!({
3650 "src": {
3651 "file_a.txt": "content a",
3652 "file_b.txt": "content b",
3653 "file_c.txt": "content c"
3654 }
3655 }),
3656 )
3657 .await;
3658 let file_a = fs
3659 .trash_file(
3660 path!("/root/src/file_a.txt").as_ref(),
3661 RemoveOptions::default(),
3662 )
3663 .await
3664 .unwrap()
3665 .unwrap();
3666 assert!(!fs.is_file(path!("/root/src/file_a.txt").as_ref()).await);
3667 let src_dir = fs
3668 .trash_dir(
3669 path!("/root/src").as_ref(),
3670 RemoveOptions {
3671 recursive: true,
3672 ignore_if_not_exists: false,
3673 },
3674 )
3675 .await
3676 .unwrap()
3677 .unwrap();
3678 assert!(!fs.is_dir(path!("/root/src").as_ref()).await);
3679 fs.restore_from_trash(src_dir).await.unwrap();
3680 assert!(fs.is_dir(path!("/root/src").as_ref()).await);
3681 assert!(!fs.is_file(path!("/root/src/file_a.txt").as_ref()).await);
3682 fs.restore_from_trash(file_a).await.unwrap();
3683 assert!(fs.is_file(path!("/root/src/file_a.txt").as_ref()).await);
3684 }
3685}