@@ -0,0 +1,1283 @@
+pub mod repository;
+
+use anyhow::{anyhow, Result};
+use fsevent::EventStream;
+use futures::{future::BoxFuture, Stream, StreamExt};
+use git2::Repository as LibGitRepository;
+use parking_lot::Mutex;
+use repository::GitRepository;
+use rope::Rope;
+use smol::io::{AsyncReadExt, AsyncWriteExt};
+use std::io::Write;
+use std::sync::Arc;
+use std::{
+ io,
+ os::unix::fs::MetadataExt,
+ path::{Component, Path, PathBuf},
+ pin::Pin,
+ time::{Duration, SystemTime},
+};
+use tempfile::NamedTempFile;
+use text::LineEnding;
+use util::ResultExt;
+
+#[cfg(any(test, feature = "test-support"))]
+use collections::{btree_map, BTreeMap};
+#[cfg(any(test, feature = "test-support"))]
+use repository::{FakeGitRepositoryState, GitFileStatus};
+#[cfg(any(test, feature = "test-support"))]
+use std::ffi::OsStr;
+
/// An async, object-safe interface to the filesystem.
///
/// [`RealFs`] implements it against the host filesystem; under the
/// `test-support` feature, `FakeFs` provides a deterministic in-memory
/// implementation for tests.
#[async_trait::async_trait]
pub trait Fs: Send + Sync {
    async fn create_dir(&self, path: &Path) -> Result<()>;
    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()>;
    async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()>;
    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()>;
    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()>;
    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()>;
    /// Opens a synchronous (blocking) reader for the file at `path`.
    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read>>;
    /// Reads the entire file at `path` as a UTF-8 string.
    async fn load(&self, path: &Path) -> Result<String>;
    /// Writes `text` to `path` such that readers never observe a partial write.
    async fn atomic_write(&self, path: PathBuf, text: String) -> Result<()>;
    /// Writes `text` to `path`, converting each newline to `line_ending`.
    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()>;
    async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
    async fn is_file(&self, path: &Path) -> bool;
    /// Returns `Ok(None)` when `path` does not exist (rather than an error).
    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
    async fn read_link(&self, path: &Path) -> Result<PathBuf>;
    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>>;
    /// Streams batches of filesystem events for changes under `path`.
    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>;
    fn open_repo(&self, abs_dot_git: &Path) -> Option<Arc<Mutex<dyn GitRepository>>>;
    fn is_fake(&self) -> bool;
    /// Downcasts to [`FakeFs`]; panics on the real implementation.
    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &FakeFs;
}
+
/// Options controlling [`Fs::create_file`].
#[derive(Copy, Clone, Default)]
pub struct CreateOptions {
    // Truncate an existing file instead of failing.
    pub overwrite: bool,
    // Succeed silently if the file already exists (when not overwriting).
    pub ignore_if_exists: bool,
}
+
/// Options controlling [`Fs::copy_file`].
#[derive(Copy, Clone, Default)]
pub struct CopyOptions {
    // Replace an existing target instead of failing.
    pub overwrite: bool,
    // Succeed silently if the target already exists (when not overwriting).
    pub ignore_if_exists: bool,
}
+
/// Options controlling [`Fs::rename`].
#[derive(Copy, Clone, Default)]
pub struct RenameOptions {
    // Replace an existing target instead of failing.
    pub overwrite: bool,
    // Succeed silently if the target already exists (when not overwriting).
    pub ignore_if_exists: bool,
}
+
/// Options controlling [`Fs::remove_dir`] and [`Fs::remove_file`].
#[derive(Copy, Clone, Default)]
pub struct RemoveOptions {
    // For directories: also remove all contents.
    pub recursive: bool,
    // Treat a missing path as success instead of an error.
    pub ignore_if_not_exists: bool,
}
+
/// A subset of filesystem metadata, as reported by [`Fs::metadata`].
#[derive(Copy, Clone, Debug)]
pub struct Metadata {
    pub inode: u64,
    // Last modification time.
    pub mtime: SystemTime,
    // True when the path itself is a symlink; the remaining fields then
    // describe the link's target.
    pub is_symlink: bool,
    pub is_dir: bool,
}
+
/// [`Fs`] implementation backed by the host filesystem (via `smol`/`std`).
pub struct RealFs;
+
#[async_trait::async_trait]
impl Fs for RealFs {
    async fn create_dir(&self, path: &Path) -> Result<()> {
        // `create_dir_all` creates missing parents and is a no-op if the
        // directory already exists.
        Ok(smol::fs::create_dir_all(path).await?)
    }

    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
        let mut open_options = smol::fs::OpenOptions::new();
        open_options.write(true).create(true);
        if options.overwrite {
            open_options.truncate(true);
        } else if !options.ignore_if_exists {
            // Exclusive creation: fail if the file already exists.
            open_options.create_new(true);
        }
        open_options.open(path).await?;
        Ok(())
    }

    async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
        // Honor overwrite/ignore semantics before copying any data.
        if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
            if options.ignore_if_exists {
                return Ok(());
            } else {
                return Err(anyhow!("{target:?} already exists"));
            }
        }

        smol::fs::copy(source, target).await?;
        Ok(())
    }

    async fn rename(&self, source: &Path, target: &Path, options: RenameOptions) -> Result<()> {
        // Honor overwrite/ignore semantics before renaming.
        if !options.overwrite && smol::fs::metadata(target).await.is_ok() {
            if options.ignore_if_exists {
                return Ok(());
            } else {
                return Err(anyhow!("{target:?} already exists"));
            }
        }

        smol::fs::rename(source, target).await?;
        Ok(())
    }

    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        let result = if options.recursive {
            smol::fs::remove_dir_all(path).await
        } else {
            smol::fs::remove_dir(path).await
        };
        match result {
            Ok(()) => Ok(()),
            // A missing directory is success when `ignore_if_not_exists`.
            Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
                Ok(())
            }
            Err(err) => Err(err)?,
        }
    }

    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        match smol::fs::remove_file(path).await {
            Ok(()) => Ok(()),
            // A missing file is success when `ignore_if_not_exists`.
            Err(err) if err.kind() == io::ErrorKind::NotFound && options.ignore_if_not_exists => {
                Ok(())
            }
            Err(err) => Err(err)?,
        }
    }

    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read>> {
        // Returns a blocking reader; only the open itself happens here.
        Ok(Box::new(std::fs::File::open(path)?))
    }

    async fn load(&self, path: &Path) -> Result<String> {
        let mut file = smol::fs::File::open(path).await?;
        let mut text = String::new();
        file.read_to_string(&mut text).await?;
        Ok(text)
    }

    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        // Write to a temp file and atomically rename it over `path`, so
        // readers never observe a partially written file. The blocking work
        // runs on smol's blocking thread pool.
        smol::unblock(move || {
            let mut tmp_file = NamedTempFile::new()?;
            tmp_file.write_all(data.as_bytes())?;
            tmp_file.persist(path)?;
            Ok::<(), anyhow::Error>(())
        })
        .await?;

        Ok(())
    }

    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
        // Size the write buffer to the text, capped at 10 KiB.
        let buffer_size = text.summary().len.min(10 * 1024);
        if let Some(path) = path.parent() {
            self.create_dir(path).await?;
        }
        let file = smol::fs::File::create(path).await?;
        let mut writer = smol::io::BufWriter::with_capacity(buffer_size, file);
        // `chunks` rewrites "\n" to the requested line ending on the fly.
        for chunk in chunks(text, line_ending) {
            writer.write_all(chunk.as_bytes()).await?;
        }
        writer.flush().await?;
        Ok(())
    }

    async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
        Ok(smol::fs::canonicalize(path).await?)
    }

    async fn is_file(&self, path: &Path) -> bool {
        smol::fs::metadata(path)
            .await
            .map_or(false, |metadata| metadata.is_file())
    }

    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        let symlink_metadata = match smol::fs::symlink_metadata(path).await {
            Ok(metadata) => metadata,
            Err(err) => {
                return match (err.kind(), err.raw_os_error()) {
                    // Nonexistent paths are reported as `Ok(None)`, not errors.
                    (io::ErrorKind::NotFound, _) => Ok(None),
                    // ENOTDIR: an ancestor component is a regular file;
                    // treat the full path as nonexistent too.
                    (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
                    _ => Err(anyhow::Error::new(err)),
                }
            }
        };

        // For symlinks, report the target's metadata but remember that the
        // path itself is a link.
        let is_symlink = symlink_metadata.file_type().is_symlink();
        let metadata = if is_symlink {
            smol::fs::metadata(path).await?
        } else {
            symlink_metadata
        };
        Ok(Some(Metadata {
            inode: metadata.ino(),
            // unwrap: assumes the platform exposes modification times.
            mtime: metadata.modified().unwrap(),
            is_symlink,
            is_dir: metadata.file_type().is_dir(),
        }))
    }

    async fn read_link(&self, path: &Path) -> Result<PathBuf> {
        let path = smol::fs::read_link(path).await?;
        Ok(path)
    }

    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
        let result = smol::fs::read_dir(path).await?.map(|entry| match entry {
            Ok(entry) => Ok(entry.path()),
            Err(error) => Err(anyhow!("failed to read dir entry {:?}", error)),
        });
        Ok(Box::pin(result))
    }

    async fn watch(
        &self,
        path: &Path,
        latency: Duration,
    ) -> Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>> {
        // Bridge the callback-based fsevent API to a stream: a dedicated
        // thread pumps event batches into an unbounded channel.
        let (tx, rx) = smol::channel::unbounded();
        let (stream, handle) = EventStream::new(&[path], latency);
        std::thread::spawn(move || {
            stream.run(move |events| smol::block_on(tx.send(events)).is_ok());
        });
        // `handle` is kept alive as long as the returned stream; dropping it
        // (when the channel is exhausted) shuts the event stream down.
        Box::pin(rx.chain(futures::stream::once(async move {
            drop(handle);
            vec![]
        })))
    }

    fn open_repo(&self, dotgit_path: &Path) -> Option<Arc<Mutex<dyn GitRepository>>> {
        // Failures are logged (via `log_err`) and reported as `None`.
        LibGitRepository::open(&dotgit_path)
            .log_err()
            .and_then::<Arc<Mutex<dyn GitRepository>>, _>(|libgit_repository| {
                Some(Arc::new(Mutex::new(libgit_repository)))
            })
    }

    fn is_fake(&self) -> bool {
        false
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &FakeFs {
        panic!("called `RealFs::as_fake`")
    }
}
+
#[cfg(any(test, feature = "test-support"))]
/// Deterministic in-memory [`Fs`] implementation for tests.
pub struct FakeFs {
    // Use an unfair lock to ensure tests are deterministic.
    state: Mutex<FakeFsState>,
    // Used to inject simulated random scheduling delays into operations.
    executor: gpui2::Executor,
}
+
#[cfg(any(test, feature = "test-support"))]
/// Mutable state behind [`FakeFs`]'s lock.
struct FakeFsState {
    // The root ("/") directory entry.
    root: Arc<Mutex<FakeFsEntry>>,
    // Counter handed out as the inode of each newly created entry.
    next_inode: u64,
    // Monotonic fake clock: bumped by 1ns per created/modified entry.
    next_mtime: SystemTime,
    // Senders feeding each stream returned by `FakeFs::watch`.
    event_txs: Vec<smol::channel::Sender<Vec<fsevent::Event>>>,
    // When true, events accumulate in `buffered_events` until flushed.
    events_paused: bool,
    buffered_events: Vec<fsevent::Event>,
    // Instrumentation counters exposed for tests.
    metadata_call_count: usize,
    read_dir_call_count: usize,
}
+
#[cfg(any(test, feature = "test-support"))]
/// A node in the fake filesystem tree.
#[derive(Debug)]
enum FakeFsEntry {
    File {
        inode: u64,
        mtime: SystemTime,
        content: String,
    },
    Dir {
        inode: u64,
        mtime: SystemTime,
        // Children keyed by file name, sorted for deterministic iteration.
        entries: BTreeMap<String, Arc<Mutex<FakeFsEntry>>>,
        // Lazily created fake git repository state for `.git` directories.
        git_repo_state: Option<Arc<Mutex<repository::FakeGitRepositoryState>>>,
    },
    Symlink {
        target: PathBuf,
    },
}
+
#[cfg(any(test, feature = "test-support"))]
impl FakeFsState {
    /// Resolves `target` to its entry (following symlinks), or errors if the
    /// path does not exist.
    // The explicit `'a` lifetime parameters previously carried by this method
    // and `try_read_path` were redundant (the return values are owned), so
    // they are omitted per clippy's `needless_lifetimes`.
    fn read_path(&self, target: &Path) -> Result<Arc<Mutex<FakeFsEntry>>> {
        Ok(self
            .try_read_path(target, true)
            .ok_or_else(|| anyhow!("path does not exist: {}", target.display()))?
            .0)
    }

    /// Walks `target` component by component from the root, returning the
    /// entry together with its canonical (symlink-free) path, or `None` if
    /// any component is missing.
    ///
    /// Intermediate symlinks are always followed by splicing the remaining
    /// components onto the link target and restarting the walk; a symlink in
    /// the final position is only resolved when `follow_symlink` is true.
    fn try_read_path(
        &self,
        target: &Path,
        follow_symlink: bool,
    ) -> Option<(Arc<Mutex<FakeFsEntry>>, PathBuf)> {
        let mut path = target.to_path_buf();
        let mut canonical_path = PathBuf::new();
        let mut entry_stack = Vec::new();
        'outer: loop {
            let mut path_components = path.components().peekable();
            while let Some(component) = path_components.next() {
                match component {
                    Component::Prefix(_) => panic!("prefix paths aren't supported"),
                    Component::RootDir => {
                        entry_stack.clear();
                        entry_stack.push(self.root.clone());
                        canonical_path.clear();
                        canonical_path.push("/");
                    }
                    Component::CurDir => {}
                    Component::ParentDir => {
                        // `..` above the root resolves to nothing.
                        entry_stack.pop()?;
                        canonical_path.pop();
                    }
                    Component::Normal(name) => {
                        let current_entry = entry_stack.last().cloned()?;
                        let current_entry = current_entry.lock();
                        if let FakeFsEntry::Dir { entries, .. } = &*current_entry {
                            let entry = entries.get(name.to_str().unwrap()).cloned()?;
                            if path_components.peek().is_some() || follow_symlink {
                                let entry = entry.lock();
                                if let FakeFsEntry::Symlink { target, .. } = &*entry {
                                    // Restart the walk on the link target with
                                    // the unconsumed components appended.
                                    let mut target = target.clone();
                                    target.extend(path_components);
                                    path = target;
                                    continue 'outer;
                                }
                            }
                            entry_stack.push(entry.clone());
                            canonical_path.push(name);
                        } else {
                            return None;
                        }
                    }
                }
            }
            break;
        }
        Some((entry_stack.pop()?, canonical_path))
    }

    /// Looks up `path`'s parent directory and passes the entry slot for
    /// `path`'s file name to `callback` (for insertion, replacement, or
    /// removal). Errors if the parent is missing or is not a directory.
    fn write_path<Fn, T>(&self, path: &Path, callback: Fn) -> Result<T>
    where
        Fn: FnOnce(btree_map::Entry<String, Arc<Mutex<FakeFsEntry>>>) -> Result<T>,
    {
        let path = normalize_path(path);
        let filename = path
            .file_name()
            .ok_or_else(|| anyhow!("cannot overwrite the root"))?;
        let parent_path = path.parent().unwrap();

        let parent = self.read_path(parent_path)?;
        let mut parent = parent.lock();
        let new_entry = parent
            .dir_entries(parent_path)?
            .entry(filename.to_str().unwrap().into());
        callback(new_entry)
    }

    /// Queues one synthetic event per path, flushing immediately unless
    /// events are currently paused.
    fn emit_event<I, T>(&mut self, paths: I)
    where
        I: IntoIterator<Item = T>,
        T: Into<PathBuf>,
    {
        self.buffered_events
            .extend(paths.into_iter().map(|path| fsevent::Event {
                event_id: 0,
                flags: fsevent::StreamFlags::empty(),
                path: path.into(),
            }));

        if !self.events_paused {
            self.flush_events(self.buffered_events.len());
        }
    }

    /// Sends up to `count` buffered events to all watchers, dropping
    /// channels whose receivers have gone away.
    fn flush_events(&mut self, mut count: usize) {
        count = count.min(self.buffered_events.len());
        let events = self.buffered_events.drain(0..count).collect::<Vec<_>>();
        self.event_txs.retain(|tx| {
            let _ = tx.try_send(events.clone());
            !tx.is_closed()
        });
    }
}
+
#[cfg(any(test, feature = "test-support"))]
lazy_static::lazy_static! {
    /// The `.git` directory name, used to filter paths in `FakeFs::paths`
    /// and `FakeFs::directories`.
    pub static ref FS_DOT_GIT: &'static OsStr = OsStr::new(".git");
}
+
#[cfg(any(test, feature = "test-support"))]
impl FakeFs {
    /// Creates an empty fake filesystem rooted at `/`.
    pub fn new(executor: gpui2::Executor) -> Arc<Self> {
        Arc::new(Self {
            executor,
            state: Mutex::new(FakeFsState {
                root: Arc::new(Mutex::new(FakeFsEntry::Dir {
                    inode: 0,
                    mtime: SystemTime::UNIX_EPOCH,
                    entries: Default::default(),
                    git_repo_state: None,
                })),
                next_mtime: SystemTime::UNIX_EPOCH,
                next_inode: 1,
                event_txs: Default::default(),
                buffered_events: Vec::new(),
                events_paused: false,
                read_dir_call_count: 0,
                metadata_call_count: 0,
            }),
        })
    }

    /// Creates or replaces the file at `path` with `content`.
    /// Panics if the parent directory does not exist.
    pub async fn insert_file(&self, path: impl AsRef<Path>, content: String) {
        self.write_file_internal(path, content).unwrap()
    }

    /// Creates or replaces a symlink at `path` pointing at `target`.
    pub async fn insert_symlink(&self, path: impl AsRef<Path>, target: PathBuf) {
        let mut state = self.state.lock();
        let path = path.as_ref();
        let file = Arc::new(Mutex::new(FakeFsEntry::Symlink { target }));
        state
            .write_path(path.as_ref(), move |e| match e {
                btree_map::Entry::Vacant(e) => {
                    e.insert(file);
                    Ok(())
                }
                btree_map::Entry::Occupied(mut e) => {
                    *e.get_mut() = file;
                    Ok(())
                }
            })
            .unwrap();
        state.emit_event(&[path]);
    }

    /// Synchronously writes `content` to `path`, allocating a fresh inode
    /// and mtime, and emits a change event.
    pub fn write_file_internal(&self, path: impl AsRef<Path>, content: String) -> Result<()> {
        let mut state = self.state.lock();
        let path = path.as_ref();
        let inode = state.next_inode;
        let mtime = state.next_mtime;
        state.next_inode += 1;
        state.next_mtime += Duration::from_nanos(1);
        let file = Arc::new(Mutex::new(FakeFsEntry::File {
            inode,
            mtime,
            content,
        }));
        state.write_path(path, move |entry| {
            match entry {
                btree_map::Entry::Vacant(e) => {
                    e.insert(file);
                }
                btree_map::Entry::Occupied(mut e) => {
                    *e.get_mut() = file;
                }
            }
            Ok(())
        })?;
        state.emit_event(&[path]);
        Ok(())
    }

    /// Stops delivering events to watchers; they accumulate until
    /// `flush_events` is called.
    pub fn pause_events(&self) {
        self.state.lock().events_paused = true;
    }

    /// Number of events currently buffered (while paused).
    pub fn buffered_event_count(&self) -> usize {
        self.state.lock().buffered_events.len()
    }

    /// Delivers up to `count` buffered events to watchers.
    pub fn flush_events(&self, count: usize) {
        self.state.lock().flush_events(count);
    }

    /// Builds a directory tree from a JSON description: objects are
    /// directories, strings are file contents, `null` is an empty directory.
    /// Panics on any other JSON value.
    #[must_use]
    pub fn insert_tree<'a>(
        &'a self,
        path: impl 'a + AsRef<Path> + Send,
        tree: serde_json::Value,
    ) -> futures::future::BoxFuture<'a, ()> {
        use futures::FutureExt as _;
        use serde_json::Value::*;

        async move {
            let path = path.as_ref();

            match tree {
                Object(map) => {
                    self.create_dir(path).await.unwrap();
                    for (name, contents) in map {
                        let mut path = PathBuf::from(path);
                        path.push(name);
                        // Boxed recursion: async fns cannot recurse directly.
                        self.insert_tree(&path, contents).await;
                    }
                }
                Null => {
                    self.create_dir(path).await.unwrap();
                }
                String(contents) => {
                    self.insert_file(&path, contents).await;
                }
                _ => {
                    panic!("JSON object must contain only objects, strings, or null");
                }
            }
        }
        .boxed()
    }

    /// Runs `f` against the fake git state of the repository at `dot_git`,
    /// creating that state on first use. Panics if `dot_git` is not an
    /// existing directory.
    pub fn with_git_state<F>(&self, dot_git: &Path, emit_git_event: bool, f: F)
    where
        F: FnOnce(&mut FakeGitRepositoryState),
    {
        let mut state = self.state.lock();
        let entry = state.read_path(dot_git).unwrap();
        let mut entry = entry.lock();

        if let FakeFsEntry::Dir { git_repo_state, .. } = &mut *entry {
            let repo_state = git_repo_state.get_or_insert_with(Default::default);
            let mut repo_state = repo_state.lock();

            f(&mut repo_state);

            if emit_git_event {
                state.emit_event([dot_git]);
            }
        } else {
            panic!("not a directory");
        }
    }

    /// Sets (or clears) the checked-out branch name of the repo at `dot_git`.
    pub fn set_branch_name(&self, dot_git: &Path, branch: Option<impl Into<String>>) {
        self.with_git_state(dot_git, true, |state| {
            state.branch_name = branch.map(Into::into)
        })
    }

    /// Replaces the repo's index contents with the given (path, text) pairs.
    pub fn set_index_for_repo(&self, dot_git: &Path, head_state: &[(&Path, String)]) {
        self.with_git_state(dot_git, true, |state| {
            state.index_contents.clear();
            state.index_contents.extend(
                head_state
                    .iter()
                    .map(|(path, content)| (path.to_path_buf(), content.clone())),
            );
        });
    }

    /// Replaces worktree statuses, emitting events for the affected worktree
    /// paths (not the `.git` directory), as a working-copy change would.
    pub fn set_status_for_repo_via_working_copy_change(
        &self,
        dot_git: &Path,
        statuses: &[(&Path, GitFileStatus)],
    ) {
        self.with_git_state(dot_git, false, |state| {
            state.worktree_statuses.clear();
            state.worktree_statuses.extend(
                statuses
                    .iter()
                    .map(|(path, content)| ((**path).into(), content.clone())),
            );
        });
        self.state.lock().emit_event(
            statuses
                .iter()
                .map(|(path, _)| dot_git.parent().unwrap().join(path)),
        );
    }

    /// Replaces worktree statuses, emitting an event for the `.git`
    /// directory itself, as a git operation would.
    pub fn set_status_for_repo_via_git_operation(
        &self,
        dot_git: &Path,
        statuses: &[(&Path, GitFileStatus)],
    ) {
        self.with_git_state(dot_git, true, |state| {
            state.worktree_statuses.clear();
            state.worktree_statuses.extend(
                statuses
                    .iter()
                    .map(|(path, content)| ((**path).into(), content.clone())),
            );
        });
    }

    /// All paths in the filesystem (breadth-first), optionally excluding
    /// anything under a `.git` component.
    pub fn paths(&self, include_dot_git: bool) -> Vec<PathBuf> {
        let mut result = Vec::new();
        let mut queue = collections::VecDeque::new();
        queue.push_back((PathBuf::from("/"), self.state.lock().root.clone()));
        while let Some((path, entry)) = queue.pop_front() {
            if let FakeFsEntry::Dir { entries, .. } = &*entry.lock() {
                for (name, entry) in entries {
                    queue.push_back((path.join(name), entry.clone()));
                }
            }
            if include_dot_git
                || !path
                    .components()
                    .any(|component| component.as_os_str() == *FS_DOT_GIT)
            {
                result.push(path);
            }
        }
        result
    }

    /// All directory paths (breadth-first), optionally excluding anything
    /// under a `.git` component.
    pub fn directories(&self, include_dot_git: bool) -> Vec<PathBuf> {
        let mut result = Vec::new();
        let mut queue = collections::VecDeque::new();
        queue.push_back((PathBuf::from("/"), self.state.lock().root.clone()));
        while let Some((path, entry)) = queue.pop_front() {
            if let FakeFsEntry::Dir { entries, .. } = &*entry.lock() {
                for (name, entry) in entries {
                    queue.push_back((path.join(name), entry.clone()));
                }
                if include_dot_git
                    || !path
                        .components()
                        .any(|component| component.as_os_str() == *FS_DOT_GIT)
                {
                    result.push(path);
                }
            }
        }
        result
    }

    /// All regular-file paths, breadth-first. Symlinks are not followed.
    pub fn files(&self) -> Vec<PathBuf> {
        let mut result = Vec::new();
        let mut queue = collections::VecDeque::new();
        queue.push_back((PathBuf::from("/"), self.state.lock().root.clone()));
        while let Some((path, entry)) = queue.pop_front() {
            let e = entry.lock();
            match &*e {
                FakeFsEntry::File { .. } => result.push(path),
                FakeFsEntry::Dir { entries, .. } => {
                    for (name, entry) in entries {
                        queue.push_back((path.join(name), entry.clone()));
                    }
                }
                FakeFsEntry::Symlink { .. } => {}
            }
        }
        result
    }

    /// How many `read_dir` calls have been issued.
    pub fn read_dir_call_count(&self) -> usize {
        self.state.lock().read_dir_call_count
    }

    /// How many `metadata` calls have been issued.
    pub fn metadata_call_count(&self) -> usize {
        self.state.lock().metadata_call_count
    }

    // Injects a random scheduling delay from the test executor.
    fn simulate_random_delay(&self) -> impl futures::Future<Output = ()> {
        self.executor.simulate_random_delay()
    }
}
+
#[cfg(any(test, feature = "test-support"))]
impl FakeFsEntry {
    /// Whether this entry is a regular file.
    fn is_file(&self) -> bool {
        matches!(self, Self::File { .. })
    }

    /// Whether this entry is a symbolic link.
    fn is_symlink(&self) -> bool {
        matches!(self, Self::Symlink { .. })
    }

    /// Borrows the file's content; fails if this entry is not a file.
    fn file_content(&self, path: &Path) -> Result<&String> {
        match self {
            Self::File { content, .. } => Ok(content),
            _ => Err(anyhow!("not a file: {}", path.display())),
        }
    }

    /// Replaces the file's content, stamping a fresh modification time;
    /// fails if this entry is not a file.
    fn set_file_content(&mut self, path: &Path, new_content: String) -> Result<()> {
        match self {
            Self::File { content, mtime, .. } => {
                *mtime = SystemTime::now();
                *content = new_content;
                Ok(())
            }
            _ => Err(anyhow!("not a file: {}", path.display())),
        }
    }

    /// Mutably borrows the directory's child map; fails if this entry is
    /// not a directory.
    fn dir_entries(
        &mut self,
        path: &Path,
    ) -> Result<&mut BTreeMap<String, Arc<Mutex<FakeFsEntry>>>> {
        match self {
            Self::Dir { entries, .. } => Ok(entries),
            _ => Err(anyhow!("not a directory: {}", path.display())),
        }
    }
}
+
#[cfg(any(test, feature = "test-support"))]
#[async_trait::async_trait]
impl Fs for FakeFs {
    async fn create_dir(&self, path: &Path) -> Result<()> {
        self.simulate_random_delay().await;

        // Create every missing ancestor in turn, like `create_dir_all`.
        // The state lock is re-acquired per component so other tasks can
        // interleave between steps.
        let mut created_dirs = Vec::new();
        let mut cur_path = PathBuf::new();
        for component in path.components() {
            let mut state = self.state.lock();
            cur_path.push(component);
            if cur_path == Path::new("/") {
                continue;
            }

            let inode = state.next_inode;
            let mtime = state.next_mtime;
            state.next_mtime += Duration::from_nanos(1);
            state.next_inode += 1;
            state.write_path(&cur_path, |entry| {
                entry.or_insert_with(|| {
                    created_dirs.push(cur_path.clone());
                    Arc::new(Mutex::new(FakeFsEntry::Dir {
                        inode,
                        mtime,
                        entries: Default::default(),
                        git_repo_state: None,
                    }))
                });
                Ok(())
            })?
        }

        // Only the directories that were actually created emit events.
        self.state.lock().emit_event(&created_dirs);
        Ok(())
    }

    async fn create_file(&self, path: &Path, options: CreateOptions) -> Result<()> {
        self.simulate_random_delay().await;
        let mut state = self.state.lock();
        let inode = state.next_inode;
        let mtime = state.next_mtime;
        state.next_mtime += Duration::from_nanos(1);
        state.next_inode += 1;
        let file = Arc::new(Mutex::new(FakeFsEntry::File {
            inode,
            mtime,
            content: String::new(),
        }));
        state.write_path(path, |entry| {
            match entry {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        *e.get_mut() = file;
                    } else if !options.ignore_if_exists {
                        return Err(anyhow!("path already exists: {}", path.display()));
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(file);
                }
            }
            Ok(())
        })?;
        state.emit_event(&[path]);
        Ok(())
    }

    async fn rename(&self, old_path: &Path, new_path: &Path, options: RenameOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let old_path = normalize_path(old_path);
        let new_path = normalize_path(new_path);

        // Three phases under one lock: read the moved entry, link it at the
        // new path (honoring overwrite semantics), then unlink the old path.
        let mut state = self.state.lock();
        let moved_entry = state.write_path(&old_path, |e| {
            if let btree_map::Entry::Occupied(e) = e {
                Ok(e.get().clone())
            } else {
                Err(anyhow!("path does not exist: {}", &old_path.display()))
            }
        })?;

        state.write_path(&new_path, |e| {
            match e {
                btree_map::Entry::Occupied(mut e) => {
                    if options.overwrite {
                        *e.get_mut() = moved_entry;
                    } else if !options.ignore_if_exists {
                        return Err(anyhow!("path already exists: {}", new_path.display()));
                    }
                }
                btree_map::Entry::Vacant(e) => {
                    e.insert(moved_entry);
                }
            }
            Ok(())
        })?;

        state
            .write_path(&old_path, |e| {
                if let btree_map::Entry::Occupied(e) = e {
                    Ok(e.remove())
                } else {
                    // The entry was present above and the lock is still held.
                    unreachable!()
                }
            })
            .unwrap();

        state.emit_event(&[old_path, new_path]);
        Ok(())
    }

    async fn copy_file(&self, source: &Path, target: &Path, options: CopyOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let source = normalize_path(source);
        let target = normalize_path(target);
        let mut state = self.state.lock();
        let mtime = state.next_mtime;
        let inode = util::post_inc(&mut state.next_inode);
        state.next_mtime += Duration::from_nanos(1);
        let source_entry = state.read_path(&source)?;
        let content = source_entry.lock().file_content(&source)?.clone();
        // `None` means the copy was skipped because the target exists and
        // `ignore_if_exists` was set.
        let entry = state.write_path(&target, |e| match e {
            btree_map::Entry::Occupied(e) => {
                if options.overwrite {
                    Ok(Some(e.get().clone()))
                } else if !options.ignore_if_exists {
                    return Err(anyhow!("{target:?} already exists"));
                } else {
                    Ok(None)
                }
            }
            btree_map::Entry::Vacant(e) => Ok(Some(
                e.insert(Arc::new(Mutex::new(FakeFsEntry::File {
                    inode,
                    mtime,
                    content: String::new(),
                })))
                .clone(),
            )),
        })?;
        if let Some(entry) = entry {
            entry.lock().set_file_content(&target, content)?;
        }
        state.emit_event(&[target]);
        Ok(())
    }

    async fn remove_dir(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let path = normalize_path(path);
        let parent_path = path
            .parent()
            .ok_or_else(|| anyhow!("cannot remove the root"))?;
        let base_name = path.file_name().unwrap();

        let mut state = self.state.lock();
        let parent_entry = state.read_path(parent_path)?;
        let mut parent_entry = parent_entry.lock();
        let entry = parent_entry
            .dir_entries(parent_path)?
            .entry(base_name.to_str().unwrap().into());

        match entry {
            btree_map::Entry::Vacant(_) => {
                if !options.ignore_if_not_exists {
                    return Err(anyhow!("{path:?} does not exist"));
                }
            }
            btree_map::Entry::Occupied(e) => {
                {
                    // `dir_entries` also rejects removing a file via this API.
                    let mut entry = e.get().lock();
                    let children = entry.dir_entries(&path)?;
                    if !options.recursive && !children.is_empty() {
                        return Err(anyhow!("{path:?} is not empty"));
                    }
                }
                e.remove();
            }
        }
        state.emit_event(&[path]);
        Ok(())
    }

    async fn remove_file(&self, path: &Path, options: RemoveOptions) -> Result<()> {
        self.simulate_random_delay().await;

        let path = normalize_path(path);
        let parent_path = path
            .parent()
            .ok_or_else(|| anyhow!("cannot remove the root"))?;
        let base_name = path.file_name().unwrap();
        let mut state = self.state.lock();
        let parent_entry = state.read_path(parent_path)?;
        let mut parent_entry = parent_entry.lock();
        let entry = parent_entry
            .dir_entries(parent_path)?
            .entry(base_name.to_str().unwrap().into());
        match entry {
            btree_map::Entry::Vacant(_) => {
                if !options.ignore_if_not_exists {
                    return Err(anyhow!("{path:?} does not exist"));
                }
            }
            btree_map::Entry::Occupied(e) => {
                // `file_content` errors if the entry is not a file, so this
                // refuses to remove directories.
                e.get().lock().file_content(&path)?;
                e.remove();
            }
        }
        state.emit_event(&[path]);
        Ok(())
    }

    async fn open_sync(&self, path: &Path) -> Result<Box<dyn io::Read>> {
        let text = self.load(path).await?;
        Ok(Box::new(io::Cursor::new(text)))
    }

    async fn load(&self, path: &Path) -> Result<String> {
        let path = normalize_path(path);
        self.simulate_random_delay().await;
        let state = self.state.lock();
        let entry = state.read_path(&path)?;
        let entry = entry.lock();
        entry.file_content(&path).cloned()
    }

    async fn atomic_write(&self, path: PathBuf, data: String) -> Result<()> {
        // The fake write is already atomic: a single locked state mutation.
        self.simulate_random_delay().await;
        let path = normalize_path(path.as_path());
        self.write_file_internal(path, data.to_string())?;

        Ok(())
    }

    async fn save(&self, path: &Path, text: &Rope, line_ending: LineEnding) -> Result<()> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        // `chunks` rewrites "\n" to the requested line ending.
        let content = chunks(text, line_ending).collect();
        if let Some(path) = path.parent() {
            self.create_dir(path).await?;
        }
        self.write_file_internal(path, content)?;
        Ok(())
    }

    async fn canonicalize(&self, path: &Path) -> Result<PathBuf> {
        let path = normalize_path(path);
        self.simulate_random_delay().await;
        let state = self.state.lock();
        if let Some((_, canonical_path)) = state.try_read_path(&path, true) {
            Ok(canonical_path)
        } else {
            Err(anyhow!("path does not exist: {}", path.display()))
        }
    }

    async fn is_file(&self, path: &Path) -> bool {
        let path = normalize_path(path);
        self.simulate_random_delay().await;
        let state = self.state.lock();
        if let Some((entry, _)) = state.try_read_path(&path, true) {
            entry.lock().is_file()
        } else {
            false
        }
    }

    async fn metadata(&self, path: &Path) -> Result<Option<Metadata>> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let mut state = self.state.lock();
        state.metadata_call_count += 1;
        if let Some((mut entry, _)) = state.try_read_path(&path, false) {
            // Like the real implementation: report the symlink target's
            // metadata, but remember that the path itself is a link.
            let is_symlink = entry.lock().is_symlink();
            if is_symlink {
                if let Some(e) = state.try_read_path(&path, true).map(|e| e.0) {
                    entry = e;
                } else {
                    // Dangling symlink: treated as nonexistent.
                    return Ok(None);
                }
            }

            let entry = entry.lock();
            Ok(Some(match &*entry {
                FakeFsEntry::File { inode, mtime, .. } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    is_dir: false,
                    is_symlink,
                },
                FakeFsEntry::Dir { inode, mtime, .. } => Metadata {
                    inode: *inode,
                    mtime: *mtime,
                    is_dir: true,
                    is_symlink,
                },
                // Symlinks were resolved above.
                FakeFsEntry::Symlink { .. } => unreachable!(),
            }))
        } else {
            Ok(None)
        }
    }

    async fn read_link(&self, path: &Path) -> Result<PathBuf> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let state = self.state.lock();
        if let Some((entry, _)) = state.try_read_path(&path, false) {
            let entry = entry.lock();
            if let FakeFsEntry::Symlink { target } = &*entry {
                Ok(target.clone())
            } else {
                Err(anyhow!("not a symlink: {}", path.display()))
            }
        } else {
            Err(anyhow!("path does not exist: {}", path.display()))
        }
    }

    async fn read_dir(
        &self,
        path: &Path,
    ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
        self.simulate_random_delay().await;
        let path = normalize_path(path);
        let mut state = self.state.lock();
        state.read_dir_call_count += 1;
        let entry = state.read_path(&path)?;
        let mut entry = entry.lock();
        let children = entry.dir_entries(&path)?;
        // Snapshot the child paths so the stream doesn't hold the lock.
        let paths = children
            .keys()
            .map(|file_name| Ok(path.join(file_name)))
            .collect::<Vec<_>>();
        Ok(Box::pin(futures::stream::iter(paths)))
    }

    async fn watch(
        &self,
        path: &Path,
        _: Duration,
    ) -> Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>> {
        self.simulate_random_delay().await;
        let (tx, rx) = smol::channel::unbounded();
        self.state.lock().event_txs.push(tx);
        let path = path.to_path_buf();
        let executor = self.executor.clone();
        // Only deliver batches containing at least one event under `path`,
        // with a simulated delay before each delivery.
        Box::pin(futures::StreamExt::filter(rx, move |events| {
            let result = events.iter().any(|event| event.path.starts_with(&path));
            let executor = executor.clone();
            async move {
                executor.simulate_random_delay().await;
                result
            }
        }))
    }

    fn open_repo(&self, abs_dot_git: &Path) -> Option<Arc<Mutex<dyn GitRepository>>> {
        let state = self.state.lock();
        let entry = state.read_path(abs_dot_git).unwrap();
        let mut entry = entry.lock();
        if let FakeFsEntry::Dir { git_repo_state, .. } = &mut *entry {
            // Lazily create the fake repository state on first open.
            let state = git_repo_state
                .get_or_insert_with(|| Arc::new(Mutex::new(FakeGitRepositoryState::default())))
                .clone();
            Some(repository::FakeGitRepository::open(state))
        } else {
            None
        }
    }

    fn is_fake(&self) -> bool {
        true
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &FakeFs {
        self
    }
}
+
+fn chunks(rope: &Rope, line_ending: LineEnding) -> impl Iterator<Item = &str> {
+ rope.chunks().flat_map(move |chunk| {
+ let mut newline = false;
+ chunk.split('\n').flat_map(move |line| {
+ let ending = if newline {
+ Some(line_ending.as_str())
+ } else {
+ None
+ };
+ newline = true;
+ ending.into_iter().chain([line])
+ })
+ })
+}
+
/// Lexically normalizes `path` by dropping `.` components and resolving `..`
/// against the path built so far (without touching the filesystem, so
/// symlinks are not taken into account).
pub fn normalize_path(path: &Path) -> PathBuf {
    let mut components = path.components().peekable();

    // A leading prefix component (e.g. a Windows drive) is preserved as-is.
    let mut normalized = match components.peek().copied() {
        Some(prefix @ Component::Prefix(..)) => {
            components.next();
            PathBuf::from(prefix.as_os_str())
        }
        _ => PathBuf::new(),
    };

    for component in components {
        match component {
            // Prefixes only occur first; the peek above consumed it.
            Component::Prefix(..) => unreachable!(),
            Component::RootDir => normalized.push(component.as_os_str()),
            Component::CurDir => {}
            // `..` removes the last pushed component; popping at the root is
            // a no-op, matching lexical resolution of "/..".
            Component::ParentDir => {
                normalized.pop();
            }
            Component::Normal(name) => normalized.push(name),
        }
    }
    normalized
}
+
+pub fn copy_recursive<'a>(
+ fs: &'a dyn Fs,
+ source: &'a Path,
+ target: &'a Path,
+ options: CopyOptions,
+) -> BoxFuture<'a, Result<()>> {
+ use futures::future::FutureExt;
+
+ async move {
+ let metadata = fs
+ .metadata(source)
+ .await?
+ .ok_or_else(|| anyhow!("path does not exist: {}", source.display()))?;
+ if metadata.is_dir {
+ if !options.overwrite && fs.metadata(target).await.is_ok() {
+ if options.ignore_if_exists {
+ return Ok(());
+ } else {
+ return Err(anyhow!("{target:?} already exists"));
+ }
+ }
+
+ let _ = fs
+ .remove_dir(
+ target,
+ RemoveOptions {
+ recursive: true,
+ ignore_if_not_exists: true,
+ },
+ )
+ .await;
+ fs.create_dir(target).await?;
+ let mut children = fs.read_dir(source).await?;
+ while let Some(child_path) = children.next().await {
+ if let Ok(child_path) = child_path {
+ if let Some(file_name) = child_path.file_name() {
+ let child_target_path = target.join(file_name);
+ copy_recursive(fs, &child_path, &child_target_path, options).await?;
+ }
+ }
+ }
+
+ Ok(())
+ } else {
+ fs.copy_file(source, target, options).await
+ }
+ }
+ .boxed()
+}
+
#[cfg(test)]
mod tests {
    use super::*;
    use gpui2::{Executor, TestDispatcher};
    use rand::prelude::*;
    use serde_json::json;

    // Smoke test for FakeFs: JSON tree insertion, file enumeration, and
    // symlink resolution via `canonicalize`/`load`.
    #[test]
    fn test_fake_fs() {
        // Seeded dispatcher keeps the simulated delays deterministic.
        let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(0));
        let executor = Executor::new(Arc::new(dispatcher));
        let fs = FakeFs::new(executor.clone());
        executor.block(async move {
            fs.insert_tree(
                "/root",
                json!({
                    "dir1": {
                        "a": "A",
                        "b": "B"
                    },
                    "dir2": {
                        "c": "C",
                        "dir3": {
                            "d": "D"
                        }
                    }
                }),
            )
            .await;

            assert_eq!(
                fs.files(),
                vec![
                    PathBuf::from("/root/dir1/a"),
                    PathBuf::from("/root/dir1/b"),
                    PathBuf::from("/root/dir2/c"),
                    PathBuf::from("/root/dir2/dir3/d"),
                ]
            );

            fs.insert_symlink("/root/dir2/link-to-dir3", "./dir3".into())
                .await;

            // Symlinks resolve relative to their containing directory.
            assert_eq!(
                fs.canonicalize("/root/dir2/link-to-dir3".as_ref())
                    .await
                    .unwrap(),
                PathBuf::from("/root/dir2/dir3"),
            );
            assert_eq!(
                fs.canonicalize("/root/dir2/link-to-dir3/d".as_ref())
                    .await
                    .unwrap(),
                PathBuf::from("/root/dir2/dir3/d"),
            );
            assert_eq!(
                fs.load("/root/dir2/link-to-dir3/d".as_ref()).await.unwrap(),
                "D",
            );
        });
    }
}
@@ -0,0 +1,417 @@
+use anyhow::Result;
+use collections::HashMap;
+use git2::{BranchType, StatusShow};
+use parking_lot::Mutex;
+use serde_derive::{Deserialize, Serialize};
+use std::{
+ cmp::Ordering,
+ ffi::OsStr,
+ os::unix::prelude::OsStrExt,
+ path::{Component, Path, PathBuf},
+ sync::Arc,
+ time::SystemTime,
+};
+use sum_tree::{MapSeekTarget, TreeMap};
+use util::ResultExt;
+
+pub use git2::Repository as LibGitRepository;
+
/// A local git branch, as listed by [`GitRepository::branches`].
#[derive(Clone, Debug, Hash, PartialEq)]
pub struct Branch {
    /// Short branch name (e.g. `main`), without the `refs/heads/` prefix.
    pub name: Box<str>,
    /// Timestamp of most recent commit, normalized to Unix Epoch format.
    pub unix_timestamp: Option<i64>,
}
+
/// Abstraction over a git repository, implemented by both the real
/// libgit2-backed repository and an in-memory fake for tests.
#[async_trait::async_trait]
pub trait GitRepository: Send {
    /// Re-read the index from disk so later queries reflect external changes.
    fn reload_index(&self);

    /// Load the staged (index) contents of the file at the given
    /// repo-relative path, if it is tracked and its blob is valid UTF-8.
    fn load_index_text(&self, relative_file_path: &Path) -> Option<String>;

    /// The short name of the branch HEAD currently points at, if any.
    fn branch_name(&self) -> Option<String>;

    /// Get the statuses of all of the files in the index that start with the given
    /// path and have changes with respect to the HEAD commit. This is fast because
    /// the index stores hashes of trees, so that unchanged directories can be skipped.
    fn staged_statuses(&self, path_prefix: &Path) -> TreeMap<RepoPath, GitFileStatus>;

    /// Get the status of a given file in the working directory with respect to
    /// the index. In the common case, when there are no changes, this only requires
    /// an index lookup. The index stores the mtime of each file when it was added,
    /// so there's no work to do if the mtime matches.
    fn unstaged_status(&self, path: &RepoPath, mtime: SystemTime) -> Option<GitFileStatus>;

    /// Get the status of a given file in the working directory with respect to
    /// the HEAD commit. In the common case, when there are no changes, this only
    /// requires an index lookup and blob comparison between the index and the HEAD
    /// commit. The index stores the mtime of each file when it was added, so there's
    /// no need to consider the working directory file if the mtime matches.
    fn status(&self, path: &RepoPath, mtime: SystemTime) -> Option<GitFileStatus>;

    /// List all local branches.
    fn branches(&self) -> Result<Vec<Branch>>;

    /// Check out the named local branch.
    fn change_branch(&self, _: &str) -> Result<()>;

    /// Create a new branch with the given name at the current HEAD commit.
    fn create_branch(&self, _: &str) -> Result<()>;
}
+
+impl std::fmt::Debug for dyn GitRepository {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("dyn GitRepository<...>").finish()
+ }
+}
+
+impl GitRepository for LibGitRepository {
+ fn reload_index(&self) {
+ if let Ok(mut index) = self.index() {
+ _ = index.read(false);
+ }
+ }
+
+ fn load_index_text(&self, relative_file_path: &Path) -> Option<String> {
+ fn logic(repo: &LibGitRepository, relative_file_path: &Path) -> Result<Option<String>> {
+ const STAGE_NORMAL: i32 = 0;
+ let index = repo.index()?;
+
+ // This check is required because index.get_path() unwraps internally :(
+ check_path_to_repo_path_errors(relative_file_path)?;
+
+ let oid = match index.get_path(&relative_file_path, STAGE_NORMAL) {
+ Some(entry) => entry.id,
+ None => return Ok(None),
+ };
+
+ let content = repo.find_blob(oid)?.content().to_owned();
+ Ok(Some(String::from_utf8(content)?))
+ }
+
+ match logic(&self, relative_file_path) {
+ Ok(value) => return value,
+ Err(err) => log::error!("Error loading head text: {:?}", err),
+ }
+ None
+ }
+
+ fn branch_name(&self) -> Option<String> {
+ let head = self.head().log_err()?;
+ let branch = String::from_utf8_lossy(head.shorthand_bytes());
+ Some(branch.to_string())
+ }
+
+ fn staged_statuses(&self, path_prefix: &Path) -> TreeMap<RepoPath, GitFileStatus> {
+ let mut map = TreeMap::default();
+
+ let mut options = git2::StatusOptions::new();
+ options.pathspec(path_prefix);
+ options.show(StatusShow::Index);
+
+ if let Some(statuses) = self.statuses(Some(&mut options)).log_err() {
+ for status in statuses.iter() {
+ let path = RepoPath(PathBuf::from(OsStr::from_bytes(status.path_bytes())));
+ let status = status.status();
+ if !status.contains(git2::Status::IGNORED) {
+ if let Some(status) = read_status(status) {
+ map.insert(path, status)
+ }
+ }
+ }
+ }
+ map
+ }
+
+ fn unstaged_status(&self, path: &RepoPath, mtime: SystemTime) -> Option<GitFileStatus> {
+ // If the file has not changed since it was added to the index, then
+ // there can't be any changes.
+ if matches_index(self, path, mtime) {
+ return None;
+ }
+
+ let mut options = git2::StatusOptions::new();
+ options.pathspec(&path.0);
+ options.disable_pathspec_match(true);
+ options.include_untracked(true);
+ options.recurse_untracked_dirs(true);
+ options.include_unmodified(true);
+ options.show(StatusShow::Workdir);
+
+ let statuses = self.statuses(Some(&mut options)).log_err()?;
+ let status = statuses.get(0).and_then(|s| read_status(s.status()));
+ status
+ }
+
+ fn status(&self, path: &RepoPath, mtime: SystemTime) -> Option<GitFileStatus> {
+ let mut options = git2::StatusOptions::new();
+ options.pathspec(&path.0);
+ options.disable_pathspec_match(true);
+ options.include_untracked(true);
+ options.recurse_untracked_dirs(true);
+ options.include_unmodified(true);
+
+ // If the file has not changed since it was added to the index, then
+ // there's no need to examine the working directory file: just compare
+ // the blob in the index to the one in the HEAD commit.
+ if matches_index(self, path, mtime) {
+ options.show(StatusShow::Index);
+ }
+
+ let statuses = self.statuses(Some(&mut options)).log_err()?;
+ let status = statuses.get(0).and_then(|s| read_status(s.status()));
+ status
+ }
+
+ fn branches(&self) -> Result<Vec<Branch>> {
+ let local_branches = self.branches(Some(BranchType::Local))?;
+ let valid_branches = local_branches
+ .filter_map(|branch| {
+ branch.ok().and_then(|(branch, _)| {
+ let name = branch.name().ok().flatten().map(Box::from)?;
+ let timestamp = branch.get().peel_to_commit().ok()?.time();
+ let unix_timestamp = timestamp.seconds();
+ let timezone_offset = timestamp.offset_minutes();
+ let utc_offset =
+ time::UtcOffset::from_whole_seconds(timezone_offset * 60).ok()?;
+ let unix_timestamp =
+ time::OffsetDateTime::from_unix_timestamp(unix_timestamp).ok()?;
+ Some(Branch {
+ name,
+ unix_timestamp: Some(unix_timestamp.to_offset(utc_offset).unix_timestamp()),
+ })
+ })
+ })
+ .collect();
+ Ok(valid_branches)
+ }
+ fn change_branch(&self, name: &str) -> Result<()> {
+ let revision = self.find_branch(name, BranchType::Local)?;
+ let revision = revision.get();
+ let as_tree = revision.peel_to_tree()?;
+ self.checkout_tree(as_tree.as_object(), None)?;
+ self.set_head(
+ revision
+ .name()
+ .ok_or_else(|| anyhow::anyhow!("Branch name could not be retrieved"))?,
+ )?;
+ Ok(())
+ }
+ fn create_branch(&self, name: &str) -> Result<()> {
+ let current_commit = self.head()?.peel_to_commit()?;
+ self.branch(name, ¤t_commit, false)?;
+
+ Ok(())
+ }
+}
+
+fn matches_index(repo: &LibGitRepository, path: &RepoPath, mtime: SystemTime) -> bool {
+ if let Some(index) = repo.index().log_err() {
+ if let Some(entry) = index.get_path(&path, 0) {
+ if let Some(mtime) = mtime.duration_since(SystemTime::UNIX_EPOCH).log_err() {
+ if entry.mtime.seconds() == mtime.as_secs() as i32
+ && entry.mtime.nanoseconds() == mtime.subsec_nanos()
+ {
+ return true;
+ }
+ }
+ }
+ }
+ false
+}
+
+fn read_status(status: git2::Status) -> Option<GitFileStatus> {
+ if status.contains(git2::Status::CONFLICTED) {
+ Some(GitFileStatus::Conflict)
+ } else if status.intersects(
+ git2::Status::WT_MODIFIED
+ | git2::Status::WT_RENAMED
+ | git2::Status::INDEX_MODIFIED
+ | git2::Status::INDEX_RENAMED,
+ ) {
+ Some(GitFileStatus::Modified)
+ } else if status.intersects(git2::Status::WT_NEW | git2::Status::INDEX_NEW) {
+ Some(GitFileStatus::Added)
+ } else {
+ None
+ }
+}
+
/// In-memory [`GitRepository`] implementation used by tests.
#[derive(Debug, Clone, Default)]
pub struct FakeGitRepository {
    // Shared, lockable state so tests can mutate the fake repo externally.
    state: Arc<Mutex<FakeGitRepositoryState>>,
}
+
/// Mutable state backing a [`FakeGitRepository`], shared via `Arc<Mutex<_>>`.
#[derive(Debug, Clone, Default)]
pub struct FakeGitRepositoryState {
    // Simulated staged file contents, keyed by repo-relative path.
    pub index_contents: HashMap<PathBuf, String>,
    // Simulated per-file statuses, returned by `status`/`staged_statuses`.
    pub worktree_statuses: HashMap<RepoPath, GitFileStatus>,
    // Simulated current branch name, if any.
    pub branch_name: Option<String>,
}
+
+impl FakeGitRepository {
+ pub fn open(state: Arc<Mutex<FakeGitRepositoryState>>) -> Arc<Mutex<dyn GitRepository>> {
+ Arc::new(Mutex::new(FakeGitRepository { state }))
+ }
+}
+
+#[async_trait::async_trait]
+impl GitRepository for FakeGitRepository {
+ fn reload_index(&self) {}
+
+ fn load_index_text(&self, path: &Path) -> Option<String> {
+ let state = self.state.lock();
+ state.index_contents.get(path).cloned()
+ }
+
+ fn branch_name(&self) -> Option<String> {
+ let state = self.state.lock();
+ state.branch_name.clone()
+ }
+
+ fn staged_statuses(&self, path_prefix: &Path) -> TreeMap<RepoPath, GitFileStatus> {
+ let mut map = TreeMap::default();
+ let state = self.state.lock();
+ for (repo_path, status) in state.worktree_statuses.iter() {
+ if repo_path.0.starts_with(path_prefix) {
+ map.insert(repo_path.to_owned(), status.to_owned());
+ }
+ }
+ map
+ }
+
+ fn unstaged_status(&self, _path: &RepoPath, _mtime: SystemTime) -> Option<GitFileStatus> {
+ None
+ }
+
+ fn status(&self, path: &RepoPath, _mtime: SystemTime) -> Option<GitFileStatus> {
+ let state = self.state.lock();
+ state.worktree_statuses.get(path).cloned()
+ }
+
+ fn branches(&self) -> Result<Vec<Branch>> {
+ Ok(vec![])
+ }
+
+ fn change_branch(&self, name: &str) -> Result<()> {
+ let mut state = self.state.lock();
+ state.branch_name = Some(name.to_owned());
+ Ok(())
+ }
+
+ fn create_branch(&self, name: &str) -> Result<()> {
+ let mut state = self.state.lock();
+ state.branch_name = Some(name.to_owned());
+ Ok(())
+ }
+}
+
+fn check_path_to_repo_path_errors(relative_file_path: &Path) -> Result<()> {
+ match relative_file_path.components().next() {
+ None => anyhow::bail!("repo path should not be empty"),
+ Some(Component::Prefix(_)) => anyhow::bail!(
+ "repo path `{}` should be relative, not a windows prefix",
+ relative_file_path.to_string_lossy()
+ ),
+ Some(Component::RootDir) => {
+ anyhow::bail!(
+ "repo path `{}` should be relative",
+ relative_file_path.to_string_lossy()
+ )
+ }
+ Some(Component::CurDir) => {
+ anyhow::bail!(
+ "repo path `{}` should not start with `.`",
+ relative_file_path.to_string_lossy()
+ )
+ }
+ Some(Component::ParentDir) => {
+ anyhow::bail!(
+ "repo path `{}` should not start with `..`",
+ relative_file_path.to_string_lossy()
+ )
+ }
+ _ => Ok(()),
+ }
+}
+
/// Coarse per-file git status, as surfaced in the UI.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum GitFileStatus {
    /// Newly added file (untracked or staged as new).
    Added,
    /// Modified or renamed relative to the comparison target.
    Modified,
    /// File has unresolved merge conflicts.
    Conflict,
}
+
+impl GitFileStatus {
+ pub fn merge(
+ this: Option<GitFileStatus>,
+ other: Option<GitFileStatus>,
+ prefer_other: bool,
+ ) -> Option<GitFileStatus> {
+ if prefer_other {
+ return other;
+ } else {
+ match (this, other) {
+ (Some(GitFileStatus::Conflict), _) | (_, Some(GitFileStatus::Conflict)) => {
+ Some(GitFileStatus::Conflict)
+ }
+ (Some(GitFileStatus::Modified), _) | (_, Some(GitFileStatus::Modified)) => {
+ Some(GitFileStatus::Modified)
+ }
+ (Some(GitFileStatus::Added), _) | (_, Some(GitFileStatus::Added)) => {
+ Some(GitFileStatus::Added)
+ }
+ _ => None,
+ }
+ }
+ }
+}
+
/// A path relative to a git repository's root. Construction via
/// [`RepoPath::new`] debug-asserts that the path is relative.
#[derive(Clone, Debug, Ord, Hash, PartialOrd, Eq, PartialEq)]
pub struct RepoPath(pub PathBuf);
+
+impl RepoPath {
+ pub fn new(path: PathBuf) -> Self {
+ debug_assert!(path.is_relative(), "Repo paths must be relative");
+
+ RepoPath(path)
+ }
+}
+
+impl From<&Path> for RepoPath {
+ fn from(value: &Path) -> Self {
+ RepoPath::new(value.to_path_buf())
+ }
+}
+
+impl From<PathBuf> for RepoPath {
+ fn from(value: PathBuf) -> Self {
+ RepoPath::new(value)
+ }
+}
+
+impl Default for RepoPath {
+ fn default() -> Self {
+ RepoPath(PathBuf::new())
+ }
+}
+
+impl AsRef<Path> for RepoPath {
+ fn as_ref(&self) -> &Path {
+ self.0.as_ref()
+ }
+}
+
+impl std::ops::Deref for RepoPath {
+ type Target = PathBuf;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
/// Seek target that positions a map cursor just past the wrapped path and
/// every path underneath it (its descendants).
#[derive(Debug)]
pub struct RepoPathDescendants<'a>(pub &'a Path);
+
+impl<'a> MapSeekTarget<RepoPath> for RepoPathDescendants<'a> {
+ fn cmp_cursor(&self, key: &RepoPath) -> Ordering {
+ if key.starts_with(&self.0) {
+ Ordering::Greater
+ } else {
+ self.0.cmp(key)
+ }
+ }
+}