@@ -349,12 +349,22 @@ impl BackgroundExecutor {
/// How many CPUs are available to the dispatcher.
pub fn num_cpus(&self) -> usize {
#[cfg(any(test, feature = "test-support"))]
- if self.dispatcher.as_test().is_some() {
- return 4;
+ if let Some(test) = self.dispatcher.as_test() {
+ return test.num_cpus_override().unwrap_or(4);
}
num_cpus::get()
}
+ /// Override the number of CPUs reported by this executor in tests.
+ /// A `count` of 0 clears the override. Panics if not called on a test executor.
+ #[cfg(any(test, feature = "test-support"))]
+ pub fn set_num_cpus(&self, count: usize) {
+ self.dispatcher
+ .as_test()
+ .expect("set_num_cpus can only be called on a test executor")
+ .set_num_cpus(count);
+ }
+
/// Whether we're on the main thread.
pub fn is_main_thread(&self) -> bool {
self.dispatcher.is_main_thread()
@@ -1,7 +1,10 @@
use crate::{PlatformDispatcher, Priority, RunnableVariant};
use scheduler::{Clock, Scheduler, SessionId, TestScheduler, TestSchedulerConfig, Yield};
use std::{
- sync::Arc,
+ sync::{
+ Arc,
+ atomic::{AtomicUsize, Ordering},
+ },
time::{Duration, Instant},
};
@@ -13,6 +16,7 @@ use std::{
pub struct TestDispatcher {
session_id: SessionId,
scheduler: Arc<TestScheduler>,
+ num_cpus_override: Arc<AtomicUsize>,
}
impl TestDispatcher {
@@ -31,6 +35,7 @@ impl TestDispatcher {
TestDispatcher {
session_id,
scheduler,
+ num_cpus_override: Arc::new(AtomicUsize::new(0)),
}
}
@@ -65,6 +70,20 @@ impl TestDispatcher {
pub fn run_until_parked(&self) {
while self.tick(false) {}
}
+
+ /// Override the value returned by `BackgroundExecutor::num_cpus()` in tests.
+ /// A value of 0 means no override (the default of 4 is used).
+ pub fn set_num_cpus(&self, count: usize) {
+ self.num_cpus_override.store(count, Ordering::SeqCst);
+ }
+
+ /// Returns the overridden CPU count, or `None` if no override is set.
+ pub fn num_cpus_override(&self) -> Option<usize> {
+ match self.num_cpus_override.load(Ordering::SeqCst) {
+ 0 => None,
+ n => Some(n),
+ }
+ }
}
impl Clone for TestDispatcher {
@@ -73,6 +92,7 @@ impl Clone for TestDispatcher {
Self {
session_id,
scheduler: self.scheduler.clone(),
+ num_cpus_override: self.num_cpus_override.clone(),
}
}
}
@@ -335,7 +335,8 @@ impl Search {
assert!(num_cpus > 0);
_executor
.scoped(|scope| {
- for _ in 0..num_cpus - 1 {
+ let worker_count = (num_cpus - 1).max(1);
+ for _ in 0..worker_count {
let worker = Worker {
query: query.clone(),
open_buffers: open_buffers.clone(),
@@ -290,6 +290,53 @@ async fn test_remote_project_search(cx: &mut TestAppContext, server_cx: &mut Tes
.await;
}
+#[gpui::test]
+async fn test_remote_project_search_single_cpu(
+ cx: &mut TestAppContext,
+ server_cx: &mut TestAppContext,
+) {
+ let fs = FakeFs::new(server_cx.executor());
+ fs.insert_tree(
+ path!("/code"),
+ json!({
+ "project1": {
+ ".git": {},
+ "README.md": "# project 1",
+ "src": {
+ "lib.rs": "fn one() -> usize { 1 }"
+ }
+ },
+ }),
+ )
+ .await;
+
+ // Simulate a single-CPU environment (e.g. a devcontainer with 1 visible CPU).
+ // Previously this made the project-search worker pool spawn num_cpus - 1 = 0 workers,
+ // silently dropping all search channels and yielding zero results (regression test).
+ server_cx.executor().set_num_cpus(1);
+
+ let (project, _) = init_test(&fs, cx, server_cx).await;
+
+ project
+ .update(cx, |project, cx| {
+ project.find_or_create_worktree(path!("/code/project1"), true, cx)
+ })
+ .await
+ .unwrap();
+
+ cx.run_until_parked();
+
+ do_search_and_assert(
+ &project,
+ "project",
+ Default::default(),
+ false,
+ &[path!("project1/README.md")],
+ cx.clone(),
+ )
+ .await;
+}
+
#[gpui::test]
async fn test_remote_project_search_inclusion(
cx: &mut TestAppContext,