Auto merge of #82300 - andersk:libtest-id, r=Amanieu
libtest: Index tests by a unique TestId

This more robustly avoids problems with duplicate `TestDesc`. See #81852 and #82274. Cc `@Mark-Simulacrum`.
commit 5e73bd1040
5 changed files with 75 additions and 41 deletions
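
Why a unique id? A `TestDesc` can legitimately be duplicated (two tests sharing the same name and flags), and the old code used it as the `HashMap` key for running tests, so duplicate descriptions clobbered each other's entries. Keying by a freshly assigned integer makes every entry distinct. The following is a minimal standalone sketch of the idea, using illustrative stand-in names (`Desc`, `Id`) rather than libtest's actual types:

use std::collections::HashMap;

// Illustrative stand-ins for libtest's TestDesc and TestId.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct Desc {
    name: String,
    ignore: bool,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct Id(usize);

fn main() {
    // Two distinct tests that happen to share an identical description.
    let tests = vec![
        Desc { name: "dup".to_string(), ignore: false },
        Desc { name: "dup".to_string(), ignore: false },
    ];

    // Old scheme: keyed by the description. The second insert silently
    // replaces the first, so the runner loses track of one test.
    let mut by_desc: HashMap<Desc, &str> = HashMap::new();
    for desc in &tests {
        by_desc.insert(desc.clone(), "running");
    }
    assert_eq!(by_desc.len(), 1); // one entry for two tests

    // New scheme: keyed by a unique index assigned with enumerate(),
    // mirroring the `.enumerate().map(|(i, e)| (TestId(i), e))` hunk below.
    let mut by_id: HashMap<Id, &Desc> = HashMap::new();
    for (i, desc) in tests.iter().enumerate() {
        by_id.insert(Id(i), desc);
    }
    assert_eq!(by_id.len(), 2); // one entry per test
}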
library/test/src/bench.rs
@@ -2,7 +2,11 @@
 pub use std::hint::black_box;
 
 use super::{
-    event::CompletedTest, options::BenchMode, test_result::TestResult, types::TestDesc, Sender,
+    event::CompletedTest,
+    options::BenchMode,
+    test_result::TestResult,
+    types::{TestDesc, TestId},
+    Sender,
 };
 
 use crate::stats;
@@ -177,8 +181,13 @@ where
     }
 }
 
-pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<CompletedTest>, nocapture: bool, f: F)
-where
+pub fn benchmark<F>(
+    id: TestId,
+    desc: TestDesc,
+    monitor_ch: Sender<CompletedTest>,
+    nocapture: bool,
+    f: F,
+) where
     F: FnMut(&mut Bencher),
 {
     let mut bs = Bencher { mode: BenchMode::Auto, summary: None, bytes: 0 };
@@ -213,7 +222,7 @@ where
     };
 
     let stdout = data.lock().unwrap().to_vec();
-    let message = CompletedTest::new(desc, test_result, None, stdout);
+    let message = CompletedTest::new(id, desc, test_result, None, stdout);
     monitor_ch.send(message).unwrap();
 }
 
library/test/src/event.rs
@@ -3,10 +3,11 @@
 
 use super::test_result::TestResult;
 use super::time::TestExecTime;
-use super::types::TestDesc;
+use super::types::{TestDesc, TestId};
 
 #[derive(Debug, Clone)]
 pub struct CompletedTest {
+    pub id: TestId,
     pub desc: TestDesc,
     pub result: TestResult,
     pub exec_time: Option<TestExecTime>,
@@ -15,12 +16,13 @@ pub struct CompletedTest {
 
 impl CompletedTest {
     pub fn new(
+        id: TestId,
         desc: TestDesc,
         result: TestResult,
         exec_time: Option<TestExecTime>,
         stdout: Vec<u8>,
     ) -> Self {
-        Self { desc, result, exec_time, stdout }
+        Self { id, desc, result, exec_time, stdout }
     }
 }
 
library/test/src/lib.rs
@@ -54,7 +54,7 @@ pub mod test {
         time::{TestExecTime, TestTimeOptions},
         types::{
            DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
-            TestDescAndFn, TestName, TestType,
+            TestDescAndFn, TestId, TestName, TestType,
        },
    };
 }
@@ -215,9 +215,10 @@ where
 
 // Use a deterministic hasher
 type TestMap =
-    HashMap<TestDesc, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
+    HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
 
 struct TimeoutEntry {
+    id: TestId,
     desc: TestDesc,
     timeout: Instant,
 }
@@ -249,7 +250,9 @@ where
 
     let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
         .into_iter()
-        .partition(|e| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
+        .enumerate()
+        .map(|(i, e)| (TestId(i), e))
+        .partition(|(_, e)| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
 
     let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
 
@@ -278,7 +281,7 @@ where
                 break;
             }
             let timeout_entry = timeout_queue.pop_front().unwrap();
-            if running_tests.contains_key(&timeout_entry.desc) {
+            if running_tests.contains_key(&timeout_entry.id) {
                 timed_out.push(timeout_entry.desc);
             }
         }
@@ -294,11 +297,11 @@ where
 
     if concurrency == 1 {
         while !remaining.is_empty() {
-            let test = remaining.pop().unwrap();
+            let (id, test) = remaining.pop().unwrap();
             let event = TestEvent::TeWait(test.desc.clone());
             notify_about_test_event(event)?;
             let join_handle =
-                run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No);
+                run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone(), Concurrent::No);
             assert!(join_handle.is_none());
             let completed_test = rx.recv().unwrap();
 
@@ -308,7 +311,7 @@ where
     } else {
         while pending > 0 || !remaining.is_empty() {
             while pending < concurrency && !remaining.is_empty() {
-                let test = remaining.pop().unwrap();
+                let (id, test) = remaining.pop().unwrap();
                 let timeout = time::get_default_test_timeout();
                 let desc = test.desc.clone();
 
@@ -317,13 +320,14 @@ where
                 let join_handle = run_test(
                     opts,
                     !opts.run_tests,
+                    id,
                     test,
                     run_strategy,
                     tx.clone(),
                     Concurrent::Yes,
                 );
-                running_tests.insert(desc.clone(), RunningTest { join_handle });
-                timeout_queue.push_back(TimeoutEntry { desc, timeout });
+                running_tests.insert(id, RunningTest { join_handle });
+                timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                 pending += 1;
             }
 
@@ -352,13 +356,12 @@ where
             }
 
             let mut completed_test = res.unwrap();
-            if let Some(running_test) = running_tests.remove(&completed_test.desc) {
-                if let Some(join_handle) = running_test.join_handle {
-                    if let Err(_) = join_handle.join() {
-                        if let TrOk = completed_test.result {
-                            completed_test.result =
-                                TrFailedMsg("panicked after reporting success".to_string());
-                        }
+            let running_test = running_tests.remove(&completed_test.id).unwrap();
+            if let Some(join_handle) = running_test.join_handle {
+                if let Err(_) = join_handle.join() {
+                    if let TrOk = completed_test.result {
+                        completed_test.result =
+                            TrFailedMsg("panicked after reporting success".to_string());
                     }
                 }
             }
@@ -371,10 +374,10 @@ where
 
     if opts.bench_benchmarks {
         // All benchmarks run at the end, in serial.
-        for b in filtered_benchs {
+        for (id, b) in filtered_benchs {
             let event = TestEvent::TeWait(b.desc.clone());
             notify_about_test_event(event)?;
-            run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No);
+            run_test(opts, false, id, b, run_strategy, tx.clone(), Concurrent::No);
             let completed_test = rx.recv().unwrap();
 
             let event = TestEvent::TeResult(completed_test);
@@ -448,6 +451,7 @@ pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
 pub fn run_test(
     opts: &TestOpts,
     force_ignore: bool,
+    id: TestId,
     test: TestDescAndFn,
     strategy: RunStrategy,
     monitor_ch: Sender<CompletedTest>,
@@ -461,7 +465,7 @@ pub fn run_test(
         && !cfg!(target_os = "emscripten");
 
     if force_ignore || desc.ignore || ignore_because_no_process_support {
-        let message = CompletedTest::new(desc, TrIgnored, None, Vec::new());
+        let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
         monitor_ch.send(message).unwrap();
         return None;
     }
@@ -474,6 +478,7 @@ pub fn run_test(
     }
 
     fn run_test_inner(
+        id: TestId,
         desc: TestDesc,
         monitor_ch: Sender<CompletedTest>,
         testfn: Box<dyn FnOnce() + Send>,
@@ -484,6 +489,7 @@ pub fn run_test(
 
         let runtest = move || match opts.strategy {
             RunStrategy::InProcess => run_test_in_process(
+                id,
                 desc,
                 opts.nocapture,
                 opts.time.is_some(),
@@ -492,6 +498,7 @@ pub fn run_test(
                 opts.time,
             ),
             RunStrategy::SpawnPrimary => spawn_test_subprocess(
+                id,
                 desc,
                 opts.nocapture,
                 opts.time.is_some(),
@@ -530,14 +537,14 @@ pub fn run_test(
     match testfn {
         DynBenchFn(bencher) => {
             // Benchmarks aren't expected to panic, so we run them all in-process.
-            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
+            crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, |harness| {
                 bencher.run(harness)
             });
             None
         }
         StaticBenchFn(benchfn) => {
             // Benchmarks aren't expected to panic, so we run them all in-process.
-            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, benchfn);
+            crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, benchfn);
             None
         }
         DynTestFn(f) => {
@@ -546,6 +553,7 @@ pub fn run_test(
                 _ => panic!("Cannot run dynamic test fn out-of-process"),
             };
             run_test_inner(
+                id,
                 desc,
                 monitor_ch,
                 Box::new(move || __rust_begin_short_backtrace(f)),
@@ -553,6 +561,7 @@ pub fn run_test(
             )
         }
         StaticTestFn(f) => run_test_inner(
+            id,
             desc,
             monitor_ch,
             Box::new(move || __rust_begin_short_backtrace(f)),
@@ -571,6 +580,7 @@ fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
 }
 
 fn run_test_in_process(
+    id: TestId,
     desc: TestDesc,
     nocapture: bool,
     report_time: bool,
@@ -599,11 +609,12 @@ fn run_test_in_process(
         Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time),
     };
     let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
-    let message = CompletedTest::new(desc, test_result, exec_time, stdout);
+    let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
     monitor_ch.send(message).unwrap();
 }
 
 fn spawn_test_subprocess(
+    id: TestId,
     desc: TestDesc,
     nocapture: bool,
     report_time: bool,
@@ -653,7 +664,7 @@ fn spawn_test_subprocess(
         (result, test_output, exec_time)
     })();
 
-    let message = CompletedTest::new(desc, result, exec_time, test_output);
+    let message = CompletedTest::new(id, desc, result, exec_time, test_output);
     monitor_ch.send(message).unwrap();
 }
 
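A side note on the `TestMap` hunk above: the map keeps `BuildHasherDefault<DefaultHasher>` instead of the default `RandomState`, matching the "Use a deterministic hasher" comment, so hashing stays unseeded and reproducible across runs. A minimal sketch of that pattern (not libtest's code; `IdMap` is an illustrative name):

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;

// An unseeded, deterministic hasher; HashMap::new() would instead use
// a randomly seeded RandomState.
type IdMap<V> = HashMap<usize, V, BuildHasherDefault<DefaultHasher>>;

fn main() {
    // IdMap::default() works because BuildHasherDefault implements Default.
    let mut running: IdMap<&str> = IdMap::default();
    running.insert(0, "running");
    running.insert(1, "running");
    assert!(running.contains_key(&0));
}
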
library/test/src/tests.rs
@@ -94,7 +94,7 @@ pub fn do_not_run_ignored_tests() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_ne!(result, TrOk);
 }
@@ -113,7 +113,7 @@ pub fn ignored_tests_result_in_ignored() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrIgnored);
 }
@@ -136,7 +136,7 @@ fn test_should_panic() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrOk);
 }
@@ -159,7 +159,7 @@ fn test_should_panic_good_message() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrOk);
 }
@@ -187,7 +187,7 @@ fn test_should_panic_bad_message() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrFailedMsg(failed_msg.to_string()));
 }
@@ -219,7 +219,7 @@ fn test_should_panic_non_string_message_type() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrFailedMsg(failed_msg));
 }
@@ -243,7 +243,15 @@ fn test_should_panic_but_succeeds() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(
+        &TestOpts::new(),
+        false,
+        TestId(0),
+        desc,
+        RunStrategy::InProcess,
+        tx,
+        Concurrent::No,
+    );
     let result = rx.recv().unwrap().result;
     assert_eq!(
         result,
@@ -270,7 +278,7 @@ fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
 
     let test_opts = TestOpts { time_options, ..TestOpts::new() };
     let (tx, rx) = channel();
-    run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let exec_time = rx.recv().unwrap().exec_time;
     exec_time
 }
@@ -305,7 +313,7 @@ fn time_test_failure_template(test_type: TestType) -> TestResult {
 
     let test_opts = TestOpts { time_options: Some(time_options), ..TestOpts::new() };
     let (tx, rx) = channel();
-    run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
 
     result
@@ -637,7 +645,7 @@ pub fn test_bench_no_iter() {
         test_type: TestType::Unknown,
     };
 
-    crate::bench::benchmark(desc, tx, true, f);
+    crate::bench::benchmark(TestId(0), desc, tx, true, f);
     rx.recv().unwrap();
 }
 
@@ -657,7 +665,7 @@ pub fn test_bench_iter() {
         test_type: TestType::Unknown,
     };
 
-    crate::bench::benchmark(desc, tx, true, f);
+    crate::bench::benchmark(TestId(0), desc, tx, true, f);
     rx.recv().unwrap();
 }
 
library/test/src/types.rs
@@ -112,9 +112,13 @@ impl fmt::Debug for TestFn {
     }
 }
 
+// A unique integer associated with each test.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+pub struct TestId(pub usize);
+
 // The definition of a single test. A test runner will run a list of
 // these.
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(Clone, Debug)]
 pub struct TestDesc {
     pub name: TestName,
     pub ignore: bool,