diff --git a/Cargo.lock b/Cargo.lock
index 5e8a52baedb..14eb4eaeb3f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -468,10 +468,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7"
 dependencies = [
  "async-trait",
+ "indexmap 1.9.3",
  "lazy_static",
  "nom",
  "pathdiff",
+ "ron",
  "serde",
+ "serde_json",
  "toml 0.5.11",
 ]

@@ -1164,6 +1167,7 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
 dependencies = [
  "autocfg",
  "hashbrown 0.12.3",
+ "serde",
 ]

 [[package]]
@@ -1174,6 +1178,7 @@ checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
 dependencies = [
  "equivalent",
  "hashbrown 0.14.0",
+ "serde",
 ]

 [[package]]
@@ -1610,6 +1615,7 @@ dependencies = [
  "futures-sink",
  "futures-util",
  "indexmap 1.9.3",
+ "indexmap 2.0.0",
  "libc",
  "log",
  "memchr",
@@ -2195,6 +2201,18 @@ dependencies = [
  "winapi",
 ]

+[[package]]
+name = "ron"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a"
+dependencies = [
+ "base64 0.13.1",
+ "bitflags 1.3.2",
+ "indexmap 1.9.3",
+ "serde",
+]
+
 [[package]]
 name = "rustc-demangle"
 version = "0.1.23"
@@ -2402,6 +2420,7 @@ version = "1.0.107"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
 dependencies = [
+ "indexmap 2.0.0",
  "itoa",
  "ryu",
  "serde",
@@ -2928,6 +2947,7 @@ version = "0.5.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
 dependencies = [
+ "indexmap 1.9.3",
  "serde",
 ]

diff --git a/cargo-nextest/src/dispatch.rs b/cargo-nextest/src/dispatch.rs
index 7c27215b9d4..f44d9e5f509 100644
--- a/cargo-nextest/src/dispatch.rs
+++ b/cargo-nextest/src/dispatch.rs
@@ -16,8 +16,9 @@ use nextest_metadata::{BinaryListSummary, BuildPlatform};
 use nextest_runner::{
     cargo_config::{CargoConfigs, EnvironmentMap, TargetTriple},
     config::{
-        get_num_cpus, NextestConfig, NextestProfile, NextestVersionConfig, NextestVersionEval,
-        PreBuildPlatform, RetryPolicy, TestGroup, TestThreads, ToolConfigFile, VersionOnlyConfig,
+        get_num_cpus, ConfigExperimental, NextestConfig, NextestProfile, NextestVersionConfig,
+        NextestVersionEval, PreBuildPlatform, RetryPolicy, TestGroup, TestThreads, ToolConfigFile,
+        VersionOnlyConfig,
     },
     double_spawn::DoubleSpawnInfo,
     errors::WriteTestListError,
@@ -29,7 +30,7 @@ use nextest_runner::{
     platform::BuildPlatforms,
     reporter::{FinalStatusLevel, StatusLevel, TestOutputDisplay, TestReporterBuilder},
     reuse_build::{archive_to_file, ArchiveReporter, MetadataOrPath, PathMapper, ReuseBuildInfo},
-    runner::{configure_handle_inheritance, TestRunnerBuilder},
+    runner::{configure_handle_inheritance, RunStatsFailureKind, TestRunnerBuilder},
     show_config::{ShowNextestVersion, ShowTestGroupSettings, ShowTestGroups, ShowTestGroupsMode},
     signal::SignalHandlerKind,
     target_runner::{PlatformRunner, TargetRunner},
@@ -39,6 +40,7 @@ use once_cell::sync::OnceCell;
 use owo_colors::{OwoColorize, Style};
 use semver::Version;
 use std::{
+    collections::BTreeSet,
     env::VarError,
     fmt::Write as _,
     io::{Cursor, Write},
@@ -229,12 +231,14 @@ impl ConfigOpts {
         &self,
         workspace_root: &Utf8Path,
         graph: &PackageGraph,
+        experimental: &BTreeSet<ConfigExperimental>,
     ) -> Result<NextestConfig, ExpectedError> {
         NextestConfig::from_sources(
             workspace_root,
             graph,
             self.config_file.as_deref(),
             &self.tool_config_files,
+            experimental,
         )
         .map_err(ExpectedError::config_parse_error)
     }
@@ -483,10 +487,12 @@ struct TestBuildFilter {
 }

 impl TestBuildFilter {
+    #[allow(clippy::too_many_arguments)]
     fn compute_test_list<'g>(
         &self,
         ctx: &TestExecuteContext<'_>,
         graph: &'g PackageGraph,
+        workspace_root: Utf8PathBuf,
         binary_list: Arc<BinaryList>,
         test_filter_builder: TestFilterBuilder,
         env: EnvironmentMap,
@@ -511,6 +517,7 @@ impl TestBuildFilter {
             test_artifacts,
             rust_build_meta,
             &test_filter_builder,
+            workspace_root,
             env,
             // TODO: do we need to allow customizing this?
             get_num_cpus(),
@@ -976,9 +983,23 @@ impl BaseApp {
             .make_version_only_config(&self.workspace_root)?;
         self.check_version_config_initial(version_only_config.nextest_version())?;

-        let config = self
-            .config_opts
-            .make_config(&self.workspace_root, self.graph())?;
+        let experimental = version_only_config.experimental();
+        if !experimental.is_empty() {
+            log::info!(
+                "experimental features enabled: {}",
+                experimental
+                    .iter()
+                    .map(|x| x.to_string())
+                    .collect::<Vec<_>>()
+                    .join(", ")
+            );
+        }
+
+        let config = self.config_opts.make_config(
+            &self.workspace_root,
+            self.graph(),
+            version_only_config.experimental(),
+        )?;

         Ok((version_only_config, config))
     }
@@ -1271,6 +1292,7 @@ impl App {
         self.build_filter.compute_test_list(
             ctx,
             self.base.graph(),
+            self.base.workspace_root.clone(),
             binary_list,
             test_filter_builder,
             env,
@@ -1465,7 +1487,7 @@ impl App {
         let runner = runner_builder.build(
             &test_list,
-            profile,
+            &profile,
             handler,
             double_spawn.clone(),
             target_runner.clone(),
@@ -1479,7 +1501,20 @@ impl App {
         self.base
             .check_version_config_final(version_only_config.nextest_version())?;
         if !run_stats.is_success() {
-            return Err(ExpectedError::test_run_failed());
+            match run_stats.failure_kind() {
+                Some(RunStatsFailureKind::SetupScript) => {
+                    return Err(ExpectedError::setup_script_failed());
+                }
+                Some(RunStatsFailureKind::Test) => {
+                    return Err(ExpectedError::test_run_failed());
+                }
+                None => {
+                    // XXX The final number of tests run was less than the initial number. This
+                    // should only happen if tests failed or were canceled partway through; be
+                    // conservative and treat it as a test run failure.
+                    return Err(ExpectedError::test_run_failed());
+                }
+            }
         }
         Ok(())
     }
diff --git a/cargo-nextest/src/errors.rs b/cargo-nextest/src/errors.rs
index d969bec031a..fc3a0beff1b 100644
--- a/cargo-nextest/src/errors.rs
+++ b/cargo-nextest/src/errors.rs
@@ -184,6 +184,8 @@ pub enum ExpectedError {
         #[from]
         err: ShowTestGroupsError,
     },
+    #[error("setup script failed")]
+    SetupScriptFailed,
     #[error("test run failed")]
     TestRunFailed,
     #[cfg(feature = "self-update")]
@@ -352,6 +354,10 @@ impl ExpectedError {
         Self::FilterExpressionParseError { all_errors }
     }

+    pub(crate) fn setup_script_failed() -> Self {
+        Self::SetupScriptFailed
+    }
+
     pub(crate) fn test_run_failed() -> Self {
         Self::TestRunFailed
     }
@@ -374,7 +380,6 @@ impl ExpectedError {
             | Self::StoreDirCreateError { .. }
             | Self::RootManifestNotFound { .. }
             | Self::CargoConfigError { .. }
-            | Self::ConfigParseError { .. }
             | Self::TestFilterBuilderError { .. }
             | Self::UnknownHostPlatform { .. }
             | Self::ArgumentFileReadError { .. }
@@ -390,6 +395,15 @@ impl ExpectedError {
             | Self::DialoguerError { .. }
             | Self::SignalHandlerSetupError { .. }
             | Self::ShowTestGroupsError { .. } => NextestExitCode::SETUP_ERROR,
+            Self::ConfigParseError { err } => {
+                // Experimental features not being enabled are their own error.
+                match err.kind() {
+                    ConfigParseErrorKind::ExperimentalFeatureNotEnabled { .. } => {
+                        NextestExitCode::EXPERIMENTAL_FEATURE_NOT_ENABLED
+                    }
+                    _ => NextestExitCode::SETUP_ERROR,
+                }
+            }
             Self::RequiredVersionNotMet { .. } => NextestExitCode::REQUIRED_VERSION_NOT_MET,
             #[cfg(feature = "self-update")]
             Self::UpdateVersionParseError { .. } => NextestExitCode::SETUP_ERROR,
@@ -402,6 +416,7 @@ impl ExpectedError {
             Self::BuildExecFailed { .. } | Self::BuildFailed { .. } => {
                 NextestExitCode::BUILD_FAILED
             }
+            Self::SetupScriptFailed => NextestExitCode::SETUP_SCRIPT_FAILED,
             Self::TestRunFailed => NextestExitCode::TEST_RUN_FAILED,
             Self::ArchiveCreateError { .. } => NextestExitCode::ARCHIVE_CREATION_FAILED,
             Self::WriteTestListError { .. } | Self::WriteEventError { .. } => {
@@ -497,7 +512,7 @@ impl ExpectedError {
             }
             Self::ConfigParseError { err } => {
                 match err.kind() {
-                    ConfigParseErrorKind::OverrideError(errors) => {
+                    ConfigParseErrorKind::CompiledDataParseError(errors) => {
                         // Override errors are printed out using miette.
                         for override_error in errors {
                             log::error!(
@@ -543,6 +558,57 @@ impl ExpectedError {
                         );
                         None
                     }
+                    ConfigParseErrorKind::UnknownConfigScripts {
+                        errors,
+                        known_scripts,
+                    } => {
+                        let known_scripts_str = known_scripts
+                            .iter()
+                            .map(|group_name| {
+                                group_name.if_supports_color_2(Stream::Stderr, |x| x.bold())
+                            })
+                            .join(", ");
+                        let mut errors_str = String::new();
+                        for error in errors {
+                            errors_str.push_str(&format!(
+                                " - script `{}` specified within profile `{}`\n",
+                                error.name.if_supports_color_2(Stream::Stderr, |x| x.bold()),
+                                error
+                                    .profile_name
+                                    .if_supports_color_2(Stream::Stderr, |x| x.bold())
+                            ));
+                        }
+
+                        log::error!(
+                            "for config file `{}`{}, unknown scripts defined \
+                             (known scripts: {known_scripts_str}):\n{errors_str}",
+                            err.config_file(),
+                            provided_by_tool(err.tool()),
+                        );
+                        None
+                    }
+                    ConfigParseErrorKind::UnknownExperimentalFeatures { unknown, known } => {
+                        let unknown_str = unknown
+                            .iter()
+                            .map(|feature_name| {
+                                feature_name.if_supports_color_2(Stream::Stderr, |x| x.bold())
+                            })
+                            .join(", ");
+                        let known_str = known
+                            .iter()
+                            .map(|feature_name| {
+                                feature_name.if_supports_color_2(Stream::Stderr, |x| x.bold())
+                            })
+                            .join(", ");
+
+                        log::error!(
+                            "for config file `{}`{}, unknown experimental features defined: \
+                             {unknown_str} (known features: {known_str})",
+                            err.config_file(),
+                            provided_by_tool(err.tool()),
+                        );
+                        None
+                    }
                     _ => {
                         // These other errors are printed out normally.
                         log::error!("{}", err);
@@ -677,6 +743,10 @@ impl ExpectedError {
                 log::error!("failed to write event to output");
                 Some(err as &dyn Error)
             }
+            Self::SetupScriptFailed => {
+                log::error!("setup script failed");
+                None
+            }
             Self::TestRunFailed => {
                 log::error!("test run failed");
                 None
diff --git a/fixtures/nextest-tests/.config/nextest.toml b/fixtures/nextest-tests/.config/nextest.toml
index 5f0c17ba274..ed81554ebc3 100644
--- a/fixtures/nextest-tests/.config/nextest.toml
+++ b/fixtures/nextest-tests/.config/nextest.toml
@@ -1,11 +1,26 @@
 ## nextest config for this fixture

+# Note that these versions are not necessarily the version of nextest that's actually required --
+# they're only for testing with test_show_config_version.
 nextest-version = { required = "0.9.54", recommended = "0.9.56" }

+# This must be on one line for test_setup_scripts_not_enabled to work.
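+# (test_setup_scripts_not_enabled strips any line starting with "experimental" to verify the
+# feature gate.)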
+experimental = ["setup-scripts"]
+
 [profile.default]
 # disable fail-fast to ensure a deterministic test run
 fail-fast = false

+[[profile.default.scripts]]
+platform = { host = "cfg(unix)" }
+filter = "test(=test_cargo_env_vars)"
+setup = "my-script-unix"
+
+[[profile.default.scripts]]
+platform = { host = "cfg(windows)" }
+filter = "test(=test_cargo_env_vars)"
+setup = "my-script-windows"
+
 [profile.with-retries]
 retries = 2

@@ -51,3 +66,9 @@ max-threads = 4

 [test-groups.unused]
 max-threads = 20
+
+[script.my-script-unix]
+command = './scripts/my-script.sh'
+
+[script.my-script-windows]
+command = 'cmd /c "scripts\\my-script.bat"'
diff --git a/fixtures/nextest-tests/scripts/my-script.bat b/fixtures/nextest-tests/scripts/my-script.bat
new file mode 100644
index 00000000000..fedb0a7bb1c
--- /dev/null
+++ b/fixtures/nextest-tests/scripts/my-script.bat
@@ -0,0 +1,4 @@
+REM If this environment variable is set, exit with non-zero.
+if defined __NEXTEST_SETUP_SCRIPT_ERROR exit 1
+
+ECHO MY_ENV_VAR=my-env-var>> %NEXTEST_ENV%
diff --git a/fixtures/nextest-tests/scripts/my-script.sh b/fixtures/nextest-tests/scripts/my-script.sh
new file mode 100755
index 00000000000..37da598ff07
--- /dev/null
+++ b/fixtures/nextest-tests/scripts/my-script.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# If this environment variable is set, exit with non-zero.
+if [ ! -z "$__NEXTEST_SETUP_SCRIPT_ERROR" ]; then
+    echo "__NEXTEST_SETUP_SCRIPT_ERROR is set, exiting with 1"
+    exit 1
+fi
+
+echo MY_ENV_VAR=my-env-var >> $NEXTEST_ENV
diff --git a/fixtures/nextest-tests/tests/basic.rs b/fixtures/nextest-tests/tests/basic.rs
index 8021dd42b50..3abf4c5863c 100644
--- a/fixtures/nextest-tests/tests/basic.rs
+++ b/fixtures/nextest-tests/tests/basic.rs
@@ -6,7 +6,13 @@ use std::{
 };

 #[test]
-fn test_success() {}
+fn test_success() {
+    // Check that MY_ENV_VAR (set by the setup script) isn't set here: the script only runs
+    // for test_cargo_env_vars.
+    assert_eq!(
+        std::env::var("MY_ENV_VAR"),
+        Err(std::env::VarError::NotPresent)
+    );
+}

 #[test]
 fn test_failure_assert() {
@@ -238,6 +244,8 @@ fn test_cargo_env_vars() {
             Ok("test-PASSED-value-set-by-environment"),
         );
     }
+
+    assert_eq!(std::env::var("MY_ENV_VAR").as_deref(), Ok("my-env-var"));
 }

 #[test]
diff --git a/integration-tests/tests/integration/main.rs b/integration-tests/tests/integration/main.rs
index d5180c26250..319d4f94313 100644
--- a/integration-tests/tests/integration/main.rs
+++ b/integration-tests/tests/integration/main.rs
@@ -636,3 +636,46 @@ fn test_show_config_version() {

     insta::assert_snapshot!(output.stdout_as_str());
 }
+
+#[test]
+fn test_setup_scripts_not_enabled() {
+    set_env_vars();
+
+    let p = TempProject::new().unwrap();
+
+    // Remove the "experimental" line from the config file.
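+    // (The fixture declares `experimental = ["setup-scripts"]` on a single line, so dropping
+    // lines that start with "experimental" fully disables the feature.)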
+    let config_path = p.workspace_root().join(".config/nextest.toml");
+    let s = std::fs::read_to_string(&config_path).unwrap();
+    let mut out = String::new();
+    for line in s.lines() {
+        if !line.starts_with("experimental") {
+            out.push_str(line);
+            out.push('\n');
+        }
+    }
+    std::fs::write(&config_path, out).unwrap();
+
+    let output = CargoNextestCli::new()
+        .args(["run", "--manifest-path", p.manifest_path().as_str()])
+        .unchecked(true)
+        .output();
+
+    assert_eq!(
+        output.exit_code,
+        Some(NextestExitCode::EXPERIMENTAL_FEATURE_NOT_ENABLED)
+    );
+}
+
+#[test]
+fn test_setup_script_error() {
+    set_env_vars();
+    let p = TempProject::new().unwrap();
+
+    let output = CargoNextestCli::new()
+        .args(["run", "--manifest-path", p.manifest_path().as_str()])
+        .env("__NEXTEST_SETUP_SCRIPT_ERROR", "1")
+        .unchecked(true)
+        .output();
+
+    assert_eq!(output.exit_code, Some(NextestExitCode::SETUP_SCRIPT_FAILED));
+}
diff --git a/nextest-metadata/src/exit_codes.rs b/nextest-metadata/src/exit_codes.rs
index ad39d6f1199..5514589c75f 100644
--- a/nextest-metadata/src/exit_codes.rs
+++ b/nextest-metadata/src/exit_codes.rs
@@ -28,6 +28,9 @@ impl NextestExitCode {
     /// Creating a test list produced an error.
     pub const TEST_LIST_CREATION_FAILED: i32 = 104;

+    /// A setup script failed.
+    pub const SETUP_SCRIPT_FAILED: i32 = 105;
+
     /// Writing data to stdout or stderr produced an error.
     pub const WRITE_OUTPUT_ERROR: i32 = 110;

diff --git a/nextest-runner/Cargo.toml b/nextest-runner/Cargo.toml
index 4a6555fda69..7e622b85e46 100644
--- a/nextest-runner/Cargo.toml
+++ b/nextest-runner/Cargo.toml
@@ -19,7 +19,9 @@ future-queue = "0.3.0"
 bytes = "1.5.0"
 camino = { version = "1.1.6", features = ["serde1"] }
 camino-tempfile = "1.0.2"
-config = { version = "0.13.3", default-features = false, features = ["toml"] }
+# config's "preserve_order" feature is needed for preserving the order of
+# setup scripts in .config/nextest.toml.
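+# (Without it, [script.*] entries would lose their declaration order, which is the order
+# setup scripts run in.)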
+config = { version = "0.13.3", default-features = false, features = ["toml", "preserve_order"] }
 cargo_metadata = "0.17.0"
 cfg-if = "1.0.0"
 chrono = "0.4.31"
@@ -59,6 +61,7 @@ target-spec-miette = "0.3.0"
 thiserror = "1.0.48"
 # For parsing of .cargo/config.toml files
 tokio = { version = "1.32.0", features = [
+    "fs",
     "io-util",
     "macros",
     "process",
@@ -91,7 +94,7 @@ nextest-workspace-hack = { version = "0.1", path = "../workspace-hack" }
 console-subscriber = { version = "0.1.10", optional = true }
 unicode-ident = "1.0.12"
 unicode-normalization = "0.1.22"
-indexmap = "2.0.0"
+indexmap = { version = "2.0.0", features = ["serde"] }
 smallvec = "1.11.1"

 [target.'cfg(unix)'.dependencies]
diff --git a/nextest-runner/src/config/config_impl.rs b/nextest-runner/src/config/config_impl.rs
index 261b5888690..0ef572280bf 100644
--- a/nextest-runner/src/config/config_impl.rs
+++ b/nextest-runner/src/config/config_impl.rs
@@ -2,21 +2,24 @@
 // SPDX-License-Identifier: MIT OR Apache-2.0

 use super::{
-    CompiledOverride, CompiledOverridesByProfile, CustomTestGroup, DeserializedOverride,
-    NextestVersionDeserialize, RetryPolicy, SettingSource, SlowTimeout, TestGroup, TestGroupConfig,
-    TestSettings, TestThreads, ThreadsRequired, ToolConfigFile,
+    CompiledByProfile, CompiledData, ConfigExperimental, CustomTestGroup, DeserializedOverride,
+    DeserializedProfileScriptConfig, NextestVersionDeserialize, RetryPolicy, ScriptConfig,
+    ScriptId, SettingSource, SetupScripts, SlowTimeout, TestGroup, TestGroupConfig, TestSettings,
+    TestThreads, ThreadsRequired, ToolConfigFile,
 };
 use crate::{
     errors::{
         provided_by_tool, ConfigParseError, ConfigParseErrorKind, ProfileNotFound,
-        UnknownTestGroupError,
+        UnknownConfigScriptError, UnknownTestGroupError,
     },
+    list::TestList,
     platform::BuildPlatforms,
     reporter::{FinalStatusLevel, StatusLevel, TestOutputDisplay},
 };
 use camino::{Utf8Path, Utf8PathBuf};
 use config::{builder::DefaultState, Config, ConfigBuilder, File, FileFormat, FileSourceFile};
 use guppy::graph::PackageGraph;
+use indexmap::IndexMap;
 use nextest_filtering::TestQuery;
 use once_cell::sync::Lazy;
 use serde::Deserialize;
@@ -51,7 +54,7 @@ pub fn get_num_cpus() -> usize {
 pub struct NextestConfig {
     workspace_root: Utf8PathBuf,
     inner: NextestConfigImpl,
-    overrides: CompiledOverridesByProfile,
+    compiled: CompiledByProfile,
 }

 impl NextestConfig {
@@ -91,6 +94,7 @@ impl NextestConfig {
         graph: &PackageGraph,
         config_file: Option<&Utf8Path>,
         tool_config_files: impl IntoIterator<IntoIter = I>,
+        experimental: &BTreeSet<ConfigExperimental>,
     ) -> Result<Self, ConfigParseError>
     where
         I: Iterator<Item = &'a ToolConfigFile> + DoubleEndedIterator,
@@ -100,6 +104,7 @@ impl NextestConfig {
             graph,
             config_file,
             tool_config_files,
+            experimental,
             |config_file, tool, unknown| {
                 let mut unknown_str = String::new();
                 if unknown.len() == 1 {
@@ -128,6 +133,7 @@ impl NextestConfig {
         graph: &PackageGraph,
         config_file: Option<&Utf8Path>,
         tool_config_files: impl IntoIterator<IntoIter = I>,
+        experimental: &BTreeSet<ConfigExperimental>,
         mut unknown_callback: impl FnMut(&Utf8Path, Option<&str>, &BTreeSet<String>),
     ) -> Result<Self, ConfigParseError>
     where
         I: Iterator<Item = &'a ToolConfigFile> + DoubleEndedIterator,
     {
         let workspace_root = workspace_root.into();
         let tool_config_files_rev = tool_config_files.into_iter().rev();
-        let (inner, overrides) = Self::read_from_sources(
+        let (inner, compiled) = Self::read_from_sources(
             graph,
             &workspace_root,
             config_file,
             tool_config_files_rev,
+            experimental,
             &mut unknown_callback,
         )?;
         Ok(Self {
             workspace_root,
             inner,
-            overrides,
+            compiled,
         })
     }

@@ -178,7 +185,7 @@ impl NextestConfig {
             workspace_root: workspace_root.into(),
             inner: deserialized.into_config_impl(),
             // The default config does not (cannot) have overrides.
-            overrides: CompiledOverridesByProfile::default(),
+            compiled: CompiledByProfile::default(),
         }
     }

@@ -200,16 +207,18 @@ impl NextestConfig {
         workspace_root: &Utf8Path,
         file: Option<&Utf8Path>,
         tool_config_files_rev: impl Iterator<Item = &'a ToolConfigFile>,
+        experimental: &BTreeSet<ConfigExperimental>,
         unknown_callback: &mut impl FnMut(&Utf8Path, Option<&str>, &BTreeSet<String>),
-    ) -> Result<(NextestConfigImpl, CompiledOverridesByProfile), ConfigParseError> {
+    ) -> Result<(NextestConfigImpl, CompiledByProfile), ConfigParseError> {
         // First, get the default config.
         let mut composite_builder = Self::make_default_config();

         // Overrides are handled additively.
         // Note that they're stored in reverse order here, and are flipped over at the end.
-        let mut overrides = CompiledOverridesByProfile::default();
+        let mut compiled = CompiledByProfile::default();

         let mut known_groups = BTreeSet::new();
+        let mut known_scripts = BTreeSet::new();

         // Next, merge in tool configs.
         for ToolConfigFile { config_file, tool } in tool_config_files_rev {
@@ -220,9 +229,11 @@ impl NextestConfig {
                 config_file,
                 Some(tool),
                 source.clone(),
-                &mut overrides,
+                &mut compiled,
+                experimental,
                 unknown_callback,
                 &mut known_groups,
+                &mut known_scripts,
             )?;

             // This is the final, composite builder used at the end.
@@ -245,9 +256,11 @@ impl NextestConfig {
             &config_file,
             None,
             source.clone(),
-            &mut overrides,
+            &mut compiled,
+            experimental,
             unknown_callback,
             &mut known_groups,
+            &mut known_scripts,
         )?;

         composite_builder = composite_builder.add_source(source);
@@ -257,13 +270,13 @@ impl NextestConfig {
         let (config, _unknown) = Self::build_and_deserialize_config(&composite_builder)
             .map_err(|kind| ConfigParseError::new(config_file, None, kind))?;

-        // Reverse all the overrides at the end.
-        overrides.default.reverse();
-        for override_ in overrides.other.values_mut() {
-            override_.reverse();
+        // Reverse all the compiled data at the end.
+        compiled.default.reverse();
+        for data in compiled.other.values_mut() {
+            data.reverse();
         }

-        Ok((config.into_config_impl(), overrides))
+        Ok((config.into_config_impl(), compiled))
     }

     #[allow(clippy::too_many_arguments)]
@@ -273,9 +286,11 @@ impl NextestConfig {
         config_file: &Utf8Path,
         tool: Option<&str>,
         source: File<FileSourceFile, FileFormat>,
-        overrides_out: &mut CompiledOverridesByProfile,
+        compiled_out: &mut CompiledByProfile,
+        experimental: &BTreeSet<ConfigExperimental>,
         unknown_callback: &mut impl FnMut(&Utf8Path, Option<&str>, &BTreeSet<String>),
         known_groups: &mut BTreeSet<CustomTestGroup>,
+        known_scripts: &mut BTreeSet<ScriptId>,
     ) -> Result<(), ConfigParseError> {
         // Try building default builder + this file to get good error attribution and handle
         // overrides additively.
@@ -314,6 +329,45 @@ impl NextestConfig {

         known_groups.extend(valid_groups);

+        // If scripts are present, check that the experimental feature is enabled.
+        if !this_config.scripts.is_empty()
+            && !experimental.contains(&ConfigExperimental::SetupScripts)
+        {
+            return Err(ConfigParseError::new(
+                config_file,
+                tool,
+                ConfigParseErrorKind::ExperimentalFeatureNotEnabled {
+                    feature: ConfigExperimental::SetupScripts,
+                },
+            ));
+        }
+
+        // Check that setup scripts are named as expected.
+        let (valid_scripts, invalid_scripts): (BTreeSet<_>, _) =
+            this_config.scripts.keys().cloned().partition(|script| {
+                if let Some(tool) = tool {
+                    // The first component must be the tool name.
+                    script
+                        .as_identifier()
+                        .tool_components()
+                        .map_or(false, |(tool_name, _)| tool_name == tool)
+                } else {
+                    // If a tool is not specified, it must *not* be a tool identifier.
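+                    // (Repo-level configs use plain identifiers, while tool configs must use
+                    // the `@tool:tool-name:identifier` form.)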
+                    !script.as_identifier().is_tool_identifier()
+                }
+            });
+
+        if !invalid_scripts.is_empty() {
+            let kind = if tool.is_some() {
+                ConfigParseErrorKind::InvalidConfigScriptsDefinedByTool(invalid_scripts)
+            } else {
+                ConfigParseErrorKind::InvalidConfigScriptsDefined(invalid_scripts)
+            };
+            return Err(ConfigParseError::new(config_file, tool, kind));
+        }
+
+        known_scripts.extend(valid_scripts);
+
         let this_config = this_config.into_config_impl();

         let unknown_default_profiles: Vec<_> = this_config
@@ -335,7 +389,7 @@ impl NextestConfig {
         }

         // Compile the overrides for this file.
-        let this_overrides = CompiledOverridesByProfile::new(graph, &this_config)
+        let this_compiled = CompiledByProfile::new(graph, &this_config)
             .map_err(|kind| ConfigParseError::new(config_file, tool, kind))?;

         // Check that all overrides specify known test groups.
@@ -351,19 +405,20 @@ impl NextestConfig {
             }
         };

-        this_overrides.default.iter().for_each(|override_| {
-            check_test_group("default", override_.data.test_group.as_ref());
-        });
+        this_compiled
+            .default
+            .overrides
+            .iter()
+            .for_each(|override_| {
+                check_test_group("default", override_.data.test_group.as_ref());
+            });

         // Check that override test groups are known.
-        this_overrides
-            .other
-            .iter()
-            .for_each(|(profile_name, overrides)| {
-                overrides.iter().for_each(|override_| {
-                    check_test_group(profile_name, override_.data.test_group.as_ref());
-                });
+        this_compiled.other.iter().for_each(|(profile_name, data)| {
+            data.overrides.iter().for_each(|override_| {
+                check_test_group(profile_name, override_.data.test_group.as_ref());
             });
+        });

         // If there were any unknown groups, error out.
         if !unknown_group_errors.is_empty() {
@@ -378,16 +433,66 @@ impl NextestConfig {
             ));
         }

-        // Grab the overrides for this config. Add them in reversed order (we'll flip it around at the end).
-        overrides_out
+        // Check that scripts are known.
+        let mut unknown_script_errors = Vec::new();
+        let mut check_script_ids = |profile_name: &str, scripts: &[ScriptId]| {
+            if !scripts.is_empty() && !experimental.contains(&ConfigExperimental::SetupScripts) {
+                return Err(ConfigParseError::new(
+                    config_file,
+                    tool,
+                    ConfigParseErrorKind::ExperimentalFeatureNotEnabled {
+                        feature: ConfigExperimental::SetupScripts,
+                    },
+                ));
+            }
+            for script in scripts {
+                if !known_scripts.contains(script) {
+                    unknown_script_errors.push(UnknownConfigScriptError {
+                        profile_name: profile_name.to_owned(),
+                        name: script.clone(),
+                    });
+                }
+            }
+
+            Ok(())
+        };
+
+        this_compiled
             .default
-            .extend(this_overrides.default.into_iter().rev());
-        for (name, overrides) in this_overrides.other {
-            overrides_out
+            .scripts
+            .iter()
+            .try_for_each(|scripts| check_script_ids("default", &scripts.setup))?;
+        this_compiled
+            .other
+            .iter()
+            .try_for_each(|(profile_name, data)| {
+                data.scripts
+                    .iter()
+                    .try_for_each(|scripts| check_script_ids(profile_name, &scripts.setup))
+            })?;
+
+        // If there were any unknown scripts, error out.
+        if !unknown_script_errors.is_empty() {
+            let known_scripts = known_scripts.iter().cloned().collect();
+            return Err(ConfigParseError::new(
+                config_file,
+                tool,
+                ConfigParseErrorKind::UnknownConfigScripts {
+                    errors: unknown_script_errors,
+                    known_scripts,
+                },
+            ));
+        }
+
+        // Grab the overrides and setup scripts for this config. Add them in reversed order (we'll
+        // flip it around at the end).
+        compiled_out.default.extend_reverse(this_compiled.default);
+        for (name, data) in this_compiled.other {
+            compiled_out
                 .other
                 .entry(name)
                 .or_default()
-                .extend(overrides.into_iter().rev());
+                .extend_reverse(data);
         }

         Ok(())
     }
@@ -407,23 +512,22 @@ impl NextestConfig {
         let mut store_dir = self.workspace_root.join(&self.inner.store.dir);
         store_dir.push(name);

-        // Grab the overrides as well.
-        let overrides = self
-            .overrides
+        // Grab the compiled data as well.
+        let compiled_data = self
+            .compiled
             .other
             .get(name)
-            .into_iter()
-            .flatten()
-            .chain(self.overrides.default.iter())
             .cloned()
-            .collect();
+            .unwrap_or_default()
+            .chain(self.compiled.default.clone());

         Ok(NextestProfile {
             store_dir,
             default_profile: &self.inner.default_profile,
             custom_profile,
             test_groups: &self.inner.test_groups,
-            overrides,
+            scripts: &self.inner.scripts,
+            compiled_data,
         })
     }

@@ -448,7 +552,7 @@ impl NextestConfig {
 }

 /// The state of nextest profiles before build platforms have been applied.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Default)]
 pub struct PreBuildPlatform {}

 /// The state of nextest profiles after build platforms have been applied.
@@ -473,7 +577,9 @@ pub struct NextestProfile<'cfg, State = FinalConfig> {
     default_profile: &'cfg DefaultProfileImpl,
     custom_profile: Option<&'cfg CustomProfileImpl>,
     test_groups: &'cfg BTreeMap<CustomTestGroup, TestGroupConfig>,
-    pub(super) overrides: Vec<CompiledOverride<State>>,
+    // This is ordered because the scripts are used in the order they're defined.
+    scripts: &'cfg IndexMap<ScriptId, ScriptConfig>,
+    pub(super) compiled_data: CompiledData<State>,
 }

 impl<'cfg, State> NextestProfile<'cfg, State> {
@@ -482,11 +588,16 @@ impl<'cfg, State> NextestProfile<'cfg, State> {
         &self.store_dir
     }

-    /// Returns the test group configuration for this profile.
+    /// Returns the global test group configuration.
     pub fn test_group_config(&self) -> &'cfg BTreeMap<CustomTestGroup, TestGroupConfig> {
         self.test_groups
     }

+    /// Returns the global script configuration.
+    pub fn script_config(&self) -> &'cfg IndexMap<ScriptId, ScriptConfig> {
+        self.scripts
+    }
+
     #[allow(dead_code)]
     pub(super) fn custom_profile(&self) -> Option<&'cfg CustomProfileImpl> {
         self.custom_profile
@@ -499,17 +610,14 @@ impl<'cfg> NextestProfile<'cfg, PreBuildPlatform> {
     /// This is a separate step from parsing the config and reading a profile so that cargo-nextest
     /// can tell users about configuration parsing errors before building the binary list.
     pub fn apply_build_platforms(self, build_platforms: &BuildPlatforms) -> NextestProfile<'cfg> {
-        let overrides = self
-            .overrides
-            .into_iter()
-            .map(|override_| override_.apply_build_platforms(build_platforms))
-            .collect();
+        let compiled_data = self.compiled_data.apply_build_platforms(build_platforms);
         NextestProfile {
             store_dir: self.store_dir,
             default_profile: self.default_profile,
             custom_profile: self.custom_profile,
+            scripts: self.scripts,
             test_groups: self.test_groups,
-            overrides,
+            compiled_data,
         }
     }
 }
@@ -586,6 +694,11 @@ impl<'cfg> NextestProfile<'cfg, FinalConfig> {
             .unwrap_or(self.default_profile.fail_fast)
     }

+    /// Returns the list of setup scripts.
+    pub fn setup_scripts(&self, test_list: &TestList<'_>) -> SetupScripts<'_> {
+        SetupScripts::new(self, test_list)
+    }
+
     /// Returns settings for individual tests.
     pub fn settings_for(&self, query: &TestQuery<'_>) -> TestSettings {
         TestSettings::new(self, query)
     }
@@ -666,6 +779,7 @@ impl<'cfg> NextestJunitConfig<'cfg> {
 pub(super) struct NextestConfigImpl {
     store: StoreConfigImpl,
     test_groups: BTreeMap<CustomTestGroup, TestGroupConfig>,
+    scripts: IndexMap<ScriptId, ScriptConfig>,
     default_profile: DefaultProfileImpl,
     other_profiles: HashMap<String, CustomProfileImpl>,
 }
@@ -706,13 +820,20 @@ impl NextestConfigImpl {
 #[serde(rename_all = "kebab-case")]
 struct NextestConfigDeserialize {
     store: StoreConfigImpl,
-    // This is parsed as part of NextestConfigVersionOnly. It's re-parsed here to avoid printing an
-    // "unknown key" message.
+
+    // These are parsed as part of NextestConfigVersionOnly. They're re-parsed here to avoid
+    // printing an "unknown key" message.
     #[allow(unused)]
     #[serde(default)]
     nextest_version: Option<NextestVersionDeserialize>,
+    #[allow(unused)]
+    #[serde(default)]
+    experimental: BTreeSet<String>,
+
     #[serde(default)]
     test_groups: BTreeMap<CustomTestGroup, TestGroupConfig>,
+    #[serde(default, rename = "script")]
+    scripts: IndexMap<ScriptId, ScriptConfig>,
     #[serde(rename = "profile")]
     profiles: HashMap<String, CustomProfileImpl>,
 }
@@ -729,6 +850,7 @@ impl NextestConfigDeserialize {
             store: self.store,
             default_profile,
             test_groups: self.test_groups,
+            scripts: self.scripts,
             other_profiles: self.profiles,
         }
     }
@@ -753,6 +875,7 @@ pub(super) struct DefaultProfileImpl {
     slow_timeout: SlowTimeout,
     leak_timeout: Duration,
     overrides: Vec<DeserializedOverride>,
+    scripts: Vec<DeserializedProfileScriptConfig>,
     junit: DefaultJunitImpl,
 }
@@ -786,6 +909,7 @@ impl DefaultProfileImpl {
                 .leak_timeout
                 .expect("leak-timeout present in default profile"),
             overrides: p.overrides,
+            scripts: p.scripts,
             junit: DefaultJunitImpl {
                 path: p.junit.path,
                 report_name: p
@@ -807,6 +931,10 @@ impl DefaultProfileImpl {
     pub(super) fn overrides(&self) -> &[DeserializedOverride] {
         &self.overrides
     }
+
+    pub(super) fn setup_scripts(&self) -> &[DeserializedProfileScriptConfig] {
+        &self.scripts
+    }
 }

 #[derive(Clone, Debug)]
@@ -843,6 +971,8 @@ pub(super) struct CustomProfileImpl {
     #[serde(default)]
     overrides: Vec<DeserializedOverride>,
     #[serde(default)]
+    scripts: Vec<DeserializedProfileScriptConfig>,
+    #[serde(default)]
     junit: JunitImpl,
 }

@@ -855,6 +985,10 @@ impl CustomProfileImpl {
     pub(super) fn overrides(&self) -> &[DeserializedOverride] {
         &self.overrides
     }
+
+    pub(super) fn scripts(&self) -> &[DeserializedProfileScriptConfig] {
+        &self.scripts
+    }
 }

 #[derive(Clone, Debug, Default, Deserialize)]
@@ -933,6 +1067,7 @@ mod tests {
                 tool: "my-tool".to_owned(),
                 config_file: tool_path,
             }][..],
+            &Default::default(),
             |_path, tool, ignored| {
                 unknown_keys.insert(tool.map(|s| s.to_owned()), ignored.clone());
             },
diff --git a/nextest-runner/src/config/mod.rs b/nextest-runner/src/config/mod.rs
index 2a89a75fc93..9bf31e6696c 100644
--- a/nextest-runner/src/config/mod.rs
+++ b/nextest-runner/src/config/mod.rs
@@ -8,6 +8,7 @@ mod identifier;
 mod nextest_version;
 mod overrides;
 mod retry_policy;
+mod scripts;
 mod slow_timeout;
 mod test_group;
 mod test_threads;
@@ -19,6 +20,7 @@ pub use identifier::*;
 pub use nextest_version::*;
 pub use overrides::*;
 pub use retry_policy::*;
+pub(super) use scripts::*;
 pub use slow_timeout::*;
 pub use test_group::*;
 pub use test_threads::*;
diff --git a/nextest-runner/src/config/nextest_version.rs b/nextest-runner/src/config/nextest_version.rs
index 576bd2c93fe..107d056fa80 100644
--- a/nextest-runner/src/config/nextest_version.rs
+++ b/nextest-runner/src/config/nextest_version.rs
@@ -8,7 +8,7 @@ use crate::errors::{ConfigParseError, ConfigParseErrorKind};
 use camino::Utf8Path;
 use semver::Version;
 use serde::{Deserialize, Deserializer};
-use std::borrow::Cow;
+use std::{borrow::Cow, collections::BTreeSet, fmt, str::FromStr};

 /// A "version-only" form of the nextest configuration.
 ///
@@ -18,6 +18,9 @@ use std::borrow::Cow;
 pub struct VersionOnlyConfig {
     /// The nextest version configuration.
     nextest_version: NextestVersionConfig,
+
+    /// Experimental features enabled.
+    experimental: BTreeSet<ConfigExperimental>,
 }

 impl VersionOnlyConfig {
@@ -33,9 +36,8 @@ impl VersionOnlyConfig {
         I: Iterator<Item = &'a ToolConfigFile> + DoubleEndedIterator,
     {
         let tool_config_files_rev = tool_config_files.into_iter().rev();
-        let nextest_version =
-            Self::read_from_sources(workspace_root, config_file, tool_config_files_rev)?;
-        Ok(Self { nextest_version })
+
+        Self::read_from_sources(workspace_root, config_file, tool_config_files_rev)
     }

     /// Returns the nextest version requirement.
@@ -43,16 +45,22 @@ impl VersionOnlyConfig {
         &self.nextest_version
     }

+    /// Returns the experimental features enabled.
+    pub fn experimental(&self) -> &BTreeSet<ConfigExperimental> {
+        &self.experimental
+    }
+
     fn read_from_sources<'a>(
         workspace_root: &Utf8Path,
         config_file: Option<&Utf8Path>,
         tool_config_files_rev: impl Iterator<Item = &'a ToolConfigFile>,
-    ) -> Result<NextestVersionConfig, ConfigParseError> {
+    ) -> Result<Self, ConfigParseError> {
         let mut nextest_version = NextestVersionConfig::default();
+        let mut experimental = BTreeSet::new();

         // Merge in tool configs.
         for ToolConfigFile { config_file, tool } in tool_config_files_rev {
-            if let Some(v) = Self::read_and_deserialize(config_file, Some(tool))? {
+            if let Some(v) = Self::read_and_deserialize(config_file, Some(tool))?.nextest_version {
                 nextest_version.accumulate(v, Some(tool));
             }
         }
@@ -66,18 +74,44 @@ impl VersionOnlyConfig {
             }
         };
         if let Some(config_file) = config_file {
-            if let Some(v) = Self::read_and_deserialize(&config_file, None)? {
+            let d = Self::read_and_deserialize(&config_file, None)?;
+            if let Some(v) = d.nextest_version {
                 nextest_version.accumulate(v, None);
             }
+
+            // Check for unknown features.
+            let unknown: BTreeSet<_> = d
+                .experimental
+                .into_iter()
+                .filter(|feature| {
+                    if let Ok(feature) = feature.parse::<ConfigExperimental>() {
+                        experimental.insert(feature);
+                        false
+                    } else {
+                        true
+                    }
+                })
+                .collect();
+            if !unknown.is_empty() {
+                let known = ConfigExperimental::known().collect();
+                return Err(ConfigParseError::new(
+                    config_file.into_owned(),
+                    None,
+                    ConfigParseErrorKind::UnknownExperimentalFeatures { unknown, known },
+                ));
+            }
         }

-        Ok(nextest_version)
+        Ok(Self {
+            nextest_version,
+            experimental,
+        })
     }

     fn read_and_deserialize(
         config_file: &Utf8Path,
         tool: Option<&str>,
-    ) -> Result<Option<NextestVersionDeserialize>, ConfigParseError> {
+    ) -> Result<VersionOnlyDeserialize, ConfigParseError> {
         let toml_str = std::fs::read_to_string(config_file.as_str()).map_err(|error| {
             ConfigParseError::new(
                 config_file.clone(),
@@ -94,7 +128,17 @@ impl VersionOnlyConfig {
                 ConfigParseErrorKind::VersionOnlyDeserializeError(Box::new(error)),
             )
         })?;
-        Ok(v.nextest_version)
+        if tool.is_some() && !v.experimental.is_empty() {
+            return Err(ConfigParseError::new(
+                config_file.clone(),
+                tool,
+                ConfigParseErrorKind::ExperimentalFeaturesInToolConfig {
+                    features: v.experimental,
+                },
+            ));
+        }
+
+        Ok(v)
     }
 }

@@ -104,6 +148,8 @@ impl VersionOnlyConfig {
 struct VersionOnlyDeserialize {
     #[serde(default)]
     nextest_version: Option<NextestVersionDeserialize>,
+    #[serde(default)]
+    experimental: BTreeSet<String>,
 }

 /// Nextest version configuration.
@@ -180,6 +226,39 @@ impl NextestVersionConfig {
     }
 }

+/// Experimental configuration features.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)]
+#[non_exhaustive]
+pub enum ConfigExperimental {
+    /// Enable support for setup scripts.
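+    ///
+    /// Enabled via `experimental = ["setup-scripts"]` in `.config/nextest.toml`.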
+    SetupScripts,
+}
+
+impl ConfigExperimental {
+    fn known() -> impl Iterator<Item = Self> {
+        vec![Self::SetupScripts].into_iter()
+    }
+}
+
+impl FromStr for ConfigExperimental {
+    type Err = ();
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "setup-scripts" => Ok(Self::SetupScripts),
+            _ => Err(()),
+        }
+    }
+}
+
+impl fmt::Display for ConfigExperimental {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::SetupScripts => write!(f, "setup-scripts"),
+        }
+    }
+}
+
 /// Specification for a nextest version. Part of [`NextestVersion`].
 #[derive(Debug, Default, Clone, PartialEq, Eq)]
 pub enum NextestVersionReq {
diff --git a/nextest-runner/src/config/overrides.rs b/nextest-runner/src/config/overrides.rs
index f85224f651b..cd1d4a32137 100644
--- a/nextest-runner/src/config/overrides.rs
+++ b/nextest-runner/src/config/overrides.rs
@@ -1,10 +1,12 @@
 // Copyright (c) The nextest Contributors
 // SPDX-License-Identifier: MIT OR Apache-2.0

-use super::{NextestConfigImpl, NextestProfile};
+use super::{
+    CompiledProfileScripts, DeserializedProfileScriptConfig, NextestConfigImpl, NextestProfile,
+};
 use crate::{
     config::{FinalConfig, PreBuildPlatform, RetryPolicy, SlowTimeout, TestGroup, ThreadsRequired},
-    errors::{ConfigParseErrorKind, ConfigParseOverrideError},
+    errors::{ConfigParseCompiledDataError, ConfigParseErrorKind},
     platform::BuildPlatforms,
     reporter::TestOutputDisplay,
 };
@@ -131,7 +133,7 @@ impl TestSettings {
         let mut junit_store_success_output = None;
         let mut junit_store_failure_output = None;

-        for override_ in &profile.overrides {
+        for override_ in &profile.compiled_data.overrides {
             if !override_.state.host_eval {
                 continue;
             }
@@ -259,21 +261,22 @@ impl TestSettings {
 }

 #[derive(Clone, Debug, Default)]
-pub(super) struct CompiledOverridesByProfile {
-    pub(super) default: Vec<CompiledOverride<PreBuildPlatform>>,
-    pub(super) other: HashMap<String, Vec<CompiledOverride<PreBuildPlatform>>>,
+pub(super) struct CompiledByProfile {
+    pub(super) default: CompiledData<PreBuildPlatform>,
+    pub(super) other: HashMap<String, CompiledData<PreBuildPlatform>>,
 }

-impl CompiledOverridesByProfile {
+impl CompiledByProfile {
     pub(super) fn new(
         graph: &PackageGraph,
         config: &NextestConfigImpl,
     ) -> Result<Self, ConfigParseErrorKind> {
         let mut errors = vec![];
-        let default = Self::compile_overrides(
+        let default = CompiledData::new(
             graph,
             "default",
             config.default_profile().overrides(),
+            config.default_profile().setup_scripts(),
             &mut errors,
         );
         let other: HashMap<_, _> = config
@@ -281,7 +284,13 @@ impl CompiledOverridesByProfile {
             .map(|(profile_name, profile)| {
                 (
                     profile_name.to_owned(),
-                    Self::compile_overrides(graph, profile_name, profile.overrides(), &mut errors),
+                    CompiledData::new(
+                        graph,
+                        profile_name,
+                        profile.overrides(),
+                        profile.scripts(),
+                        &mut errors,
+                    ),
                 )
             })
             .collect();

         if errors.is_empty() {
             Ok(Self { default, other })
         } else {
-            Err(ConfigParseErrorKind::OverrideError(errors))
+            Err(ConfigParseErrorKind::CompiledDataParseError(errors))
         }
     }
+}
+
+#[derive(Clone, Debug, Default)]
+pub(super) struct CompiledData<State> {
+    pub(super) overrides: Vec<CompiledOverride<State>>,
+    pub(super) scripts: Vec<CompiledProfileScripts<State>>,
+}

-    fn compile_overrides(
+impl CompiledData<PreBuildPlatform> {
+    fn new(
         graph: &PackageGraph,
         profile_name: &str,
         overrides: &[DeserializedOverride],
-        errors: &mut Vec<ConfigParseOverrideError>,
-    ) -> Vec<CompiledOverride<PreBuildPlatform>> {
-        overrides
+        scripts: &[DeserializedProfileScriptConfig],
+        errors: &mut Vec<ConfigParseCompiledDataError>,
+    ) -> Self {
+        let overrides = overrides
             .iter()
             .enumerate()
             .filter_map(|(index, source)| {
                 CompiledOverride::new(graph, profile_name, index, source, errors)
             })
-            .collect()
+            .collect();
+        let scripts = scripts
+            .iter()
+            .filter_map(|source| CompiledProfileScripts::new(graph, profile_name, source, errors))
+            .collect();
+        Self { overrides, scripts }
+    }
+
+    pub(super) fn extend_reverse(&mut self, other: Self) {
+        self.overrides.extend(other.overrides.into_iter().rev());
+        self.scripts.extend(other.scripts.into_iter().rev());
+    }
+
+    pub(super) fn reverse(&mut self) {
+        self.overrides.reverse();
+        self.scripts.reverse();
+    }
+
+    pub(super) fn chain(self, other: Self) -> Self {
+        let mut overrides = self.overrides;
+        let mut setup_scripts = self.scripts;
+        overrides.extend(other.overrides);
+        setup_scripts.extend(other.scripts);
+        Self {
+            overrides,
+            scripts: setup_scripts,
+        }
+    }
+
+    pub(super) fn apply_build_platforms(
+        self,
+        build_platforms: &BuildPlatforms,
+    ) -> CompiledData<FinalConfig> {
+        let overrides = self
+            .overrides
+            .into_iter()
+            .map(|override_| override_.apply_build_platforms(build_platforms))
+            .collect();
+        let setup_scripts = self
+            .scripts
+            .into_iter()
+            .map(|setup_script| setup_script.apply_build_platforms(build_platforms))
+            .collect();
+        CompiledData {
+            overrides,
+            scripts: setup_scripts,
+        }
+    }
 }

@@ -349,13 +413,13 @@ impl CompiledOverride {
         profile_name: &str,
         index: usize,
         source: &DeserializedOverride,
-        errors: &mut Vec<ConfigParseOverrideError>,
+        errors: &mut Vec<ConfigParseCompiledDataError>,
     ) -> Option<Self> {
         if source.platform.host.is_none()
             && source.platform.target.is_none()
             && source.filter.is_none()
         {
-            errors.push(ConfigParseOverrideError {
+            errors.push(ConfigParseCompiledDataError {
                 profile_name: profile_name.to_owned(),
                 not_specified: true,
                 host_parse_error: None,
@@ -397,7 +461,7 @@ impl CompiledOverride {
             let platform_parse_error = maybe_platform_err.err();
             let parse_errors = maybe_parse_err.err();

-            errors.push(ConfigParseOverrideError {
+            errors.push(ConfigParseCompiledDataError {
                 profile_name: profile_name.to_owned(),
                 not_specified: false,
                 host_parse_error: host_platform_parse_error,
@@ -455,7 +519,7 @@ pub(crate) enum MaybeTargetSpec {
 }

 impl MaybeTargetSpec {
-    fn new(platform_str: Option<&str>) -> Result<Self, target_spec::Error> {
+    pub(super) fn new(platform_str: Option<&str>) -> Result<Self, target_spec::Error> {
         Ok(match platform_str {
             Some(platform_str) => {
                 MaybeTargetSpec::Provided(TargetSpec::new(platform_str.to_owned())?)
@@ -464,7 +528,7 @@ impl MaybeTargetSpec {
         })
     }

-    fn eval(&self, platform: &Platform) -> bool {
+    pub(super) fn eval(&self, platform: &Platform) -> bool {
         match self {
             MaybeTargetSpec::Provided(spec) => spec
                 .eval(platform)
@@ -513,8 +577,8 @@ pub(super) struct DeserializedJunitOutput {

 #[derive(Clone, Debug, Default)]
 pub(super) struct PlatformStrings {
-    host: Option<String>,
-    target: Option<String>,
+    pub(super) host: Option<String>,
+    pub(super) target: Option<String>,
 }

 impl<'de> Deserialize<'de> for PlatformStrings {
@@ -632,9 +696,14 @@ mod tests {
         let graph = temp_workspace(workspace_dir.path(), config_contents);
         let package_id = graph.workspace().iter().next().unwrap().id();

-        let nextest_config_result =
-            NextestConfig::from_sources(graph.workspace().root(), &graph, None, &[][..])
-                .expect("config is valid");
+        let nextest_config_result = NextestConfig::from_sources(
+            graph.workspace().root(),
+            &graph,
+            None,
+            &[][..],
+            &Default::default(),
+        )
+        .expect("config is valid");
         let profile = nextest_config_result
             .profile("default")
             .expect("valid profile name")
@@ -826,19 +895,25 @@ mod tests {

         let graph = temp_workspace(workspace_path, config_contents);

-        let err = NextestConfig::from_sources(graph.workspace().root(), &graph, None, [])
-            .expect_err("config is invalid");
+        let err = NextestConfig::from_sources(
+            graph.workspace().root(),
+            &graph,
+            None,
+            [],
+            &Default::default(),
+        )
+        .expect_err("config is invalid");
         match err.kind() {
-            ConfigParseErrorKind::OverrideError(override_errors) => {
+            ConfigParseErrorKind::CompiledDataParseError(compile_errors) => {
                 assert_eq!(
-                    override_errors.len(),
+                    compile_errors.len(),
                     1,
                     "exactly one override error must be produced"
                 );
-                let error = override_errors.first().unwrap();
+                let error = compile_errors.first().unwrap();
                 assert_eq!(
                     error.profile_name, faulty_profile,
-                    "override error profile matches"
+                    "compile error profile matches"
                 );
                 let handler = miette::JSONReportHandler::new();
                 let reports = error
@@ -859,7 +934,7 @@ mod tests {
                 assert_eq!(&reports, expected_reports, "reports match");
             }
             other => {
-                panic!("for config error {other:?}, expected ConfigParseErrorKind::OverrideError");
+                panic!("for config error {other:?}, expected ConfigParseErrorKind::CompiledDataParseError");
             }
         };
     }
diff --git a/nextest-runner/src/config/retry_policy.rs b/nextest-runner/src/config/retry_policy.rs
index f68eb041004..de1d63a848c 100644
--- a/nextest-runner/src/config/retry_policy.rs
+++ b/nextest-runner/src/config/retry_policy.rs
@@ -206,8 +206,14 @@ mod tests {

         let graph = temp_workspace(workspace_dir.path(), config_contents);

-        let config = NextestConfig::from_sources(graph.workspace().root(), &graph, None, [])
-            .expect("config is valid");
+        let config = NextestConfig::from_sources(
+            graph.workspace().root(),
+            &graph,
+            None,
+            [],
+            &Default::default(),
+        )
+        .expect("config is valid");
         assert_eq!(
             config
                 .profile("default")
@@ -375,8 +381,14 @@ mod tests {

         let graph = temp_workspace(workspace_path, config_contents);

-        let config_err = NextestConfig::from_sources(graph.workspace().root(), &graph, None, [])
-            .expect_err("config expected to be invalid");
+        let config_err = NextestConfig::from_sources(
+            graph.workspace().root(),
+            &graph,
+            None,
+            [],
+            &Default::default(),
+        )
+        .expect_err("config expected to be invalid");
         let message = match config_err.kind() {
             ConfigParseErrorKind::DeserializeError(path_error) => match path_error.inner() {
@@ -606,8 +618,14 @@ mod tests {
         let graph = temp_workspace(workspace_path, config_contents);
         let package_id = graph.workspace().iter().next().unwrap().id();
-        let config =
-            NextestConfig::from_sources(graph.workspace().root(), &graph, None, &[][..]).unwrap();
+        let config = NextestConfig::from_sources(
+            graph.workspace().root(),
+            &graph,
+            None,
+            &[][..],
+            &Default::default(),
+        )
+        .unwrap();
         let query = TestQuery {
             binary_query: BinaryQuery {
                 package_id,
diff --git a/nextest-runner/src/config/scripts.rs b/nextest-runner/src/config/scripts.rs
new file mode 100644
index 00000000000..67d6fb726f8
--- /dev/null
+++ b/nextest-runner/src/config/scripts.rs
@@ -0,0 +1,1156 @@
+// Copyright (c) The nextest Contributors
+// SPDX-License-Identifier: MIT OR Apache-2.0
+
+//! Setup scripts.
+
+use super::{
+    ConfigIdentifier, FinalConfig, MaybeTargetSpec, NextestProfile, PlatformStrings,
+    PreBuildPlatform, SlowTimeout,
+};
+use crate::{
+    double_spawn::{DoubleSpawnContext, DoubleSpawnInfo},
+    errors::{ConfigParseCompiledDataError, InvalidConfigScriptName, SetupScriptError},
+    list::TestList,
+    platform::BuildPlatforms,
+    test_command::{apply_ld_dyld_env, create_command, LocalExecuteContext},
+};
+use camino::Utf8Path;
+use camino_tempfile::Utf8TempPath;
+use guppy::graph::{cargo::BuildPlatform, PackageGraph};
+use indexmap::IndexMap;
+use nextest_filtering::{FilteringExpr, TestQuery};
+use serde::{de::Error, Deserialize};
+use smol_str::SmolStr;
+use std::{
+    collections::{BTreeMap, HashMap, HashSet},
+    fmt,
+    process::Command,
+    time::Duration,
+};
+use tokio::io::{AsyncBufReadExt, BufReader};
+
+/// Data about setup scripts, returned by a [`NextestProfile`].
+pub struct SetupScripts<'profile> {
+    enabled_scripts: IndexMap<&'profile ScriptId, SetupScript<'profile>>,
+}
+
+impl<'profile> SetupScripts<'profile> {
+    pub(super) fn new(
+        profile: &'profile NextestProfile<'_, FinalConfig>,
+        test_list: &TestList<'_>,
+    ) -> Self {
+        Self::new_with_queries(
+            profile,
+            test_list.iter_tests().filter_map(|test| {
+                test.test_info
+                    .filter_match
+                    .is_match()
+                    .then(|| test.to_test_query())
+            }),
+        )
+    }
+
+    // Creates a new `SetupScripts` instance for the given profile and matching tests.
+    fn new_with_queries<'a>(
+        profile: &'profile NextestProfile<'_, FinalConfig>,
+        matching_tests: impl IntoIterator<Item = TestQuery<'a>>,
+    ) -> Self {
+        let script_config = profile.script_config();
+        let profile_scripts = &profile.compiled_data.scripts;
+        if profile_scripts.is_empty() {
+            return Self {
+                enabled_scripts: IndexMap::new(),
+            };
+        }
+
+        // Build a map of setup scripts to the test configurations that enable them.
+        let mut by_script_id = HashMap::new();
+        for profile_script in profile_scripts {
+            for script_id in &profile_script.setup {
+                by_script_id
+                    .entry(script_id)
+                    .or_insert_with(Vec::new)
+                    .push(profile_script);
+            }
+        }
+
+        // This is a map from enabled setup scripts to a list of configurations that enabled them.
+        let mut enabled_ids = HashSet::new();
+        for test in matching_tests {
+            // Look at all the setup scripts activated by this test.
+            for (&script_id, compiled) in &by_script_id {
+                if enabled_ids.contains(script_id) {
+                    // This script is already enabled.
+                    continue;
+                }
+                if compiled.iter().any(|data| data.is_enabled(&test)) {
+                    enabled_ids.insert(script_id);
+                }
+            }
+        }
+
+        // Build up a map of enabled scripts along with their data, by script ID.
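+        // (Iterating over script_config, an IndexMap, preserves the order in which the
+        // [script.*] tables were defined; enabled scripts run in that order.)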
+        let mut enabled_scripts = IndexMap::new();
+        for (script_id, config) in script_config {
+            if enabled_ids.contains(script_id) {
+                let compiled = by_script_id
+                    .remove(script_id)
+                    .expect("script id must be present");
+                enabled_scripts.insert(
+                    script_id,
+                    SetupScript {
+                        id: script_id.clone(),
+                        config,
+                        compiled,
+                    },
+                );
+            }
+        }
+
+        Self { enabled_scripts }
+    }
+
+    /// Returns the number of enabled setup scripts.
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.enabled_scripts.len()
+    }
+
+    /// Returns true if there are no enabled setup scripts.
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.enabled_scripts.is_empty()
+    }
+
+    /// Returns enabled setup scripts in the order they should be run in.
+    #[inline]
+    pub(crate) fn into_iter(self) -> impl Iterator<Item = SetupScript<'profile>> {
+        self.enabled_scripts.into_values()
+    }
+}
+
+/// Data about an individual setup script.
+///
+/// Returned by [`SetupScripts::into_iter`].
+#[derive(Clone, Debug)]
+#[non_exhaustive]
+pub(crate) struct SetupScript<'profile> {
+    /// The script ID.
+    pub(crate) id: ScriptId,
+
+    /// The configuration for the script.
+    pub(crate) config: &'profile ScriptConfig,
+
+    /// The compiled filters to use to check which tests this script is enabled for.
+    pub(crate) compiled: Vec<&'profile CompiledProfileScripts<FinalConfig>>,
+}
+
+impl<'profile> SetupScript<'profile> {
+    /// Turns self into a command that can be executed.
+    pub(crate) fn make_command(
+        &self,
+        double_spawn: &DoubleSpawnInfo,
+        test_list: &TestList<'_>,
+    ) -> Result<SetupScriptCommand, SetupScriptError> {
+        let lctx = LocalExecuteContext {
+            double_spawn,
+            dylib_path: test_list.updated_dylib_path(),
+            env: test_list.cargo_env(),
+        };
+        SetupScriptCommand::new(
+            &lctx,
+            self.config.program().to_owned(),
+            self.config.args(),
+            test_list.workspace_root(),
+        )
+    }
+
+    pub(crate) fn is_enabled(&self, test: &TestQuery<'_>) -> bool {
+        self.compiled
+            .iter()
+            .any(|compiled| compiled.is_enabled(test))
+    }
+}
+
+/// Represents a to-be-run setup script command with a certain set of arguments.
+pub(crate) struct SetupScriptCommand {
+    /// The command to be run.
+    command: std::process::Command,
+    /// The environment file.
+    env_path: Utf8TempPath,
+    /// Double-spawn context.
+    double_spawn: Option<DoubleSpawnContext>,
+}
+
+impl SetupScriptCommand {
+    /// Creates a new `SetupScriptCommand` for a setup script.
+    pub(crate) fn new(
+        lctx: &LocalExecuteContext<'_>,
+        program: String,
+        args: &[String],
+        cwd: &Utf8Path,
+    ) -> Result<Self, SetupScriptError> {
+        let mut cmd = create_command(program, args, lctx.double_spawn);
+
+        // NB: we will always override user-provided environment variables with the
+        // `CARGO_*` and `NEXTEST_*` variables set directly on `cmd` below.
+        lctx.env.apply_env(&mut cmd);
+
+        let env_path = camino_tempfile::Builder::new()
+            .prefix("nextest-env")
+            .tempfile()
+            .map_err(SetupScriptError::TempPath)?
+            .into_temp_path();
+
+        cmd.current_dir(cwd)
+            // This environment variable is set to indicate that tests are being run under nextest.
+            .env("NEXTEST", "1")
+            // Setup scripts can define environment variables which are written out here.
+            .env("NEXTEST_ENV", &env_path);
+
+        apply_ld_dyld_env(&mut cmd, lctx.dylib_path);
+
+        let double_spawn = lctx.double_spawn.spawn_context();
+
+        Ok(Self {
+            command: cmd,
+            env_path,
+            double_spawn,
+        })
+    }
+
+    /// Returns the command to be run.
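+    ///
+    /// Returned mutably so callers can adjust the command (for example, its stdio handles)
+    /// before spawning.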
+    #[inline]
+    pub(crate) fn command_mut(&mut self) -> &mut std::process::Command {
+        &mut self.command
+    }
+
+    pub(crate) fn spawn(self) -> std::io::Result<(tokio::process::Child, Utf8TempPath)> {
+        let mut command = tokio::process::Command::from(self.command);
+        let res = command.spawn();
+        if let Some(ctx) = self.double_spawn {
+            ctx.finish();
+        }
+        let child = res?;
+        Ok((child, self.env_path))
+    }
+}
+
+/// Data obtained by executing setup scripts. This is used to set up the environment for tests.
+#[derive(Clone, Debug, Default)]
+pub(crate) struct SetupScriptExecuteData<'profile> {
+    env_maps: Vec<(SetupScript<'profile>, SetupScriptEnvMap)>,
+}
+
+impl<'profile> SetupScriptExecuteData<'profile> {
+    pub(crate) fn new() -> Self {
+        Self::default()
+    }
+
+    pub(crate) fn add_script(&mut self, script: SetupScript<'profile>, env_map: SetupScriptEnvMap) {
+        self.env_maps.push((script, env_map));
+    }
+
+    /// Applies the data from setup scripts to the given test instance.
+    pub(crate) fn apply(&self, test: &TestQuery<'_>, command: &mut Command) {
+        for (script, env_map) in &self.env_maps {
+            if script.is_enabled(test) {
+                for (key, value) in env_map.env_map.iter() {
+                    command.env(key, value);
+                }
+            }
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct SetupScriptEnvMap {
+    env_map: BTreeMap<String, String>,
+}
+
+impl SetupScriptEnvMap {
+    pub(crate) async fn new(env_path: &Utf8Path) -> Result<Self, SetupScriptError> {
+        let mut env_map = BTreeMap::new();
+        let f = tokio::fs::File::open(env_path).await.map_err(|error| {
+            SetupScriptError::EnvFileOpen {
+                path: env_path.to_owned(),
+                error,
+            }
+        })?;
+        let reader = BufReader::new(f);
+        let mut lines = reader.lines();
+        loop {
+            let line = lines
+                .next_line()
+                .await
+                .map_err(|error| SetupScriptError::EnvFileRead {
+                    path: env_path.to_owned(),
+                    error,
+                })?;
+            let Some(line) = line else { break };
+
+            // Split this line into key and value.
+            let (key, value) = match line.split_once('=') {
+                Some((key, value)) => (key, value),
+                None => {
+                    return Err(SetupScriptError::EnvFileParse {
+                        path: env_path.to_owned(),
+                        line: line.to_owned(),
+                    })
+                }
+            };
+
+            // Ban keys starting with `NEXTEST`.
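+            // (The `NEXTEST` prefix is reserved for environment variables set by nextest
+            // itself.)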
+            if key.starts_with("NEXTEST") {
+                return Err(SetupScriptError::EnvFileReservedKey {
+                    key: key.to_owned(),
+                });
+            }
+
+            env_map.insert(key.to_owned(), value.to_owned());
+        }
+
+        Ok(Self { env_map })
+    }
+
+    #[inline]
+    pub(crate) fn len(&self) -> usize {
+        self.env_map.len()
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct CompiledProfileScripts<State> {
+    pub(super) setup: Vec<ScriptId>,
+    pub(super) data: ProfileScriptData,
+    state: State,
+}
+
+impl CompiledProfileScripts<PreBuildPlatform> {
+    pub(super) fn new(
+        graph: &PackageGraph,
+        profile_name: &str,
+        source: &DeserializedProfileScriptConfig,
+        errors: &mut Vec<ConfigParseCompiledDataError>,
+    ) -> Option<Self> {
+        if source.platform.host.is_none()
+            && source.platform.target.is_none()
+            && source.filter.is_none()
+        {
+            errors.push(ConfigParseCompiledDataError {
+                profile_name: profile_name.to_owned(),
+                not_specified: true,
+                host_parse_error: None,
+                target_parse_error: None,
+                parse_errors: None,
+            });
+            return None;
+        }
+
+        let host_spec = MaybeTargetSpec::new(source.platform.host.as_deref());
+        let target_spec = MaybeTargetSpec::new(source.platform.target.as_deref());
+        let filter_expr = source.filter.as_ref().map_or(Ok(None), |filter| {
+            Some(FilteringExpr::parse(filter.clone(), graph)).transpose()
+        });
+
+        match (host_spec, target_spec, filter_expr) {
+            (Ok(host_spec), Ok(target_spec), Ok(expr)) => Some(Self {
+                setup: source.setup.clone(),
+                data: ProfileScriptData {
+                    host_spec,
+                    target_spec,
+                    expr,
+                },
+                state: PreBuildPlatform {},
+            }),
+            (maybe_host_err, maybe_platform_err, maybe_parse_err) => {
+                let host_platform_parse_error = maybe_host_err.err();
+                let platform_parse_error = maybe_platform_err.err();
+                let parse_errors = maybe_parse_err.err();
+
+                errors.push(ConfigParseCompiledDataError {
+                    profile_name: profile_name.to_owned(),
+                    not_specified: false,
+                    host_parse_error: host_platform_parse_error,
+                    target_parse_error: platform_parse_error,
+                    parse_errors,
+                });
+                None
+            }
+        }
+    }
+
+    pub(super) fn apply_build_platforms(
+        self,
+        build_platforms: &BuildPlatforms,
+    ) -> CompiledProfileScripts<FinalConfig> {
+        let host_eval = self.data.host_spec.eval(&build_platforms.host);
+        let host_test_eval = self.data.target_spec.eval(&build_platforms.host);
+        let target_eval = build_platforms
+            .target
+            .as_ref()
+            .map_or(host_test_eval, |triple| {
+                self.data.target_spec.eval(&triple.platform)
+            });
+
+        CompiledProfileScripts {
+            setup: self.setup,
+            data: self.data,
+            state: FinalConfig {
+                host_eval,
+                host_test_eval,
+                target_eval,
+            },
+        }
+    }
+}
+
+impl CompiledProfileScripts<FinalConfig> {
+    pub(super) fn is_enabled(&self, query: &TestQuery<'_>) -> bool {
+        if !self.state.host_eval {
+            return false;
+        }
+        if query.binary_query.platform == BuildPlatform::Host && !self.state.host_test_eval {
+            return false;
+        }
+        if query.binary_query.platform == BuildPlatform::Target && !self.state.target_eval {
+            return false;
+        }
+
+        if let Some(expr) = &self.data.expr {
+            expr.matches_test(query)
+        } else {
+            true
+        }
+    }
+}
+
+/// The name of a configuration script.
+#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
+pub struct ScriptId(pub ConfigIdentifier);
+
+impl ScriptId {
+    /// Creates a new script identifier.
+    pub fn new(identifier: SmolStr) -> Result<Self, InvalidConfigScriptName> {
+        let identifier = ConfigIdentifier::new(identifier).map_err(InvalidConfigScriptName)?;
+        Ok(Self(identifier))
+    }
+
+    /// Returns the name of the script as a [`ConfigIdentifier`].
+    pub fn as_identifier(&self) -> &ConfigIdentifier {
+        &self.0
+    }
+
+    #[cfg(test)]
+    pub(super) fn as_str(&self) -> &str {
+        self.0.as_str()
+    }
+}
+
+impl<'de> Deserialize<'de> for ScriptId {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        // Try and deserialize as a string.
+        let identifier = SmolStr::deserialize(deserializer)?;
+        Self::new(identifier).map_err(serde::de::Error::custom)
+    }
+}
+
+impl fmt::Display for ScriptId {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(super) struct ProfileScriptData {
+    host_spec: MaybeTargetSpec,
+    target_spec: MaybeTargetSpec,
+    expr: Option<FilteringExpr>,
+}
+
+/// Deserialized form of profile-specific script configuration before compilation.
+#[derive(Clone, Debug, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+pub(super) struct DeserializedProfileScriptConfig {
+    /// The host and/or target platforms to match against.
+    #[serde(default)]
+    pub(super) platform: PlatformStrings,
+
+    /// The filter expression to match against.
+    #[serde(default)]
+    filter: Option<String>,
+
+    /// The setup script or scripts to run.
+    #[serde(deserialize_with = "deserialize_script_ids")]
+    setup: Vec<ScriptId>,
+}
+
+/// Deserialized form of script configuration before compilation.
+///
+/// This is defined as a top-level element.
+#[derive(Clone, Debug, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+pub struct ScriptConfig {
+    /// The command to run. The first element is the program and the second element is a list
+    /// of arguments.
+    #[serde(deserialize_with = "deserialize_command")]
+    pub command: (String, Vec<String>),
+
+    /// An optional slow timeout for this command.
+    #[serde(default, deserialize_with = "super::deserialize_slow_timeout")]
+    pub slow_timeout: Option<SlowTimeout>,
+
+    /// An optional leak timeout for this command.
+    #[serde(default, with = "humantime_serde::option")]
+    pub leak_timeout: Option<Duration>,
+
+    /// Whether to capture standard output for this command.
+    #[serde(default)]
+    pub capture_stdout: bool,
+
+    /// Whether to capture standard error for this command.
+    #[serde(default)]
+    pub capture_stderr: bool,
+}
+
+impl ScriptConfig {
+    /// Returns the name of the program.
+    #[inline]
+    pub fn program(&self) -> &str {
+        &self.command.0
+    }
+
+    /// Returns the arguments to the command.
+    #[inline]
+    pub fn args(&self) -> &[String] {
+        &self.command.1
+    }
+
+    /// Returns true if at least some output isn't being captured.
+    #[inline]
+    pub fn no_capture(&self) -> bool {
+        !(self.capture_stdout && self.capture_stderr)
+    }
+}
+
+fn deserialize_script_ids<'de, D>(deserializer: D) -> Result<Vec<ScriptId>, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    struct ScriptIdVisitor;
+
+    impl<'de> serde::de::Visitor<'de> for ScriptIdVisitor {
+        type Value = Vec<ScriptId>;
+
+        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+            formatter.write_str("a script ID (string) or a list of script IDs")
+        }
+
+        fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
+        where
+            E: serde::de::Error,
+        {
+            Ok(vec![ScriptId::new(value.into()).map_err(E::custom)?])
+        }
+
+        fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+        where
+            A: serde::de::SeqAccess<'de>,
+        {
+            let mut ids = Vec::new();
+            while let Some(value) = seq.next_element::<String>()? {
{ + ids.push(ScriptId::new(value.into()).map_err(A::Error::custom)?); + } + Ok(ids) + } + } + + deserializer.deserialize_any(ScriptIdVisitor) +} + +fn deserialize_command<'de, D>(deserializer: D) -> Result<(String, Vec), D::Error> +where + D: serde::Deserializer<'de>, +{ + struct CommandVisitor; + + impl<'de> serde::de::Visitor<'de> for CommandVisitor { + type Value = (String, Vec); + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a Unix shell command or a list of arguments") + } + + fn visit_str(self, value: &str) -> Result + where + E: serde::de::Error, + { + let mut args = shell_words::split(value).map_err(E::custom)?; + if args.is_empty() { + return Err(E::invalid_value(serde::de::Unexpected::Str(value), &self)); + } + let program = args.remove(0); + Ok((program, args)) + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + let Some(program) = seq.next_element::()? else { + return Err(A::Error::invalid_length(0, &self)); + }; + let mut args = Vec::new(); + while let Some(value) = seq.next_element::()? { + args.push(value); + } + Ok((program, args)) + } + } + + deserializer.deserialize_any(CommandVisitor) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + config::{test_helpers::*, ConfigExperimental, NextestConfig, ToolConfigFile}, + errors::{ConfigParseErrorKind, UnknownConfigScriptError}, + }; + use camino_tempfile::tempdir; + use display_error_chain::DisplayErrorChain; + use indoc::indoc; + use maplit::btreeset; + use nextest_filtering::BinaryQuery; + use test_case::test_case; + + #[test] + fn test_scripts_basic() { + let config_contents = indoc! {r#" + [[profile.default.scripts]] + platform = { host = "x86_64-unknown-linux-gnu" } + filter = "test(script1)" + setup = ["foo", "bar"] + + [[profile.default.scripts]] + platform = { target = "aarch64-apple-darwin" } + filter = "test(script2)" + setup = "baz" + + [[profile.default.scripts]] + filter = "test(script3)" + # No matter which order scripts are specified here, they must always be run in the + # order defined below. + setup = ["baz", "foo", "@tool:my-tool:toolscript"] + + [script.foo] + command = "command foo" + + [script.bar] + command = ["cargo", "run", "-p", "bar"] + slow-timeout = { period = "60s", terminate-after = 2 } + + [script.baz] + command = "baz" + slow-timeout = "1s" + leak-timeout = "1s" + capture-stdout = true + capture-stderr = true + "# + }; + + let tool_config_contents = indoc! {r#" + [script.'@tool:my-tool:toolscript'] + command = "tool-command" + "# + }; + + let workspace_dir = tempdir().unwrap(); + + let graph = temp_workspace(workspace_dir.path(), config_contents); + let package_id = graph.workspace().iter().next().unwrap().id(); + let tool_path = workspace_dir.path().join(".config/my-tool.toml"); + std::fs::write(&tool_path, tool_config_contents).unwrap(); + + let tool_config_files = [ToolConfigFile { + tool: "my-tool".to_owned(), + config_file: tool_path, + }]; + + // First, check that if the experimental feature isn't enabled, we get an error. + let nextest_config_error = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + &tool_config_files, + &Default::default(), + ) + .unwrap_err(); + match nextest_config_error.kind() { + ConfigParseErrorKind::ExperimentalFeatureNotEnabled { feature } => { + assert_eq!(*feature, ConfigExperimental::SetupScripts); + } + other => panic!("unexpected error kind: {:?}", other), + } + + // Now, check with the experimental feature enabled. 
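+        // (In a real repository, this opt-in would presumably live in .config/nextest.toml as
+        // something like `experimental = ["setup-scripts"]`; here the test passes the feature
+        // set directly to NextestConfig::from_sources below.)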
+ let nextest_config_result = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + &tool_config_files, + &btreeset! { ConfigExperimental::SetupScripts }, + ) + .expect("config is valid"); + let profile = nextest_config_result + .profile("default") + .expect("valid profile name") + .apply_build_platforms(&build_platforms()); + + // This query matches the foo and bar scripts. + let query = TestQuery { + binary_query: BinaryQuery { + package_id, + kind: "lib", + binary_name: "my-binary", + platform: BuildPlatform::Host, + }, + test_name: "script1", + }; + let scripts = SetupScripts::new_with_queries(&profile, std::iter::once(query)); + assert_eq!(scripts.len(), 2, "two scripts should be enabled"); + assert_eq!( + scripts.enabled_scripts.get_index(0).unwrap().0.as_str(), + "foo", + "first script should be foo" + ); + assert_eq!( + scripts.enabled_scripts.get_index(1).unwrap().0.as_str(), + "bar", + "second script should be bar" + ); + + // This query matches the baz script. + let query = TestQuery { + binary_query: BinaryQuery { + package_id, + kind: "lib", + binary_name: "my-binary", + platform: BuildPlatform::Target, + }, + test_name: "script2", + }; + let scripts = SetupScripts::new_with_queries(&profile, std::iter::once(query)); + assert_eq!(scripts.len(), 1, "one script should be enabled"); + assert_eq!( + scripts.enabled_scripts.get_index(0).unwrap().0.as_str(), + "baz", + "first script should be baz" + ); + + // This query matches the baz, foo and tool scripts (but note the order). + let query = TestQuery { + binary_query: BinaryQuery { + package_id, + kind: "lib", + binary_name: "my-binary", + platform: BuildPlatform::Target, + }, + test_name: "script3", + }; + let scripts = SetupScripts::new_with_queries(&profile, std::iter::once(query)); + assert_eq!(scripts.len(), 3, "three scripts should be enabled"); + assert_eq!( + scripts.enabled_scripts.get_index(0).unwrap().0.as_str(), + "@tool:my-tool:toolscript", + "first script should be toolscript" + ); + assert_eq!( + scripts.enabled_scripts.get_index(1).unwrap().0.as_str(), + "foo", + "second script should be foo" + ); + assert_eq!( + scripts.enabled_scripts.get_index(2).unwrap().0.as_str(), + "baz", + "third script should be baz" + ); + } + + #[test_case( + indoc! {r#" + [script.foo] + command = "" + "#}, + "invalid value: string \"\", expected a Unix shell command or a list of arguments" + + ; "empty command" + )] + #[test_case( + indoc! {r#" + [script.foo] + command = [] + "#}, + "invalid length 0, expected a Unix shell command or a list of arguments" + + ; "empty command list" + )] + #[test_case( + indoc! {r#" + [script.foo] + "#}, + "script.foo: missing field `command`" + + ; "missing command" + )] + #[test_case( + indoc! {r#" + [script.foo] + command = "my-command" + slow-timeout = 34 + "#}, + r#"invalid type: integer `34`, expected a table ({ period = "60s", terminate-after = 2 }) or a string ("60s")"# + + ; "slow timeout is not a duration" + )] + #[test_case( + indoc! {r#" + [script.'@tool:foo'] + command = "my-command" + "#}, + r#"invalid configuration script name: tool identifier not of the form "@tool:tool-name:identifier": `@tool:foo`"# + + ; "invalid tool script name" + )] + #[test_case( + indoc! 
{r#" + [script.'#foo'] + command = "my-command" + "#}, + r#"invalid configuration script name: invalid identifier `#foo`"# + + ; "invalid script name" + )] + fn parse_scripts_invalid_deserialize(config_contents: &str, message: &str) { + let workspace_dir = tempdir().unwrap(); + + let graph = temp_workspace(workspace_dir.path(), config_contents); + + let nextest_config_error = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + &[][..], + &btreeset! { ConfigExperimental::SetupScripts }, + ) + .expect_err("config is invalid"); + let actual_message = DisplayErrorChain::new(nextest_config_error).to_string(); + + assert!( + actual_message.contains(message), + "nextest config error `{actual_message}` contains message `{message}`" + ); + } + + #[test_case( + indoc! {r#" + [script.foo] + command = "my-command" + + [[profile.default.scripts]] + setup = ["foo"] + "#}, + "default", + &[MietteJsonReport { + message: "at least one of `platform` and `filter` should be specified".to_owned(), + labels: vec![], + }] + + ; "neither platform nor filter specified" + )] + #[test_case( + indoc! {r#" + [script.foo] + command = "my-command" + + [[profile.default.scripts]] + platform = {} + setup = ["foo"] + "#}, + "default", + &[MietteJsonReport { + message: "at least one of `platform` and `filter` should be specified".to_owned(), + labels: vec![], + }] + + ; "empty platform map" + )] + #[test_case( + indoc! {r#" + [script.foo] + command = "my-command" + + [[profile.default.scripts]] + platform = { host = 'cfg(target_os = "linux' } + setup = ["foo"] + "#}, + "default", + &[MietteJsonReport { + message: "error parsing cfg() expression".to_owned(), + labels: vec![ + MietteJsonLabel { label: "expected one of `=`, `,`, `)` here".to_owned(), span: MietteJsonSpan { offset: 3, length: 1 } } + ] + }] + + ; "invalid platform expression" + )] + #[test_case( + indoc! {r#" + [script.foo] + command = "my-command" + + [[profile.ci.overrides]] + filter = 'test(/foo)' + setup = ["foo"] + "#}, + "ci", + &[MietteJsonReport { + message: "expected close regex".to_owned(), + labels: vec![ + MietteJsonLabel { label: "missing `/`".to_owned(), span: MietteJsonSpan { offset: 9, length: 0 } } + ] + }] + + ; "invalid filter expression" + )] + fn parse_scripts_invalid_compile( + config_contents: &str, + faulty_profile: &str, + expected_reports: &[MietteJsonReport], + ) { + let workspace_dir = tempdir().unwrap(); + + let graph = temp_workspace(workspace_dir.path(), config_contents); + + let error = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + &[][..], + &btreeset! 
{ ConfigExperimental::SetupScripts }, + ) + .expect_err("config is invalid"); + match error.kind() { + ConfigParseErrorKind::CompiledDataParseError(compile_errors) => { + assert_eq!( + compile_errors.len(), + 1, + "exactly one override error must be produced" + ); + let error = compile_errors.first().unwrap(); + assert_eq!( + error.profile_name, faulty_profile, + "compile error profile matches" + ); + let handler = miette::JSONReportHandler::new(); + let reports = error + .reports() + .map(|report| { + let mut out = String::new(); + handler.render_report(&mut out, report.as_ref()).unwrap(); + + let json_report: MietteJsonReport = serde_json::from_str(&out) + .unwrap_or_else(|err| { + panic!( + "failed to deserialize JSON message produced by miette: {err}" + ) + }); + json_report + }) + .collect::>(); + assert_eq!(&reports, expected_reports, "reports match"); + } + other => { + panic!("for config error {other:?}, expected ConfigParseErrorKind::CompiledDataParseError"); + } + } + } + + #[test_case( + indoc! {r#" + [script.'@tool:foo:bar'] + command = "my-command" + + [[profile.ci.overrides]] + setup = ["@tool:foo:bar"] + "#}, + &["@tool:foo:bar"] + + ; "tool config in main program")] + fn parse_scripts_invalid_defined(config_contents: &str, expected_invalid_scripts: &[&str]) { + let workspace_dir = tempdir().unwrap(); + + let graph = temp_workspace(workspace_dir.path(), config_contents); + + let error = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + &[][..], + &btreeset! { ConfigExperimental::SetupScripts }, + ) + .expect_err("config is invalid"); + match error.kind() { + ConfigParseErrorKind::InvalidConfigScriptsDefined(scripts) => { + assert_eq!( + scripts.len(), + expected_invalid_scripts.len(), + "correct number of scripts defined" + ); + for (script, expected_script) in scripts.iter().zip(expected_invalid_scripts) { + assert_eq!(script.as_str(), *expected_script, "script name matches"); + } + } + other => { + panic!("for config error {other:?}, expected ConfigParseErrorKind::InvalidConfigScriptsDefined"); + } + } + } + + #[test_case( + indoc! {r#" + [script.'blarg'] + command = "my-command" + + [[profile.ci.overrides]] + setup = ["blarg"] + "#}, + &["blarg"] + + ; "non-tool config in tool")] + fn parse_scripts_invalid_defined_by_tool( + tool_config_contents: &str, + expected_invalid_scripts: &[&str], + ) { + let workspace_dir = tempdir().unwrap(); + + let graph = temp_workspace(workspace_dir.path(), ""); + let tool_path = workspace_dir.path().join(".config/my-tool.toml"); + std::fs::write(&tool_path, tool_config_contents).unwrap(); + let tool_config_files = [ToolConfigFile { + tool: "my-tool".to_owned(), + config_file: tool_path, + }]; + + let error = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + &tool_config_files, + &btreeset! { ConfigExperimental::SetupScripts }, + ) + .expect_err("config is invalid"); + match error.kind() { + ConfigParseErrorKind::InvalidConfigScriptsDefinedByTool(scripts) => { + assert_eq!( + scripts.len(), + expected_invalid_scripts.len(), + "exactly one script must be defined" + ); + for (script, expected_script) in scripts.iter().zip(expected_invalid_scripts) { + assert_eq!(script.as_str(), *expected_script, "script name matches"); + } + } + other => { + panic!("for config error {other:?}, expected ConfigParseErrorKind::InvalidConfigScriptsDefinedByTool"); + } + } + } + + #[test_case( + indoc! 
{r#" + [script.foo] + command = "my-command" + + [[profile.default.scripts]] + filter = "test(script1)" + setup = "bar" + + [[profile.ci.scripts]] + filter = "test(script2)" + setup = ["baz"] + "#}, + vec![ + UnknownConfigScriptError { + profile_name: "default".to_owned(), + name: ScriptId::new("bar".into()).unwrap(), + }, + UnknownConfigScriptError { + profile_name: "ci".to_owned(), + name: ScriptId::new("baz".into()).unwrap(), + }, + ], + &["foo"] + + ; "unknown scripts" + )] + fn parse_scripts_invalid_unknown( + config_contents: &str, + expected_errors: Vec, + expected_known_scripts: &[&str], + ) { + let workspace_dir = tempdir().unwrap(); + + let graph = temp_workspace(workspace_dir.path(), config_contents); + + let error = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + &[][..], + &btreeset! { ConfigExperimental::SetupScripts }, + ) + .expect_err("config is invalid"); + match error.kind() { + ConfigParseErrorKind::UnknownConfigScripts { + errors, + known_scripts, + } => { + assert_eq!( + errors.len(), + expected_errors.len(), + "correct number of errors" + ); + for (error, expected_error) in errors.iter().zip(expected_errors) { + assert_eq!( + error.profile_name, expected_error.profile_name, + "profile name matches" + ); + assert_eq!( + error.name, expected_error.name, + "unknown script name matches" + ); + } + assert_eq!( + known_scripts.len(), + expected_known_scripts.len(), + "correct number of known scripts" + ); + for (script, expected_script) in known_scripts.iter().zip(expected_known_scripts) { + assert_eq!( + script.as_str(), + *expected_script, + "known script name matches" + ); + } + } + other => { + panic!("for config error {other:?}, expected ConfigParseErrorKind::UnknownConfigScripts"); + } + } + } +} diff --git a/nextest-runner/src/config/slow_timeout.rs b/nextest-runner/src/config/slow_timeout.rs index 2c7aba1ae98..ede01a7d533 100644 --- a/nextest-runner/src/config/slow_timeout.rs +++ b/nextest-runner/src/config/slow_timeout.rs @@ -16,6 +16,16 @@ pub struct SlowTimeout { pub(crate) grace_period: Duration, } +impl SlowTimeout { + /// A reasonable value for "maximum slow timeout". + pub(crate) const VERY_LARGE: Self = Self { + // See far_future() in pausable_sleep.rs for why this is roughly 30 years. 
+ period: Duration::from_secs(86400 * 365 * 30), + terminate_after: None, + grace_period: Duration::from_secs(10), + }; +} + fn default_grace_period() -> Duration { Duration::from_secs(10) } @@ -174,8 +184,13 @@ mod tests { let graph = temp_workspace(workspace_dir.path(), config_contents); - let nextest_config_result = - NextestConfig::from_sources(graph.workspace().root(), &graph, None, &[][..]); + let nextest_config_result = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + &[][..], + &Default::default(), + ); match expected_default { Ok(expected_default) => { diff --git a/nextest-runner/src/config/test_group.rs b/nextest-runner/src/config/test_group.rs index c91ba8e01e9..371aa791ae8 100644 --- a/nextest-runner/src/config/test_group.rs +++ b/nextest-runner/src/config/test_group.rs @@ -207,6 +207,7 @@ mod tests { tool: "my-tool".to_owned(), config_file: tool_path.clone(), }][..], + &Default::default(), ); match expected { Ok(expected_groups) => { @@ -294,7 +295,8 @@ mod tests { let graph = temp_workspace(workspace_path, config_contents); let workspace_root = graph.workspace().root(); - let config_res = NextestConfig::from_sources(workspace_root, &graph, None, &[][..]); + let config_res = + NextestConfig::from_sources(workspace_root, &graph, None, &[][..], &Default::default()); match expected { Ok(expected_groups) => { let config = config_res.expect("config is valid"); @@ -452,6 +454,7 @@ mod tests { config_file: tool2_path, }, ][..], + &Default::default(), ) .expect_err("config is invalid"); assert_eq!(config.tool(), tool); diff --git a/nextest-runner/src/config/test_threads.rs b/nextest-runner/src/config/test_threads.rs index 520ece1d642..2548588cda9 100644 --- a/nextest-runner/src/config/test_threads.rs +++ b/nextest-runner/src/config/test_threads.rs @@ -155,7 +155,13 @@ mod tests { let graph = temp_workspace(workspace_dir.path(), config_contents); - let config = NextestConfig::from_sources(graph.workspace().root(), &graph, None, []); + let config = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + [], + &Default::default(), + ); match n_threads { None => assert!(config.is_err()), Some(n) => assert_eq!( diff --git a/nextest-runner/src/config/threads_required.rs b/nextest-runner/src/config/threads_required.rs index 7248bdbebac..0c8b057e99d 100644 --- a/nextest-runner/src/config/threads_required.rs +++ b/nextest-runner/src/config/threads_required.rs @@ -167,7 +167,13 @@ mod tests { let graph = temp_workspace(workspace_dir.path(), config_contents); - let config = NextestConfig::from_sources(graph.workspace().root(), &graph, None, []); + let config = NextestConfig::from_sources( + graph.workspace().root(), + &graph, + None, + [], + &Default::default(), + ); match threads_required { None => assert!(config.is_err()), Some(t) => { diff --git a/nextest-runner/src/config/tool_config.rs b/nextest-runner/src/config/tool_config.rs index 511f775a89a..64c5903a9b4 100644 --- a/nextest-runner/src/config/tool_config.rs +++ b/nextest-runner/src/config/tool_config.rs @@ -208,8 +208,14 @@ mod tests { }, ); - let config = NextestConfig::from_sources(workspace_root, &graph, None, &tool_config_files) - .expect("config is valid"); + let config = NextestConfig::from_sources( + workspace_root, + &graph, + None, + &tool_config_files, + &Default::default(), + ) + .expect("config is valid"); let default_profile = config .profile(NextestConfig::DEFAULT_PROFILE) diff --git a/nextest-runner/src/errors.rs b/nextest-runner/src/errors.rs index 3bb6d59a1de..3c53e584873 
100644 --- a/nextest-runner/src/errors.rs +++ b/nextest-runner/src/errors.rs @@ -5,7 +5,7 @@ use crate::{ cargo_config::{TargetTriple, TargetTripleSource}, - config::{CustomTestGroup, TestGroup}, + config::{ConfigExperimental, CustomTestGroup, ScriptId, TestGroup}, helpers::{dylib_path_envvar, extract_abort_status}, reuse_build::ArchiveFormat, runner::AbortStatus, @@ -90,9 +90,9 @@ pub enum ConfigParseErrorKind { /// An error occurred while deserializing the config (version only). #[error(transparent)] VersionOnlyDeserializeError(Box>), - /// Errors occurred while parsing overrides. - #[error("error parsing overrides (destructure this variant for more details)")] - OverrideError(Vec), + /// Errors occurred while parsing compiled data. + #[error("error parsing compiled data (destructure this variant for more details)")] + CompiledDataParseError(Vec), /// An invalid set of test groups was defined by the user. #[error("invalid test groups defined: {}\n(test groups cannot start with '@tool:' unless specified by a tool)", .0.iter().join(", "))] InvalidTestGroupsDefined(BTreeSet), @@ -109,15 +109,60 @@ pub enum ConfigParseErrorKind { /// Known groups up to this point. known_groups: BTreeSet, }, + /// An invalid set of config scripts was defined by the user. + #[error("invalid config scripts defined: {}\n(config scripts cannot start with '@tool:' unless specified by a tool)", .0.iter().join(", "))] + InvalidConfigScriptsDefined(BTreeSet), + /// An invalid set of config scripts was defined by a tool config file. + #[error( + "invalid config scripts defined by tool: {}\n(config scripts must start with '@tool::')", .0.iter().join(", "))] + InvalidConfigScriptsDefinedByTool(BTreeSet), + /// Some config scripts were unknown. + #[error( + "unknown config scripts specified by config (destructure this variant for more details)" + )] + UnknownConfigScripts { + /// The list of errors that occurred. + errors: Vec, + + /// Known scripts up to this point. + known_scripts: BTreeSet, + }, + /// An unknown experimental feature or features were defined. + #[error("unknown experimental features defined (destructure this variant for more details)")] + UnknownExperimentalFeatures { + /// The set of unknown features. + unknown: BTreeSet, + + /// The set of known features. + known: BTreeSet, + }, + /// A tool specified an experimental feature. + /// + /// Tools are not allowed to specify experimental features. + #[error( + "tool config file specifies experimental features `{}` \ + -- only repository config files can do so", + .features.iter().join(", "), + )] + ExperimentalFeaturesInToolConfig { + /// The name of the experimental feature. + features: BTreeSet, + }, + /// An experimental feature was used but not enabled. + #[error("experimental feature `{feature}` is used but not enabled")] + ExperimentalFeatureNotEnabled { + /// The feature that was not enabled. + feature: ConfigExperimental, + }, } -/// An error that occurred while parsing config overrides. +/// An error that occurred while parsing config overrides or setup scripts. /// -/// Part of [`ConfigParseErrorKind::OverrideError`]. +/// Part of [`ConfigParseErrorKind::CompiledDataParseError`]. #[derive(Debug)] #[non_exhaustive] -pub struct ConfigParseOverrideError { - /// The name of the profile under which the override was found. +pub struct ConfigParseCompiledDataError { + /// The name of the profile under which the data was found. pub profile_name: String, /// True if neither the platform nor the filter have been specified. 
@@ -133,7 +178,7 @@ pub struct ConfigParseOverrideError { pub parse_errors: Option, } -impl ConfigParseOverrideError { +impl ConfigParseCompiledDataError { /// Returns [`miette::Report`]s for each error recorded by self. pub fn reports(&self) -> impl Iterator + '_ { let not_specified_report = self.not_specified.then(|| { @@ -181,6 +226,56 @@ pub(crate) enum RunTestError { CollectOutput(#[from] CollectTestOutputError), } +/// An error that occurred while setting up or running a setup script. +#[derive(Debug, Error)] +pub(crate) enum SetupScriptError { + /// An error occurred while creating a temporary path for the setup script. + #[error("error creating temporary path for setup script")] + TempPath(#[source] std::io::Error), + + /// An error occurred while executing the setup script. + #[error("error executing setup script")] + ExecFail(#[source] std::io::Error), + + /// An error occurred while collecting the output of the setup script. + #[error("error collecting setup script output")] + CollectOutput(#[from] CollectTestOutputError), + + /// An error occurred while waiting for the setup script to exit. + #[error("error waiting for setup script to exit")] + Wait(#[source] std::io::Error), + + /// An error occurred while opening the setup script environment file. + #[error("error opening environment file `{path}`")] + EnvFileOpen { + /// The path to the environment file. + path: Utf8PathBuf, + + /// The underlying error. + #[source] + error: std::io::Error, + }, + + /// An error occurred while reading the setup script environment file. + #[error("error reading environment file `{path}`")] + EnvFileRead { + /// The path to the environment file. + path: Utf8PathBuf, + + /// The underlying error. + #[source] + error: std::io::Error, + }, + + /// An error occurred while parsing the setup script environment file. + #[error("line `{line}` in environment file `{path}` not in KEY=VALUE format")] + EnvFileParse { path: Utf8PathBuf, line: String }, + + /// An environment variable key was reserved. + #[error("key `{key}` begins with `NEXTEST`, which is reserved for internal use")] + EnvFileReservedKey { key: String }, +} + /// An error was returned while collecting test output. #[derive(Debug, Error)] #[non_exhaustive] @@ -205,6 +300,17 @@ pub struct UnknownTestGroupError { pub name: TestGroup, } +/// An unknown script was specified in the config. +#[derive(Clone, Debug, Eq, PartialEq)] +#[non_exhaustive] +pub struct UnknownConfigScriptError { + /// The name of the profile under which the unknown script was found. + pub profile_name: String, + + /// The name of the unknown script. + pub name: ScriptId, +} + /// An error which indicates that a profile was requested but not known to nextest. #[derive(Clone, Debug, Error)] #[error("profile `{profile} not found (known profiles: {})`", .all_profiles.join(", "))] @@ -256,6 +362,11 @@ pub enum InvalidIdentifier { #[error("invalid custom test group name: {0}")] pub struct InvalidCustomTestGroupName(pub InvalidIdentifier); +/// The name of a configuration script is invalid (not a valid identifier). +#[derive(Clone, Debug, Error)] +#[error("invalid configuration script name: {0}")] +pub struct InvalidConfigScriptName(pub InvalidIdentifier); + /// Error returned while parsing a [`ToolConfigFile`](crate::config::ToolConfigFile) value. 
#[derive(Clone, Debug, Error)] pub enum ToolConfigFileParseError { diff --git a/nextest-runner/src/list/test_list.rs b/nextest-runner/src/list/test_list.rs index 4c4bfcc0c97..c72b6dbefa1 100644 --- a/nextest-runner/src/list/test_list.rs +++ b/nextest-runner/src/list/test_list.rs @@ -13,7 +13,7 @@ use crate::{ test_command::{LocalExecuteContext, TestCommand}, test_filter::TestFilterBuilder, }; -use camino::Utf8PathBuf; +use camino::{Utf8Path, Utf8PathBuf}; use futures::prelude::*; use guppy::{ graph::{PackageGraph, PackageMetadata}, @@ -181,6 +181,7 @@ pub struct TestList<'g> { test_count: usize, rust_build_meta: RustBuildMeta, rust_suites: BTreeMap>, + workspace_root: Utf8PathBuf, env: EnvironmentMap, updated_dylib_path: OsString, // Computed on first access. @@ -194,6 +195,7 @@ impl<'g> TestList<'g> { test_artifacts: I, rust_build_meta: RustBuildMeta, filter: &TestFilterBuilder, + workspace_root: Utf8PathBuf, env: EnvironmentMap, list_threads: usize, ) -> Result @@ -248,6 +250,7 @@ impl<'g> TestList<'g> { Ok(Self { rust_suites, + workspace_root, env, rust_build_meta, updated_dylib_path, @@ -262,6 +265,7 @@ impl<'g> TestList<'g> { test_bin_outputs: impl IntoIterator< Item = (RustTestArtifact<'g>, impl AsRef, impl AsRef), >, + workspace_root: Utf8PathBuf, rust_build_meta: RustBuildMeta, filter: &TestFilterBuilder, env: EnvironmentMap, @@ -291,6 +295,7 @@ impl<'g> TestList<'g> { Ok(Self { rust_suites, + workspace_root, env, rust_build_meta, updated_dylib_path, @@ -330,6 +335,16 @@ impl<'g> TestList<'g> { self.rust_suites.len() } + /// Returns the mapped workspace root. + pub fn workspace_root(&self) -> &Utf8Path { + &self.workspace_root + } + + /// Returns the environment variables to be used when running tests. + pub fn cargo_env(&self) -> &EnvironmentMap { + &self.env + } + /// Returns the updated dynamic library path used for tests. pub fn updated_dylib_path(&self) -> &OsStr { &self.updated_dylib_path @@ -414,6 +429,7 @@ impl<'g> TestList<'g> { pub(crate) fn empty() -> Self { Self { test_count: 0, + workspace_root: Utf8PathBuf::new(), rust_build_meta: RustBuildMeta::empty(), env: EnvironmentMap::empty(), updated_dylib_path: OsString::new(), @@ -1023,6 +1039,7 @@ mod tests { &"should-not-show-up-stderr", ), ], + Utf8PathBuf::from("/fake/path"), rust_build_meta, &test_filter, fake_env, diff --git a/nextest-runner/src/reporter.rs b/nextest-runner/src/reporter.rs index aefe10aff44..769bf161083 100644 --- a/nextest-runner/src/reporter.rs +++ b/nextest-runner/src/reporter.rs @@ -7,14 +7,14 @@ mod aggregator; use crate::{ - config::NextestProfile, + config::{NextestProfile, ScriptId}, errors::WriteEventError, helpers::write_test_name, list::{TestInstance, TestList}, reporter::aggregator::EventAggregator, runner::{ AbortStatus, ExecuteStatus, ExecutionDescription, ExecutionResult, ExecutionStatuses, - RetryData, RunStats, + RetryData, RunStats, SetupScriptExecuteStatus, }, }; pub use aggregator::heuristic_extract_description; @@ -390,6 +390,18 @@ impl<'a> TestReporter<'a> { fn update_progress_bar(event: &TestEvent<'_>, styles: &Styles, progress_bar: &ProgressBar) { match &event.kind { + TestEventKind::SetupScriptStarted { no_capture, .. } => { + // Hide the progress bar if either stderr or stdout are being passed through. + if *no_capture { + progress_bar.set_draw_target(ProgressDrawTarget::hidden()); + } + } + TestEventKind::SetupScriptFinished { no_capture, .. } => { + // Restore the progress bar if it was hidden. 
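+                // (The bar is only hidden if this script requested pass-through output in
+                // SetupScriptStarted above.)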
+ if *no_capture { + progress_bar.set_draw_target(ProgressDrawTarget::stderr()); + } + } TestEventKind::TestStarted { current_stats, running, @@ -435,7 +447,7 @@ impl<'a> RunningState<'a> { fn progress_bar_prefix(self, styles: &Styles) -> String { let (prefix_str, prefix_style) = match self { Self::Running(current_stats) => { - let prefix_style = if current_stats.any_failed() { + let prefix_style = if current_stats.failure_kind().is_some() { styles.fail } else { styles.pass @@ -587,6 +599,58 @@ impl<'a> TestReporterImpl<'a> { writeln!(writer)?; } + TestEventKind::SetupScriptStarted { + index, + total, + script_id, + command, + args, + .. + } => { + write!(writer, "{:>12} ", "SETUP".style(self.styles.pass))?; + // index + 1 so that it displays as e.g. "1/2" and "2/2". + write!(writer, "[{:>9}] ", format!("{}/{}", index + 1, total))?; + + self.write_setup_script(script_id, command, args, writer)?; + writeln!(writer)?; + } + TestEventKind::SetupScriptSlow { + script_id, + command, + args, + elapsed, + will_terminate, + } => { + if !*will_terminate && self.status_level >= StatusLevel::Slow { + write!(writer, "{:>12} ", "SETUP SLOW".style(self.styles.skip))?; + } else if *will_terminate { + write!(writer, "{:>12} ", "TERMINATING".style(self.styles.fail))?; + } + + self.write_slow_duration(*elapsed, writer)?; + self.write_setup_script(script_id, command, args, writer)?; + writeln!(writer)?; + } + TestEventKind::SetupScriptFinished { + script_id, + index, + total, + command, + args, + run_status, + .. + } => { + self.write_setup_script_status_line( + script_id, *index, *total, command, args, run_status, writer, + )?; + // Always display failing setup script output if it exists. We may change this in + // the future. + if !run_status.result.is_success() { + self.write_setup_script_stdout_stderr( + script_id, command, args, run_status, writer, + )?; + } + } TestEventKind::TestStarted { test_instance, .. } => { // In no-capture mode, print out a test start event. if self.no_capture { @@ -762,44 +826,103 @@ impl<'a> TestReporterImpl<'a> { .push((*test_instance, FinalOutput::Skipped(*reason))); } } - TestEventKind::RunBeginCancel { running, reason } => { + TestEventKind::RunBeginCancel { + setup_scripts_running, + running, + reason, + } => { self.cancel_status = self.cancel_status.max(Some(*reason)); - write!(writer, "{:>12} ", "Canceling".style(self.styles.fail))?; - let reason_str = match reason { + let reason_str: &str = match reason { + CancelReason::SetupScriptFailure => "setup script failure", CancelReason::TestFailure => "test failure", CancelReason::ReportError => "error", CancelReason::Signal => "signal", CancelReason::Interrupt => "interrupt", }; - let tests_str = tests_str(*running); - writeln!( + write!( writer, - "due to {}: {} {tests_str} still running", - reason_str.style(self.styles.fail), - running.style(self.styles.count) + "{:>12} due to {}", + "Canceling".style(self.styles.fail), + reason_str.style(self.styles.fail) )?; + + // At the moment, we can have either setup scripts or tests running, but not both. 
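+                // Setup scripts run serially before any tests start, so at most one of these
+                // two counts is nonzero.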
+ if *setup_scripts_running > 0 { + let s = setup_scripts_str(*setup_scripts_running); + write!( + writer, + ": {} {s} still running", + setup_scripts_running.style(self.styles.count), + )?; + } else if *running > 0 { + let tests_str = tests_str(*running); + write!( + writer, + ": {} {tests_str} still running", + running.style(self.styles.count), + )?; + } + writeln!(writer)?; } - TestEventKind::RunPaused { running } => { - let tests_str = tests_str(*running); - writeln!( + TestEventKind::RunPaused { + setup_scripts_running, + running, + } => { + write!( writer, - "{:>12} {} running {tests_str} due to {}", + "{:>12} due to {}", "Pausing".style(self.styles.pass), - running.style(self.styles.count), - "signal".style(self.styles.count), + "signal".style(self.styles.count) )?; + + // At the moment, we can have either setup scripts or tests running, but not both. + if *setup_scripts_running > 0 { + let s = setup_scripts_str(*setup_scripts_running); + write!( + writer, + ": {} {s} running", + setup_scripts_running.style(self.styles.count), + )?; + } else if *running > 0 { + let tests_str = tests_str(*running); + write!( + writer, + ": {} {tests_str} running", + running.style(self.styles.count), + )?; + } + writeln!(writer)?; } - TestEventKind::RunContinued { running } => { - let tests_str = tests_str(*running); - writeln!( + TestEventKind::RunContinued { + setup_scripts_running, + running, + } => { + write!( writer, - "{:>12} {} running {tests_str} due to {}", + "{:>12} due to {}", "Continuing".style(self.styles.pass), - running.style(self.styles.count), - "signal".style(self.styles.count), + "signal".style(self.styles.count) )?; + + // At the moment, we can have either setup scripts or tests running, but not both. + if *setup_scripts_running > 0 { + let s = setup_scripts_str(*setup_scripts_running); + write!( + writer, + ": {} {s} running", + setup_scripts_running.style(self.styles.count), + )?; + } else if *running > 0 { + let tests_str = tests_str(*running); + write!( + writer, + ": {} {tests_str} running", + running.style(self.styles.count), + )?; + } + writeln!(writer)?; } TestEventKind::RunFinished { start_time: _start_time, @@ -807,7 +930,7 @@ impl<'a> TestReporterImpl<'a> { run_stats, .. 
} => { - let summary_style = if run_stats.any_failed() { + let summary_style = if run_stats.failure_kind().is_some() { self.styles.fail } else { self.styles.pass @@ -917,6 +1040,42 @@ impl<'a> TestReporterImpl<'a> { Ok(()) } + #[allow(clippy::too_many_arguments)] + fn write_setup_script_status_line( + &self, + script_id: &ScriptId, + index: usize, + total: usize, + command: &str, + args: &[String], + status: &SetupScriptExecuteStatus, + writer: &mut impl Write, + ) -> io::Result<()> { + match status.result { + ExecutionResult::Pass => { + write!(writer, "{:>12} ", "SETUP PASS".style(self.styles.pass))?; + } + ExecutionResult::Leak => { + write!(writer, "{:>12} ", "SETUP LEAK".style(self.styles.skip))?; + } + other => { + let status_str = short_status_str(other); + write!( + writer, + "{:>12} ", + format!("SETUP {status_str}").style(self.styles.fail), + )?; + } + } + + write!(writer, "[{:>9}] ", format!("{}/{}", index + 1, total))?; + + self.write_setup_script(script_id, command, args, writer)?; + writeln!(writer)?; + + Ok(()) + } + fn write_status_line( &self, test_instance: TestInstance<'a>, @@ -1072,6 +1231,23 @@ impl<'a> TestReporterImpl<'a> { write_test_name(instance.name, &self.styles.list_styles, writer) } + fn write_setup_script( + &self, + script_id: &ScriptId, + command: &str, + args: &[String], + writer: &mut impl Write, + ) -> io::Result<()> { + let full_command = + shell_words::join(std::iter::once(command).chain(args.iter().map(|arg| arg.as_ref()))); + write!( + writer, + "{}: {}", + script_id.style(self.styles.script_id), + full_command + ) + } + fn write_duration(&self, duration: Duration, writer: &mut impl Write) -> io::Result<()> { // * > means right-align. // * 8 is the number of characters to pad to. @@ -1111,6 +1287,40 @@ impl<'a> TestReporterImpl<'a> { Ok(()) } + fn write_setup_script_stdout_stderr( + &self, + script_id: &ScriptId, + command: &str, + args: &[String], + run_status: &SetupScriptExecuteStatus, + writer: &mut impl Write, + ) -> io::Result<()> { + let (header_style, _output_style) = if run_status.result.is_success() { + (self.styles.pass, self.styles.pass_output) + } else { + (self.styles.fail, self.styles.fail_output) + }; + + if !run_status.stdout.is_empty() { + write!(writer, "\n{}", "--- ".style(header_style))?; + write!(writer, "{:21}", "STDOUT:".style(header_style))?; + self.write_setup_script(script_id, command, args, writer)?; + writeln!(writer, "{}", " ---".style(header_style))?; + + self.write_test_output(&run_status.stdout, writer)?; + } + if !run_status.stderr.is_empty() { + write!(writer, "\n{}", "--- ".style(header_style))?; + write!(writer, "{:21}", "STDERR:".style(header_style))?; + self.write_setup_script(script_id, command, args, writer)?; + writeln!(writer, "{}", " ---".style(header_style))?; + + self.write_test_output(&run_status.stderr, writer)?; + } + + writeln!(writer) + } + fn write_stdout_stderr( &self, test_instance: &TestInstance<'a>, @@ -1209,6 +1419,14 @@ impl<'a> TestReporterImpl<'a> { } } +fn setup_scripts_str(count: usize) -> &'static str { + if count == 1 { + "setup script" + } else { + "setup scripts" + } +} + fn tests_str(count: usize) -> &'static str { if count == 1 { "test" @@ -1329,6 +1547,69 @@ pub enum TestEventKind<'a> { run_id: Uuid, }, + /// A setup script started. + SetupScriptStarted { + /// The setup script index. + index: usize, + + /// The total number of setup scripts. + total: usize, + + /// The script ID. + script_id: ScriptId, + + /// The command to run. 
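+        /// (The program only; arguments are carried separately in `args`.)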
+ command: &'a str, + + /// The arguments to the command. + args: &'a [String], + + /// True if some output from the setup script is being passed through. + no_capture: bool, + }, + + /// A setup script was slow. + SetupScriptSlow { + /// The script ID. + script_id: ScriptId, + + /// The command to run. + command: &'a str, + + /// The arguments to the command. + args: &'a [String], + + /// The amount of time elapsed since the start of execution. + elapsed: Duration, + + /// True if the script has hit its timeout and is about to be terminated. + will_terminate: bool, + }, + + /// A setup script completed execution. + SetupScriptFinished { + /// The setup script index. + index: usize, + + /// The total number of setup scripts. + total: usize, + + /// The script ID. + script_id: ScriptId, + + /// The command to run. + command: &'a str, + + /// The arguments to the command. + args: &'a [String], + + /// True if some output from the setup script was passed through. + no_capture: bool, + + /// The execution status of the setup script. + run_status: SetupScriptExecuteStatus, + }, + // TODO: add events for BinaryStarted and BinaryFinished? May want a slightly different way to // do things, maybe a couple of reporter traits (one for the run as a whole and one for each // binary). @@ -1429,6 +1710,9 @@ pub enum TestEventKind<'a> { /// A cancellation notice was received. RunBeginCancel { + /// The number of setup scripts still running. + setup_scripts_running: usize, + /// The number of tests still running. running: usize, @@ -1438,12 +1722,18 @@ pub enum TestEventKind<'a> { /// A SIGTSTP event was received and the run was paused. RunPaused { + /// The number of setup scripts running. + setup_scripts_running: usize, + /// The number of tests currently running. running: usize, }, /// A SIGCONT event was received and the run is being continued. RunContinued { + /// The number of setup scripts that will be started up again. + setup_scripts_running: usize, + /// The number of tests that will be started up again. running: usize, }, @@ -1468,6 +1758,9 @@ pub enum TestEventKind<'a> { /// The reason why a test run is being cancelled. #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] pub enum CancelReason { + /// A setup script failed. + SetupScriptFailure, + /// A test failed and --no-fail-fast wasn't specified. TestFailure, @@ -1492,6 +1785,7 @@ struct Styles { retry_output: Style, fail_output: Style, skip: Style, + script_id: Style, list_styles: crate::list::Styles, } @@ -1506,6 +1800,7 @@ impl Styles { self.retry_output = Style::new().magenta(); self.fail_output = Style::new().magenta(); self.skip = Style::new().yellow().bold(); + self.script_id = Style::new().blue().bold(); self.list_styles.colorize(); } } diff --git a/nextest-runner/src/reporter/aggregator.rs b/nextest-runner/src/reporter/aggregator.rs index 3c18d0067ce..62f815d0c73 100644 --- a/nextest-runner/src/reporter/aggregator.rs +++ b/nextest-runner/src/reporter/aggregator.rs @@ -65,6 +65,9 @@ impl<'cfg> MetadataJunit<'cfg> { TestEventKind::RunStarted { .. } | TestEventKind::RunPaused { .. } | TestEventKind::RunContinued { .. } => {} + TestEventKind::SetupScriptStarted { .. } + | TestEventKind::SetupScriptSlow { .. } + | TestEventKind::SetupScriptFinished { .. } => {} TestEventKind::TestStarted { .. } => {} TestEventKind::TestSlow { .. } => {} TestEventKind::TestAttemptFailedWillRetry { .. 
} diff --git a/nextest-runner/src/runner.rs b/nextest-runner/src/runner.rs index 762f2009fa3..fd7fabb7643 100644 --- a/nextest-runner/src/runner.rs +++ b/nextest-runner/src/runner.rs @@ -6,10 +6,14 @@ //! The main structure in this module is [`TestRunner`]. use crate::{ - config::{NextestProfile, RetryPolicy, TestGroup, TestSettings, TestThreads}, + config::{ + NextestProfile, RetryPolicy, ScriptConfig, ScriptId, SetupScript, SetupScriptEnvMap, + SetupScriptExecuteData, SlowTimeout, TestGroup, TestSettings, TestThreads, + }, double_spawn::DoubleSpawnInfo, errors::{ - CollectTestOutputError, ConfigureHandleInheritanceError, RunTestError, TestRunnerBuildError, + CollectTestOutputError, ConfigureHandleInheritanceError, RunTestError, SetupScriptError, + TestRunnerBuildError, }, list::{TestExecuteContext, TestInstance, TestList}, reporter::{ @@ -33,7 +37,10 @@ use std::{ num::NonZeroUsize, pin::Pin, process::{ExitStatus, Stdio}, - sync::atomic::{AtomicBool, Ordering}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, time::{Duration, SystemTime}, }; use tokio::{ @@ -153,7 +160,7 @@ impl TestRunnerBuilder { pub fn build<'a>( self, test_list: &'a TestList, - profile: NextestProfile<'a>, + profile: &'a NextestProfile<'a>, handler_kind: SignalHandlerKind, double_spawn: DoubleSpawnInfo, target_runner: TargetRunner, @@ -236,7 +243,7 @@ impl<'a> TestRunner<'a> { #[derive(Debug)] struct TestRunnerInner<'a> { no_capture: bool, - profile: NextestProfile<'a>, + profile: &'a NextestProfile<'a>, test_threads: usize, // This is Some if the user specifies a retry policy over the command-line. force_retries: Option, @@ -422,6 +429,67 @@ impl<'a> TestRunnerInner<'a> { scope.spawn_cancellable(exec_fut, || ()); { + let setup_scripts = self.profile.setup_scripts(self.test_list); + let total = setup_scripts.len(); + log::debug!("running {} setup scripts", total); + + let mut setup_script_data = SetupScriptExecuteData::new(); + + // Run setup scripts one by one. + for (index, script) in setup_scripts.into_iter().enumerate() { + let this_run_sender = run_sender.clone(); + let (completion_sender, completion_receiver) = tokio::sync::oneshot::channel(); + + let script_id = script.id.clone(); + let config = script.config; + + let script_fut = async move { + // Subscribe to the receiver *before* checking canceled_ref. The ordering is + // important to avoid race conditions with the code that first sets + // canceled_ref and then sends the notification. + let mut this_forward_receiver = forward_sender_ref.subscribe(); + + if canceled_ref.load(Ordering::Acquire) { + // Check for test cancellation. + return; + } + + let _ = this_run_sender.send(InternalTestEvent::SetupScriptStarted { + script_id: script_id.clone(), + config, + index, + total, + }); + + let (status, env_map) = self + .run_setup_script(&script, &this_run_sender, &mut this_forward_receiver) + .await; + let status = status.into_external(); + + let _ = this_run_sender.send(InternalTestEvent::SetupScriptFinished { + script_id, + config, + index, + total, + status, + }); + + drain_forward_receiver(this_forward_receiver).await; + _ = completion_sender.send(env_map.map(|env_map| (script, env_map))); + }; + + // Run this setup script to completion. + scope.spawn_cancellable(script_fut, || ()); + let script_and_env_map = completion_receiver.blocking_recv().unwrap_or_else(|_| { + // This should never happen. 
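+                        // (completion_sender is consumed at the end of script_fut, so a recv
+                        // error can only mean the future was cancelled before finishing.)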
+ log::warn!("setup script future did not complete -- this is a bug, please report it"); + None + }); + if let Some((script, env_map)) = script_and_env_map { + setup_script_data.add_script(script, env_map); + } + } + // groups is going to be passed to future_queue_grouped. let groups = self .profile @@ -429,6 +497,8 @@ impl<'a> TestRunnerInner<'a> { .iter() .map(|(group_name, config)| (group_name, config.max_threads.compute())); + let setup_script_data = Arc::new(setup_script_data); + let run_fut = futures::stream::iter(self.test_list.iter_tests()) .map(move |test_instance| { let this_run_sender = run_sender.clone(); @@ -436,6 +506,7 @@ impl<'a> TestRunnerInner<'a> { let query = test_instance.to_test_query(); let settings = self.profile.settings_for(&query); + let setup_script_data = setup_script_data.clone(); let threads_required = settings.threads_required().compute(self.test_threads); let test_group = match settings.test_group() { @@ -499,6 +570,7 @@ impl<'a> TestRunnerInner<'a> { test_instance, retry_data, &settings, + &setup_script_data, &this_run_sender, &mut this_forward_receiver, delay, @@ -590,12 +662,227 @@ impl<'a> TestRunnerInner<'a> { // Helper methods // --- + /// Run an individual setup script in its own process. + async fn run_setup_script( + &self, + script: &SetupScript<'a>, + run_sender: &UnboundedSender>, + forward_receiver: &mut tokio::sync::broadcast::Receiver, + ) -> (InternalSetupScriptExecuteStatus, Option) { + let mut stopwatch = crate::time::stopwatch(); + + match self + .run_setup_script_inner(script, &mut stopwatch, run_sender, forward_receiver) + .await + { + Ok((status, env_map)) => (status, env_map), + Err(error) => { + // Put the error chain inside stderr. + let mut stderr = bytes::BytesMut::new(); + writeln!(&mut stderr, "{}", DisplayErrorChain::new(error)).unwrap(); + + ( + InternalSetupScriptExecuteStatus { + stdout: Bytes::new(), + stderr: stderr.freeze(), + result: ExecutionResult::ExecFail, + stopwatch_end: stopwatch.end(), + is_slow: false, + env_count: 0, + }, + None, + ) + } + } + } + + #[allow(clippy::too_many_arguments)] + async fn run_setup_script_inner( + &self, + script: &SetupScript<'a>, + stopwatch: &mut StopwatchStart, + run_sender: &UnboundedSender>, + forward_receiver: &mut tokio::sync::broadcast::Receiver, + ) -> Result<(InternalSetupScriptExecuteStatus, Option), SetupScriptError> + { + let mut cmd = script.make_command(&self.double_spawn, self.test_list)?; + let command_mut = cmd.command_mut(); + + command_mut.env("NEXTEST_RUN_ID", format!("{}", self.run_id)); + command_mut.stdin(Stdio::null()); + imp::set_process_group(command_mut); + + // If creating a job fails, we might be on an old system. Ignore this -- job objects are a + // best-effort thing. + let job = imp::Job::create().ok(); + + // The --no-capture CLI argument overrides the config. + if !self.no_capture { + if script.config.capture_stdout { + command_mut.stdout(std::process::Stdio::piped()); + } + if script.config.capture_stderr { + command_mut.stderr(std::process::Stdio::piped()); + } + } + + let (mut child, env_path) = cmd.spawn().map_err(SetupScriptError::ExecFail)?; + + // If assigning the child to the job fails, ignore this. This can happen if the process has + // exited. + let _ = imp::assign_process_to_job(&child, job.as_ref()); + + let mut status: Option = None; + // Unlike with tests, we don't automatically assume setup scripts are slow if they take a + // long time. 
For example, consider a setup script that performs a cargo build -- it can
+        // take an indeterminate amount of time. That's why we set a very large slow timeout
+        // rather than the test default of 60 seconds.
+        let slow_timeout = script
+            .config
+            .slow_timeout
+            .unwrap_or(SlowTimeout::VERY_LARGE);
+        let leak_timeout = script
+            .config
+            .leak_timeout
+            .unwrap_or(Duration::from_millis(100));
+        let mut is_slow = false;
+
+        let mut interval_sleep = std::pin::pin!(crate::time::pausable_sleep(slow_timeout.period));
+
+        let mut timeout_hit = 0;
+
+        // TODO: capture output
+        let child_stdout = child.stdout.take();
+        let child_stderr = child.stderr.take();
+        let mut stdout = bytes::BytesMut::new();
+        let mut stderr = bytes::BytesMut::new();
+
+        let (res, leaked) = {
+            // Set up futures for reading from stdout and stderr.
+            let mut collect_output_fut = std::pin::pin!(collect_output(
+                child_stdout,
+                &mut stdout,
+                child_stderr,
+                &mut stderr
+            ));
+            let mut collect_output_done = false;
+
+            let res = loop {
+                tokio::select! {
+                    res = &mut collect_output_fut, if !collect_output_done => {
+                        collect_output_done = true;
+                        res?;
+                    }
+                    res = child.wait() => {
+                        // The setup script finished executing.
+                        break res;
+                    }
+                    _ = &mut interval_sleep, if status.is_none() => {
+                        is_slow = true;
+                        timeout_hit += 1;
+                        let will_terminate = if let Some(terminate_after) = slow_timeout.terminate_after {
+                            NonZeroUsize::new(timeout_hit as usize)
+                                .expect("timeout_hit is always positive here")
+                                >= terminate_after
+                        } else {
+                            false
+                        };
+
+                        if !slow_timeout.grace_period.is_zero() {
+                            let _ = run_sender.send(InternalTestEvent::SetupScriptSlow {
+                                script_id: script.id.clone(),
+                                config: script.config,
+                                // Pass in the slow timeout period times timeout_hit, since
+                                // stopwatch.elapsed() tends to be slightly longer.
+                                elapsed: timeout_hit * slow_timeout.period,
+                                will_terminate,
+                            });
+                        }
+
+                        if will_terminate {
+                            // Attempt to terminate the slow script. Since there is a race
+                            // between shutting down a slow script and its own completion, we
+                            // silently ignore errors to avoid printing false warnings.
+                            imp::terminate_child(&mut child, TerminateMode::Timeout(slow_timeout.grace_period), forward_receiver, job.as_ref()).await;
+                            status = Some(ExecutionResult::Timeout);
+                            if slow_timeout.grace_period.is_zero() {
+                                break child.wait().await;
+                            }
+                            // Don't break here to give the wait task a chance to finish.
+                        } else {
+                            interval_sleep.as_mut().reset_original_duration();
+                        }
+                    }
+                    recv = forward_receiver.recv() => {
+                        handle_forward_event(
+                            &mut child,
+                            recv,
+                            stopwatch,
+                            interval_sleep.as_mut(),
+                            forward_receiver,
+                            job.as_ref(),
+                        ).await;
+                    }
+                }
+            };
+
+            // Once the process is done executing, wait up to leak_timeout for the pipes to shut
+            // down. Previously, this used to hang if spawned grandchildren inherited
+            // stdout/stderr but didn't shut down properly. Now, this detects those cases and
+            // marks them as leaked.
+            let leaked = loop {
+                // Ignore stop and continue events here since the leak timeout should be very
+                // small.
+                // TODO: we may want to consider them.
+                let sleep = tokio::time::sleep(leak_timeout);
+
+                tokio::select!
{ + res = &mut collect_output_fut, if !collect_output_done => { + collect_output_done = true; + res?; + } + () = sleep, if !collect_output_done => { + break true; + } + else => { + break false; + } + } + }; + + (res, leaked) + }; + + let output = res.map_err(SetupScriptError::Wait)?; + let exit_status = output; + + let status = status.unwrap_or_else(|| create_execution_result(exit_status, leaked)); + + let env_map = if status.is_success() { + Some(SetupScriptEnvMap::new(&env_path).await?) + } else { + None + }; + + Ok(( + InternalSetupScriptExecuteStatus { + stdout: stdout.freeze(), + stderr: stderr.freeze(), + result: status, + stopwatch_end: stopwatch.end(), + is_slow, + env_count: env_map.as_ref().map(|map| map.len()).unwrap_or(0), + }, + env_map, + )) + } + /// Run an individual test in its own process. + #[allow(clippy::too_many_arguments)] async fn run_test( &self, test: TestInstance<'a>, retry_data: RetryData, settings: &TestSettings, + setup_script_data: &SetupScriptExecuteData<'a>, run_sender: &UnboundedSender>, forward_receiver: &mut broadcast::Receiver, delay_before_start: Duration, @@ -608,6 +895,7 @@ impl<'a> TestRunnerInner<'a> { retry_data, &mut stopwatch, settings, + setup_script_data, run_sender, forward_receiver, delay_before_start, @@ -639,6 +927,7 @@ impl<'a> TestRunnerInner<'a> { retry_data: RetryData, stopwatch: &mut StopwatchStart, settings: &TestSettings, + setup_script_data: &SetupScriptExecuteData<'a>, run_sender: &UnboundedSender>, forward_receiver: &mut broadcast::Receiver, delay_before_start: Duration, @@ -654,6 +943,7 @@ impl<'a> TestRunnerInner<'a> { command_mut.env("__NEXTEST_ATTEMPT", format!("{}", retry_data.attempt)); command_mut.env("NEXTEST_RUN_ID", format!("{}", self.run_id)); command_mut.stdin(Stdio::null()); + setup_script_data.apply(&test.to_test_query(), command_mut); imp::set_process_group(command_mut); // If creating a job fails, we might be on an old system. Ignore this -- job objects are a @@ -1140,6 +1430,48 @@ impl InternalExecuteStatus { } } +/// Information about the execution of a setup script. +#[derive(Clone, Debug)] +pub struct SetupScriptExecuteStatus { + /// Standard output for this setup script. + pub stdout: Bytes, + /// Standard error for this setup script. + pub stderr: Bytes, + /// The execution result for this setup script: pass, fail or execution error. + pub result: ExecutionResult, + /// The time at which the script started. + pub start_time: SystemTime, + /// The time it took for the script to run. + pub time_taken: Duration, + /// Whether this script counts as slow. + pub is_slow: bool, + /// The number of environment variables that were set by this script. + pub env_count: usize, +} + +struct InternalSetupScriptExecuteStatus { + stdout: Bytes, + stderr: Bytes, + result: ExecutionResult, + stopwatch_end: StopwatchEnd, + is_slow: bool, + env_count: usize, +} + +impl InternalSetupScriptExecuteStatus { + fn into_external(self) -> SetupScriptExecuteStatus { + SetupScriptExecuteStatus { + stdout: self.stdout, + stderr: self.stderr, + result: self.result, + start_time: self.stopwatch_end.start_time, + time_taken: self.stopwatch_end.duration, + is_slow: self.is_slow, + env_count: self.env_count, + } + } +} + /// Statistics for a test run. #[derive(Copy, Clone, Default, Debug, Eq, PartialEq)] pub struct RunStats { @@ -1151,6 +1483,26 @@ pub struct RunStats { /// The total number of tests that finished running. pub finished_count: usize, + /// The total number of setup scripts that were expected to be run at the beginning. 
+ /// + /// If the test run is canceled, this will be more than `finished_count` at the end. + pub setup_scripts_initial_count: usize, + + /// The total number of setup scripts that finished running. + pub setup_scripts_finished_count: usize, + + /// The number of setup scripts that passed. + pub setup_scripts_passed: usize, + + /// The number of setup scripts that failed. + pub setup_scripts_failed: usize, + + /// The number of setup scripts that encountered an execution failure. + pub setup_scripts_exec_failed: usize, + + /// The number of setup scripts that timed out. + pub setup_scripts_timed_out: usize, + /// The number of tests that passed. Includes `passed_slow`, `flaky` and `leaky`. pub passed: usize, @@ -1187,19 +1539,53 @@ impl RunStats { /// * any tests failed /// * any tests encountered an execution failure pub fn is_success(&self) -> bool { + if self.setup_scripts_initial_count > self.setup_scripts_finished_count { + return false; + } if self.initial_run_count > self.finished_count { return false; } - if self.any_failed() { + if self.failure_kind().is_some() { return false; } true } - /// Returns true if any tests failed or were timed out. + /// Returns the kind of failure recorded by the run stats, if any tests failed or were timed + /// out. #[inline] - pub fn any_failed(&self) -> bool { - self.failed > 0 || self.exec_failed > 0 || self.timed_out > 0 + pub fn failure_kind(&self) -> Option { + if self.setup_scripts_failed > 0 + || self.setup_scripts_exec_failed > 0 + || self.setup_scripts_timed_out > 0 + { + return Some(RunStatsFailureKind::SetupScript); + } + + if self.failed > 0 || self.exec_failed > 0 || self.timed_out > 0 { + return Some(RunStatsFailureKind::Test); + } + + None + } + + fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) { + self.setup_scripts_finished_count += 1; + + match status.result { + ExecutionResult::Pass | ExecutionResult::Leak => { + self.setup_scripts_passed += 1; + } + ExecutionResult::Fail { .. } => { + self.setup_scripts_failed += 1; + } + ExecutionResult::ExecFail => { + self.setup_scripts_exec_failed += 1; + } + ExecutionResult::Timeout => { + self.setup_scripts_timed_out += 1; + } + } } fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) { @@ -1245,6 +1631,16 @@ impl RunStats { } } +/// A type summarizing the possible failures within a test run. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum RunStatsFailureKind { + /// A setup script failed. + SetupScript, + + /// A test failed. 
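+    /// (Includes execution failures and timeouts.)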
     #[inline]
-    pub fn any_failed(&self) -> bool {
-        self.failed > 0 || self.exec_failed > 0 || self.timed_out > 0
+    pub fn failure_kind(&self) -> Option<RunStatsFailureKind> {
+        if self.setup_scripts_failed > 0
+            || self.setup_scripts_exec_failed > 0
+            || self.setup_scripts_timed_out > 0
+        {
+            return Some(RunStatsFailureKind::SetupScript);
+        }
+
+        if self.failed > 0 || self.exec_failed > 0 || self.timed_out > 0 {
+            return Some(RunStatsFailureKind::Test);
+        }
+
+        None
+    }
+
+    fn on_setup_script_finished(&mut self, status: &SetupScriptExecuteStatus) {
+        self.setup_scripts_finished_count += 1;
+
+        match status.result {
+            ExecutionResult::Pass | ExecutionResult::Leak => {
+                self.setup_scripts_passed += 1;
+            }
+            ExecutionResult::Fail { .. } => {
+                self.setup_scripts_failed += 1;
+            }
+            ExecutionResult::ExecFail => {
+                self.setup_scripts_exec_failed += 1;
+            }
+            ExecutionResult::Timeout => {
+                self.setup_scripts_timed_out += 1;
+            }
+        }
     }

     fn on_test_finished(&mut self, run_statuses: &ExecutionStatuses) {
@@ -1245,6 +1631,16 @@ impl RunStats {
     }
 }

+/// A type summarizing the possible failures within a test run.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum RunStatsFailureKind {
+    /// A setup script failed.
+    SetupScript,
+
+    /// A test failed.
+    Test,
+}
+
 #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
 enum SignalCount {
     Once,
@@ -1282,6 +1678,7 @@ struct CallbackContext {
     stopwatch: StopwatchStart,
     run_stats: RunStats,
     fail_fast: bool,
+    setup_scripts_running: usize,
     running: usize,
     cancel_state: Option<CancelReason>,
     signal_count: Option<SignalCount>,
@@ -1302,6 +1699,7 @@ where
                 ..RunStats::default()
             },
             fail_fast,
+            setup_scripts_running: 0,
             running: 0,
             cancel_state: None,
             signal_count: None,
@@ -1339,6 +1737,65 @@ where
         event: InternalEvent<'a>,
     ) -> Result<Option<JobControlEvent>, InternalError> {
         match event {
+            InternalEvent::Test(InternalTestEvent::SetupScriptStarted {
+                script_id,
+                config,
+                index,
+                total,
+            }) => {
+                self.setup_scripts_running += 1;
+                self.callback(TestEventKind::SetupScriptStarted {
+                    index,
+                    total,
+                    script_id,
+                    command: config.program(),
+                    args: config.args(),
+                    no_capture: config.no_capture(),
+                })
+            }
+            InternalEvent::Test(InternalTestEvent::SetupScriptSlow {
+                script_id,
+                config,
+                elapsed,
+                will_terminate,
+            }) => self.callback(TestEventKind::SetupScriptSlow {
+                script_id,
+                command: config.program(),
+                args: config.args(),
+                elapsed,
+                will_terminate,
+            }),
+            InternalEvent::Test(InternalTestEvent::SetupScriptFinished {
+                script_id,
+                config,
+                index,
+                total,
+                status,
+            }) => {
+                self.setup_scripts_running -= 1;
+                self.run_stats.on_setup_script_finished(&status);
+                // Setup scripts failing always cause the entire test run to be cancelled
+                // (--no-fail-fast is ignored).
+                let fail_cancel = !status.result.is_success();
+
+                self.callback(TestEventKind::SetupScriptFinished {
+                    index,
+                    total,
+                    script_id,
+                    command: config.program(),
+                    args: config.args(),
+                    no_capture: config.no_capture(),
+                    run_status: status,
+                })?;
+
+                if fail_cancel {
+                    Err(InternalError::TestFailureCanceled(
+                        self.begin_cancel(CancelReason::SetupScriptFailure).err(),
+                    ))
+                } else {
+                    Ok(None)
+                }
+            }
             InternalEvent::Test(InternalTestEvent::Started { test_instance }) => {
                 self.running += 1;
                 self.callback(TestEventKind::TestStarted {
@@ -1442,6 +1899,7 @@ where
                 // Debounce stop signals.
                 if !self.stopwatch.is_paused() {
                     self.callback(TestEventKind::RunPaused {
+                        setup_scripts_running: self.setup_scripts_running,
                         running: self.running,
                     })?;
                     self.stopwatch.pause();
@@ -1456,6 +1914,7 @@ where
                 if self.stopwatch.is_paused() {
                     self.stopwatch.resume();
                     self.callback(TestEventKind::RunContinued {
+                        setup_scripts_running: self.setup_scripts_running,
                         running: self.running,
                     })?;
                     Ok(Some(JobControlEvent::Continue))
@@ -1485,6 +1944,7 @@ where
         if self.cancel_state < Some(reason) {
             self.cancel_state = Some(reason);
             self.basic_callback(TestEventKind::RunBeginCancel {
+                setup_scripts_running: self.setup_scripts_running,
                 running: self.running,
                 reason,
             })?;
@@ -1512,6 +1972,25 @@ enum InternalEvent<'a> {

 #[derive(Debug)]
 enum InternalTestEvent<'a> {
+    SetupScriptStarted {
+        script_id: ScriptId,
+        config: &'a ScriptConfig,
+        index: usize,
+        total: usize,
+    },
+    SetupScriptSlow {
+        script_id: ScriptId,
+        config: &'a ScriptConfig,
+        elapsed: Duration,
+        will_terminate: bool,
+    },
+    SetupScriptFinished {
+        script_id: ScriptId,
+        config: &'a ScriptConfig,
+        index: usize,
+        total: usize,
+        status: SetupScriptExecuteStatus,
+    },
     Started {
         test_instance: TestInstance<'a>,
     },
@@ -1889,10 +2368,11 @@ mod tests {
         let profile = config.profile(NextestConfig::DEFAULT_PROFILE).unwrap();
         let build_platforms = BuildPlatforms::new(None).unwrap();
         let handler_kind = SignalHandlerKind::Noop;
+        let profile = profile.apply_build_platforms(&build_platforms);
         let runner = builder
             .build(
                 &test_list,
-                profile.apply_build_platforms(&build_platforms),
+                &profile,
                 handler_kind,
                 DoubleSpawnInfo::disabled(),
                 TargetRunner::empty(),
@@ -1963,62 +2443,163 @@ mod tests {
             .is_success(),
             "skipped => not considered a failure"
         );
+
+        assert!(
+            !RunStats {
+                setup_scripts_initial_count: 2,
+                setup_scripts_finished_count: 1,
+                ..RunStats::default()
+            }
+            .is_success(),
+            "setup script not finished => failure"
+        );
+        assert!(
+            !RunStats {
+                setup_scripts_initial_count: 2,
+                setup_scripts_finished_count: 2,
+                setup_scripts_failed: 1,
+                ..RunStats::default()
+            }
+            .is_success(),
+            "setup script failed => failure"
+        );
+        assert!(
+            !RunStats {
+                setup_scripts_initial_count: 2,
+                setup_scripts_finished_count: 2,
+                setup_scripts_exec_failed: 1,
+                ..RunStats::default()
+            }
+            .is_success(),
+            "setup script exec failed => failure"
+        );
+        assert!(
+            !RunStats {
+                setup_scripts_initial_count: 2,
+                setup_scripts_finished_count: 2,
+                setup_scripts_timed_out: 1,
+                ..RunStats::default()
+            }
+            .is_success(),
+            "setup script timed out => failure"
+        );
+        assert!(
+            RunStats {
+                setup_scripts_initial_count: 2,
+                setup_scripts_finished_count: 2,
+                setup_scripts_passed: 2,
+                ..RunStats::default()
+            }
+            .is_success(),
+            "setup scripts passed => not considered a failure"
+        );
     }

     #[test]
     fn test_any_failed() {
-        assert!(
-            !RunStats::default().any_failed(),
+        assert_eq!(
+            RunStats::default().failure_kind(),
+            None,
             "empty run => none failed"
         );
-        assert!(
-            !RunStats {
+        assert_eq!(
+            RunStats {
                 initial_run_count: 42,
                 finished_count: 41,
                 ..RunStats::default()
             }
-            .any_failed(),
+            .failure_kind(),
+            None,
             "initial run count > final run count doesn't necessarily mean any failed"
         );
-        assert!(
+        assert_eq!(
             RunStats {
                 initial_run_count: 42,
                 finished_count: 42,
                 failed: 1,
                 ..RunStats::default()
             }
-            .any_failed(),
+            .failure_kind(),
+            Some(RunStatsFailureKind::Test),
             "failed => failure"
         );
-        assert!(
+        assert_eq!(
             RunStats {
                 initial_run_count: 42,
                 finished_count: 42,
                 exec_failed: 1,
                 ..RunStats::default()
             }
-            .any_failed(),
+            .failure_kind(),
+            Some(RunStatsFailureKind::Test),
             "exec failed => failure"
         );
-        assert!(
+        assert_eq!(
             RunStats {
                 initial_run_count: 42,
                 finished_count: 42,
                 timed_out: 1,
                 ..RunStats::default()
             }
-            .any_failed(),
+            .failure_kind(),
+            Some(RunStatsFailureKind::Test),
             "timed out => failure"
         );
-        assert!(
-            !RunStats {
+        assert_eq!(
+            RunStats {
                 initial_run_count: 42,
                 finished_count: 42,
                 skipped: 1,
                 ..RunStats::default()
             }
-            .any_failed(),
+            .failure_kind(),
+            None,
             "skipped => not considered a failure"
         );
+
+        assert_eq!(
+            RunStats {
+                setup_scripts_initial_count: 2,
+                setup_scripts_finished_count: 2,
+                setup_scripts_failed: 1,
+                ..RunStats::default()
+            }
+            .failure_kind(),
+            Some(RunStatsFailureKind::SetupScript),
+            "setup script failed => failure"
+        );
+        assert_eq!(
+            RunStats {
+                setup_scripts_initial_count: 2,
+                setup_scripts_finished_count: 2,
+                setup_scripts_exec_failed: 1,
+                ..RunStats::default()
+            }
+            .failure_kind(),
+            Some(RunStatsFailureKind::SetupScript),
+            "setup script exec failed => failure"
+        );
+        assert_eq!(
+            RunStats {
+                setup_scripts_initial_count: 2,
+                setup_scripts_finished_count: 2,
+                setup_scripts_timed_out: 1,
+                ..RunStats::default()
+            }
+            .failure_kind(),
+            Some(RunStatsFailureKind::SetupScript),
+            "setup script timed out => failure"
+        );
+        assert_eq!(
+            RunStats {
+                setup_scripts_initial_count: 2,
+                setup_scripts_finished_count: 2,
+                setup_scripts_passed: 2,
+                ..RunStats::default()
+            }
+            .failure_kind(),
+            None,
+            "setup scripts passed => not considered a failure"
+        );
     }
 }
diff --git a/nextest-runner/tests/integration/basic.rs b/nextest-runner/tests/integration/basic.rs
index d975f7552c5..b9876cf7521 100644
--- a/nextest-runner/tests/integration/basic.rs
+++ b/nextest-runner/tests/integration/basic.rs
@@ -95,11 +95,12 @@ fn test_run() -> Result<()> {
         .profile(NextestConfig::DEFAULT_PROFILE)
         .expect("default config is valid");
     let build_platforms = BuildPlatforms::new(None).unwrap();
+    let profile = profile.apply_build_platforms(&build_platforms);

     let runner = TestRunnerBuilder::default()
         .build(
             &test_list,
-            profile.apply_build_platforms(&build_platforms),
+            &profile,
             SignalHandlerKind::Noop,
             DoubleSpawnInfo::disabled(),
             TargetRunner::empty(),
@@ -198,11 +199,12 @@ fn test_run_ignored() -> Result<()> {
         .profile(NextestConfig::DEFAULT_PROFILE)
         .expect("default config is valid");
     let build_platforms = BuildPlatforms::new(None).unwrap();
+    let profile = profile.apply_build_platforms(&build_platforms);

     let runner = TestRunnerBuilder::default()
         .build(
             &test_list,
-            profile.apply_build_platforms(&build_platforms),
+            &profile,
             SignalHandlerKind::Noop,
             DoubleSpawnInfo::disabled(),
             TargetRunner::empty(),
@@ -410,7 +412,7 @@ fn test_retries(retries: Option) -> Result<()> {
     let runner = builder
         .build(
             &test_list,
-            profile,
+            &profile,
             SignalHandlerKind::Noop,
             DoubleSpawnInfo::disabled(),
             TargetRunner::empty(),
@@ -548,7 +550,7 @@ fn test_termination() -> Result<()> {
     let runner = TestRunnerBuilder::default()
         .build(
             &test_list,
-            profile,
+            &profile,
             SignalHandlerKind::Noop,
             DoubleSpawnInfo::disabled(),
             TargetRunner::empty(),
diff --git a/nextest-runner/tests/integration/fixtures.rs b/nextest-runner/tests/integration/fixtures.rs
index ef7d3aebc4c..19b50fa4d57 100644
--- a/nextest-runner/tests/integration/fixtures.rs
+++ b/nextest-runner/tests/integration/fixtures.rs
@@ -4,11 +4,11 @@
 use camino::{Utf8Path, Utf8PathBuf};
 use duct::cmd;
 use guppy::{graph::PackageGraph, MetadataCommand};
-use maplit::btreemap;
+use maplit::{btreemap, btreeset};
 use nextest_metadata::{FilterMatch, MismatchReason, RustBinaryId};
 use nextest_runner::{
     cargo_config::{CargoConfigs, EnvironmentMap},
-    config::{get_num_cpus, NextestConfig},
+    config::{get_num_cpus, ConfigExperimental, NextestConfig},
     double_spawn::DoubleSpawnInfo,
     list::{
         BinaryList, RustBuildMeta, RustTestArtifact, TestExecuteContext, TestList, TestListState,
@@ -253,8 +253,15 @@ pub(crate) fn workspace_root() -> Utf8PathBuf {
 }

 pub(crate) fn load_config() -> NextestConfig {
-    NextestConfig::from_sources(workspace_root(), &PACKAGE_GRAPH, None, [])
-        .expect("loaded fixture config")
+    NextestConfig::from_sources(
+        workspace_root(),
+        &PACKAGE_GRAPH,
+        None,
+        [],
+        // Enable setup scripts.
+        &btreeset! { ConfigExperimental::SetupScripts },
+    )
+    .expect("loaded fixture config")
 }

 pub(crate) static PACKAGE_GRAPH: Lazy<PackageGraph> = Lazy::new(|| {
@@ -364,6 +371,7 @@ impl FixtureTargets {
             test_bins,
             self.rust_build_meta.clone(),
             test_filter,
+            workspace_root(),
             self.env.to_owned(),
             get_num_cpus(),
         )
diff --git a/nextest-runner/tests/integration/target_runner.rs b/nextest-runner/tests/integration/target_runner.rs
index 2ef06e642d9..3eba81200c8 100644
--- a/nextest-runner/tests/integration/target_runner.rs
+++ b/nextest-runner/tests/integration/target_runner.rs
@@ -219,7 +219,7 @@ fn test_run_with_target_runner() -> Result<()> {
     let runner = runner
         .build(
             &test_list,
-            profile,
+            &profile,
             SignalHandlerKind::Noop,
             DoubleSpawnInfo::disabled(),
             target_runner,
diff --git a/site/src/SUMMARY.md b/site/src/SUMMARY.md
index 553d840794d..2c288e10e74 100644
--- a/site/src/SUMMARY.md
+++ b/site/src/SUMMARY.md
@@ -37,6 +37,7 @@
 ---
 - [Stability policy](book/stability.md)
 - [Experimental features](book/experimental-features.md)
+  - [Setup scripts](book/setup-scripts.md)
 ---
 - [How nextest works](book/how-it-works.md)
 - [Benchmarks](book/benchmarks.md)
diff --git a/site/src/book/experimental-features.md b/site/src/book/experimental-features.md
index 737cffb3793..d43c0e00ec3 100644
--- a/site/src/book/experimental-features.md
+++ b/site/src/book/experimental-features.md
@@ -1,4 +1,8 @@
 # Experimental features

 This section documents new features in nextest that aren't stable yet. These features
-may be changed or removed at any time, and must be accessed through an environment variable.
+may be changed or removed at any time, and must be accessed through a configuration option.
+
+## Current features
+
+- [Setup scripts](setup-scripts.md)
diff --git a/site/src/book/setup-scripts.md b/site/src/book/setup-scripts.md
new file mode 100644
index 00000000000..367fa7b5676
--- /dev/null
+++ b/site/src/book/setup-scripts.md
@@ -0,0 +1,117 @@
+# Setup scripts
+
+* **Nextest version:** 0.9.59 and above
+* **Enable with:** adding `experimental = ["setup-scripts"]` to `.config/nextest.toml`
+* **Tracking issue:** [#978](https://github.com/nextest-rs/nextest/issues/978)
+
+Nextest supports running *setup scripts* before tests are run. Setup scripts can be scoped to
+particular tests via [filter expressions](filter-expressions.md).
+
+Setup scripts are configured in two parts: *defining scripts*, and *setting up rules* for when they should be executed.
+
+## Defining scripts
+
+Setup scripts are defined using the top-level `script` configuration. For example, to define a script named "my-script", which runs `my-script.sh`:
+
+```toml
+[script.my-script]
+command = 'my-script.sh'
+```
+
+Commands can either be specified using Unix shell rules, or as a list of arguments. In the following example, `script1` and `script2` are equivalent.
+
+```toml
+[script.script1]
+command = 'script.sh -c "Hello, world!"'
+
+[script.script2]
+command = ['script.sh', '-c', 'Hello, world!']
+```
+
+Setup scripts can have the following configuration options attached to them:
+
+* **`slow-timeout`**: Mark a setup script [as slow](slow-tests.md) or [terminate it](slow-tests.md#terminating-tests-after-a-timeout), using the same configuration as for tests. By default, setup scripts are not marked as slow or terminated (this is different from the slow timeout for tests).
+* **`leak-timeout`**: Mark setup scripts [leaky](leaky-tests.md) after a timeout, using the same configuration as for tests. By default, the leak timeout is 100ms.
+* **`capture-stdout`**: `true` if the script's standard output should be captured, `false` if not. By default, this is `false`.
+* **`capture-stderr`**: `true` if the script's standard error should be captured, `false` if not. By default, this is `false`.
+
+### Example
+
+```toml
+[script.db-generate]
+command = 'cargo run -p db-generate'
+slow-timeout = { period = "60s", terminate-after = 2 }
+leak-timeout = "1s"
+capture-stdout = true
+capture-stderr = false
+```
+
+## Setting up rules
+
+In configuration, you can create rules for when to use scripts on a per-profile basis. This is done via the `profile.<profile-name>.scripts` array. For example, you can set up a script that generates a database if tests from the `db-tests` package, or any packages that depend on it, are run, using the `rdeps` [filter expression](filter-expressions.md) predicate:
+
+```toml
+[[profile.default.scripts]]
+filter = 'rdeps(db-tests)'
+setup = 'db-generate'
+```
+
+Setup scripts can also be filtered based on platform, using the rules listed in [Specifying platforms](specifying-platforms.md):
+
+```toml
+[[profile.default.scripts]]
+platform = { host = "cfg(unix)" }
+setup = 'script1'
+```
+
+A list of scripts can also be specified:
+
+```toml
+[[profile.default.scripts]]
+filter = 'test(/^script_tests::/)'
+setup = ['script1', 'script2']
+```
+
+## Script execution
+
+A given setup script *S* is only executed if the current profile has at least one rule where the `filter` and `platform` predicates match the current execution environment, and *S* is listed in the rule's `setup` key.
+
+Setup scripts are executed serially, in the order they are defined (*not* the order they're specified in the rules). If any setup script exits with a non-zero exit code, the entire test run is terminated.
+
+### Environment variables
+
+Setup scripts can define environment variables that will be exposed to tests that match the script. This is done by writing to the file that the `$NEXTEST_ENV` environment variable points to, from within the script.
+
+For example, let's say you have a script `my-env-script.sh`:
+
+```bash
+#!/bin/bash
+
+# Exit with 1 if NEXTEST_ENV isn't defined.
+if [ -z "$NEXTEST_ENV" ]; then
+    exit 1
+fi
+
+# Write out an environment variable to $NEXTEST_ENV.
+echo "NEXTEST_ENV_VAR=Hello, world!" >> "$NEXTEST_ENV"
+```
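+
+The same protocol works from any language. As an illustrative sketch (not part of this change; the binary and its package name are hypothetical), a compiled Rust setup script would append `KEY=VALUE` lines to the file that `$NEXTEST_ENV` points to:
+
+```rust
+use std::{env, fs::OpenOptions, io::Write};
+
+fn main() -> std::io::Result<()> {
+    // Nextest sets NEXTEST_ENV to a file path; fail loudly if it's absent.
+    let env_path = env::var("NEXTEST_ENV").expect("NEXTEST_ENV should be set by nextest");
+
+    // Append one KEY=VALUE pair per line.
+    let mut file = OpenOptions::new().append(true).open(env_path)?;
+    writeln!(file, "NEXTEST_ENV_VAR=Hello, world!")?;
+    Ok(())
+}
+```
+
+(Such a binary could be registered with something like `command = 'cargo run -p my-env-script'`, following the `db-generate` example above.)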
>> "$NEXTEST_ENV" +``` + +And you define a setup script and a corresponding rule: + +```toml +[script.my-env-script] +command = 'my-env-script.sh' + +[[profile.default.scripts]] +filter = 'test(my_env_test)' +setup = 'my-env-script' +``` + +Then, in tests which match this script, the environment variable will be available: + +```rust +#[test] +fn my_env_test() { + assert_eq!(std::env::var("NEXTEST_ENV_VAR"), Ok("Hello, world!".to_string())); +} +``` diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index f6c066b8673..bd74117c343 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -21,6 +21,8 @@ console = { version = "0.15.7" } either = { version = "1.9.0" } futures-channel = { version = "0.3.28", features = ["sink"] } futures-sink = { version = "0.3.28", default-features = false, features = ["std"] } +indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1.9.3", default-features = false, features = ["serde-1"] } +indexmap-f595c2ba2a3f28df = { package = "indexmap", version = "2.0.0", features = ["serde"] } log = { version = "0.4.20", default-features = false, features = ["std"] } memchr = { version = "2.6.3", features = ["use_std"] } miette = { version = "5.10.0", features = ["fancy"] } @@ -28,10 +30,10 @@ num-traits = { version = "0.2.16", default-features = false, features = ["libm", owo-colors = { version = "3.5.0", default-features = false, features = ["supports-colors"] } rand = { version = "0.8.5" } serde = { version = "1.0.188", features = ["alloc", "derive"] } -serde_json = { version = "1.0.107", features = ["unbounded_depth"] } +serde_json = { version = "1.0.107", features = ["preserve_order", "unbounded_depth"] } similar = { version = "2.2.1", features = ["inline", "unicode"] } target-spec = { version = "3.0.1", default-features = false, features = ["custom", "summaries"] } -tokio = { version = "1.32.0", features = ["io-util", "macros", "process", "rt-multi-thread", "signal", "sync", "time", "tracing"] } +tokio = { version = "1.32.0", features = ["fs", "io-util", "macros", "process", "rt-multi-thread", "signal", "sync", "time", "tracing"] } twox-hash = { version = "1.6.3" } uuid = { version = "1.4.1", features = ["v4"] } @@ -46,7 +48,7 @@ syn-f595c2ba2a3f28df = { package = "syn", version = "2.0.37", features = ["extra futures-core = { version = "0.3.28" } futures-sink = { version = "0.3.28" } futures-util = { version = "0.3.28", features = ["channel", "io", "sink"] } -indexmap = { version = "1.9.3", default-features = false, features = ["std"] } +indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1.9.3", default-features = false, features = ["std"] } libc = { version = "0.2.148", features = ["extra_traits"] } miniz_oxide = { version = "0.7.1", default-features = false, features = ["with-alloc"] } rustix = { version = "0.38.14", features = ["fs", "termios"] } @@ -59,7 +61,7 @@ libc = { version = "0.2.148", features = ["extra_traits"] } futures-core = { version = "0.3.28" } futures-sink = { version = "0.3.28" } futures-util = { version = "0.3.28", features = ["channel", "io", "sink"] } -indexmap = { version = "1.9.3", default-features = false, features = ["std"] } +indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1.9.3", default-features = false, features = ["std"] } libc = { version = "0.2.148", features = ["extra_traits"] } miniz_oxide = { version = "0.7.1", default-features = false, features = ["with-alloc"] } rustix = { version = "0.38.14", features = ["fs", "termios"] } @@ -72,7 +74,7 @@ libc = { version 
= "0.2.148", features = ["extra_traits"] } futures-core = { version = "0.3.28" } futures-sink = { version = "0.3.28" } futures-util = { version = "0.3.28", features = ["channel", "io", "sink"] } -indexmap = { version = "1.9.3", default-features = false, features = ["std"] } +indexmap-dff4ba8e3ae991db = { package = "indexmap", version = "1.9.3", default-features = false, features = ["std"] } tokio = { version = "1.32.0", default-features = false, features = ["net"] } winapi = { version = "0.3.9", default-features = false, features = ["basetsd", "consoleapi", "handleapi", "jobapi2", "minwinbase", "minwindef", "ntsecapi", "processenv", "processthreadsapi", "psapi", "std", "synchapi", "winbase", "wincon", "winerror", "winnt", "ws2ipdef", "ws2tcpip", "wtypesbase"] } windows-sys = { version = "0.48.0", features = ["Win32_Foundation", "Win32_Networking_WinSock", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console", "Win32_System_Diagnostics_Debug", "Win32_System_Environment", "Win32_System_IO", "Win32_System_LibraryLoader", "Win32_System_Memory", "Win32_System_Pipes", "Win32_System_Registry", "Win32_System_SystemServices", "Win32_System_Threading", "Win32_System_Time", "Win32_System_WindowsProgramming", "Win32_UI_Shell"] }