From d10e15e967c11de6aac4a68fe8e177e9dfa1690e Mon Sep 17 00:00:00 2001 From: Lucas Kent Date: Fri, 16 Feb 2024 11:53:11 +1100 Subject: [PATCH] Convert run commands --- .github/workflows/windsock_benches.yaml | 10 +- windsock/src/bench.rs | 44 ++-- windsock/src/cli.rs | 148 ++++++----- windsock/src/lib.rs | 330 +++++++++++------------- windsock/src/list.rs | 4 +- 5 files changed, 260 insertions(+), 276 deletions(-) diff --git a/.github/workflows/windsock_benches.yaml b/.github/workflows/windsock_benches.yaml index e63f558b4..caba5f2cf 100644 --- a/.github/workflows/windsock_benches.yaml +++ b/.github/workflows/windsock_benches.yaml @@ -37,13 +37,13 @@ jobs: echo '1' | sudo tee /proc/sys/kernel/perf_event_paranoid # run some extra cases that arent handled by nextest - cargo windsock --bench-length-seconds 5 --operations-per-second 100 --profilers flamegraph --name cassandra,compression=none,connection_count=1,driver=scylla,operation=read_i64,protocol=v4,shotover=standard,topology=single - cargo windsock --bench-length-seconds 5 --operations-per-second 100 --profilers samply --name cassandra,compression=none,connection_count=1,driver=scylla,operation=read_i64,protocol=v4,shotover=standard,topology=single - cargo windsock --bench-length-seconds 5 --operations-per-second 100 --profilers sys_monitor --name kafka,shotover=standard,size=1B,topology=single - cargo windsock --bench-length-seconds 5 --operations-per-second 100 --profilers shotover_metrics --name redis,encryption=none,operation=get,shotover=standard,topology=single + cargo windsock local-run --bench-length-seconds 5 --operations-per-second 100 --profilers flamegraph cassandra,compression=none,connection_count=1,driver=scylla,operation=read_i64,protocol=v4,shotover=standard,topology=single + cargo windsock local-run --bench-length-seconds 5 --operations-per-second 100 --profilers samply cassandra,compression=none,connection_count=1,driver=scylla,operation=read_i64,protocol=v4,shotover=standard,topology=single + cargo windsock local-run --bench-length-seconds 5 --operations-per-second 100 --profilers sys_monitor kafka,shotover=standard,size=1B,topology=single + cargo windsock local-run --bench-length-seconds 5 --operations-per-second 100 --profilers shotover_metrics redis,encryption=none,operation=get,shotover=standard,topology=single # windsock/examples/cassandra.rs - this can stay here until windsock is moved to its own repo - cargo run --release --example cassandra -- --bench-length-seconds 5 --operations-per-second 100 + cargo run --release --example cassandra -- local-run --bench-length-seconds 5 --operations-per-second 100 - name: Ensure that tests did not create or modify any files that arent .gitignore'd run: | if [ -n "$(git status --porcelain)" ]; then diff --git a/windsock/src/bench.rs b/windsock/src/bench.rs index a6731343a..fa8fe1ce4 100644 --- a/windsock/src/bench.rs +++ b/windsock/src/bench.rs @@ -1,4 +1,4 @@ -use crate::cli::Args; +use crate::cli::RunArgs; use crate::report::{report_builder, Report, ReportArchive}; use crate::tables::ReportColumn; use anyhow::Result; @@ -34,7 +34,7 @@ impl BenchState { pub async fn orchestrate( &mut self, - args: &Args, + args: &RunArgs, running_in_release: bool, cloud_resources: Option, ) { @@ -52,10 +52,10 @@ impl BenchState { PathBuf::new() }; - if args.cloud { + if let Some(cloud_resources) = cloud_resources { self.bench .orchestrate_cloud( - cloud_resources.unwrap(), + cloud_resources, running_in_release, Profiling { results_path, @@ -85,7 +85,7 @@ impl BenchState { }]); } - pub 
async fn run(&mut self, args: &Args, running_in_release: bool, resources: &str) { + pub async fn run(&mut self, args: &RunArgs, running_in_release: bool, resources: &str) { let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); let process = tokio::spawn(report_builder( self.tags.clone(), @@ -173,9 +173,9 @@ pub trait Bench { /// Call within `Bench::orchestrate_local` to call `Bench::run` async fn execute_run(&self, resources: &str, bench_parameters: &BenchParameters) { - let internal_run = format!("{} {}", self.name(), resources); + let name_and_resources = format!("{} {}", self.name(), resources); let output = tokio::process::Command::new(std::env::current_exe().unwrap().as_os_str()) - .args(run_args_vec(internal_run, bench_parameters)) + .args(run_args_vec(name_and_resources, bench_parameters)) .output() .await .unwrap(); @@ -188,8 +188,8 @@ pub trait Bench { /// Call within `Bench::orchestrate_cloud` to determine how to invoke the uploaded windsock executable fn run_args(&self, resources: &str, bench_parameters: &BenchParameters) -> String { - let internal_run = format!("\"{} {}\"", self.name(), resources); - run_args_vec(internal_run, bench_parameters).join(" ") + let name_and_resources = format!("\"{} {}\"", self.name(), resources); + run_args_vec(name_and_resources, bench_parameters).join(" ") } fn name(&self) -> String { @@ -197,8 +197,9 @@ pub trait Bench { } } -fn run_args_vec(internal_run: String, bench_parameters: &BenchParameters) -> Vec { +fn run_args_vec(name_and_resources: String, bench_parameters: &BenchParameters) -> Vec { let mut args = vec![]; + args.push("internal-run".to_owned()); args.push("--bench-length-seconds".to_owned()); args.push(bench_parameters.runtime_seconds.to_string()); @@ -207,8 +208,7 @@ fn run_args_vec(internal_run: String, bench_parameters: &BenchParameters) -> Vec args.push(ops.to_string()); }; - args.push("--internal-run".to_owned()); - args.push(internal_run); + args.push(name_and_resources); args } @@ -219,7 +219,7 @@ pub struct BenchParameters { } impl BenchParameters { - fn from_args(args: &Args) -> Self { + fn from_args(args: &RunArgs) -> Self { BenchParameters { runtime_seconds: args.bench_length_seconds.unwrap_or(15), operations_per_second: args.operations_per_second, @@ -237,21 +237,15 @@ pub(crate) struct Tags(pub HashMap); impl Tags { pub fn get_name(&self) -> String { - let mut result = if let Some(name) = self.0.get("name") { - name.clone() - } else { - "".to_string() - }; + let mut result = String::new(); let mut tags: Vec<(&String, &String)> = self.0.iter().collect(); tags.sort_by_key(|x| x.0); for (key, value) in tags { - if key != "name" { - if !result.is_empty() { - write!(result, ",").unwrap(); - } - write!(result, "{key}={value}").unwrap(); + if !result.is_empty() { + write!(result, ",").unwrap(); } + write!(result, "{key}={value}").unwrap(); } result } @@ -265,10 +259,8 @@ impl Tags { let key = pair.next().unwrap().to_owned(); let value = pair.next().unwrap().to_owned(); map.insert(key, value); - } else if map.contains_key("name") { - panic!("The name tag was already set and a tag without an '=' was found") } else { - map.insert("name".to_owned(), tag.to_owned()); + panic!("tag without an '=' was found") } } Tags(map) diff --git a/windsock/src/cli.rs b/windsock/src/cli.rs index 59384c7ff..095095235 100644 --- a/windsock/src/cli.rs +++ b/windsock/src/cli.rs @@ -1,5 +1,23 @@ use anyhow::{anyhow, Error}; -use clap::{Parser, Subcommand}; +use clap::{Args, Parser, Subcommand}; + +const ABOUT: &str = r#"Bench Names: + Each benchmark 
has a unique name; this name is used by many options listed below. + The name is derived from an alphabetical sorting of its tags, so you won't find it directly in the bench + implementation, but it will be listed in `--list`. + +Tag Filters: + Many options below take tag filters that specify which benches to include. + A bench is included only if it matches every tag in the filter. + + So: + * The filter "foo=some_value" will include only benches with the tag key `foo` and the tag value `some_value` + * The filter "foo=some_value bar=another_value" will include only benches that match "foo=some_value" and "bar=another_value" + * The filter "" will include all benches + + A filter's tags can also be separated by commas, allowing names to function as filters. + So: foo=some_value,bar=another_value is a name, but it can also be used where a filter is accepted. + "#; #[derive(Subcommand, Clone)] pub enum Command { @@ -7,6 +25,35 @@ pub enum Command { #[clap(verbatim_doc_comment)] List, + /// Create cloud resources for running benches + #[clap(verbatim_doc_comment)] + CloudSetup { + /// e.g. "db=kafka connection_count=100" + #[clap(verbatim_doc_comment)] + filter: String, + }, + + /// Run benches in the cloud using the resources created by cloud-setup + #[clap(verbatim_doc_comment)] + CloudRun(RunArgs), + + /// Clean up cloud resources created by cloud-setup + /// Make sure to call this when your benchmarking session is finished! + #[clap(verbatim_doc_comment)] + CloudCleanup, + + /// cloud-setup, cloud-run and cloud-cleanup combined into a single command. + /// Convenient for getting a quick understanding of performance. + /// However, if you are performing optimization work, prefer the individual commands as you will get: + /// * more stable results (same cloud instance) + /// * faster results (skip recreating and destroying cloud resources) + #[clap(verbatim_doc_comment)] + CloudSetupRunCleanup(RunArgs), + + /// Run benches entirely on your local machine + #[clap(verbatim_doc_comment)] + LocalRun(RunArgs), + /// The results of the last benchmarks run becomes the new baseline from which future benchmark runs will be compared. #[clap(verbatim_doc_comment)] BaselineSet, @@ -30,7 +77,7 @@ pub enum Command { #[clap(long, verbatim_doc_comment)] ignore_baseline: bool, - /// e.g. "db=kafka OPS=10000" + /// e.g. "db=kafka connection_count=100" #[clap(verbatim_doc_comment)] filter: Option<String>, }, @@ -39,7 +86,7 @@ pub enum Command { /// Comparing various benches against a specific base bench. /// /// Usage: First provide the base benchmark name then provide benchmark names to compare against the base. - --compare_by_name "base_name other_name1 other_name2" + "base_name other_name1 other_name2" #[clap(verbatim_doc_comment)] CompareByName { filter: String }, @@ -47,39 +94,17 @@ pub enum Command { /// Comparing benches matching tag filters against a specific base bench. /// /// Usage: First provide the base benchmark name then provide tag filters - --compare_by_tags "base_name db=kafka OPS=10000" + "base_name db=kafka connection_count=10" #[clap(verbatim_doc_comment)] CompareByTags { filter: String }, -} -const ABOUT: &str = r#"Bench Names: - Each benchmark has a unique name, this name is used by many options listed below. - The name is derived from its tags so you wont find it directly in the bench implementation but it will be listed in `--list`. - -Tag Filters: - Many options below take tag filters that specify which benches to include. 
- Tag filters specify which benches to include and the filter results are unioned. - - So: - * The filter "foo=some_value" will include only benches with the tag key `foo` and the tag value `some_value` - * The filter "foo=some_value bar=another_value" will include only benches that match "foo=some_value" and "bar=another_value" - * The filter "" will include all benches"#; - -#[derive(Parser, Clone)] -#[clap(about=ABOUT)] -pub struct Args { - /// Run all benches that match the specified tag key/values. - /// `tag_key=tag_value foo=bar` + /// Not for human use. Call this from your bench orchestration method to launch your bencher. #[clap(verbatim_doc_comment)] - pub filter: Option<String>, - - #[command(subcommand)] - pub command: Option<Command>, - - /// Run a specific bench with the name produced via `--list`. - #[clap(long, verbatim_doc_comment)] - pub name: Option<String>, + InternalRun(RunArgs), +} +#[derive(Args, Clone)] +pub struct RunArgs { /// Instruct benches to profile the application under test with the specified profilers. /// Benches that do not support the specified profilers will be skipped. #[clap(long, verbatim_doc_comment, value_delimiter = ',')] @@ -95,31 +120,28 @@ pub struct Args { #[clap(long, verbatim_doc_comment)] pub operations_per_second: Option<u64>, - /// By default windsock will run benches on your local machine. - /// Set this flag to have windsock run the benches in your configured cloud. - #[clap(long, verbatim_doc_comment)] - pub cloud: bool, - - /// Windsock will automatically cleanup cloud resources after benches have been run. - /// However this command exists to force cleanup in case windsock panicked before automatic cleanup could occur. - #[clap(long, verbatim_doc_comment)] - pub cleanup_cloud_resources: bool, - - /// Skip running of benches. - /// Skip automatic deletion of cloud resources on bench run completion. - /// Instead, just create cloud resources and write details of the resources to disk so they may be restored via `--load-cloud-resources-file` - #[clap(long, verbatim_doc_comment)] - pub store_cloud_resources_file: bool, + /// Run all benches that match the specified tag key/values. + /// `tag_key=tag_value foo=bar` + #[clap(verbatim_doc_comment)] + pub filter: Option<String>, - /// Skip automatic creation of cloud resources on bench run completion. - /// Skip automatic deletion of cloud resources on bench run completion. - /// Instead, details of the resources are loaded from disk as saved via a previous run using `--store-cloud-resources-file` - #[clap(long, verbatim_doc_comment)] - pub load_cloud_resources_file: bool, +impl RunArgs { + pub fn filter(&self) -> String { + match &self.filter { + // convert a name into a filter by swapping commas for spaces + Some(filter) => filter.replace(',', " "), + // If not provided use the empty filter + None => String::new(), + } + } +} - /// Not for human use. Call this from your bench orchestration method to launch your bencher. 
- #[clap(long, verbatim_doc_comment)] - pub internal_run: Option, +#[derive(Parser)] +#[clap(about=ABOUT)] +pub struct WindsockArgs { + #[command(subcommand)] + pub command: Option, #[clap(long, hide(true))] list: bool, @@ -131,7 +153,7 @@ pub struct Args { ignored: bool, #[clap(long, hide(true))] - exact: bool, + pub exact: Option, #[clap(long, hide(true))] nocapture: bool, @@ -142,7 +164,7 @@ enum NextestFormat { Terse, } -impl Args { +impl WindsockArgs { pub fn nextest_list(&self) -> bool { self.list } @@ -155,17 +177,21 @@ impl Args { self.list && matches!(&self.format, Some(NextestFormat::Terse)) && self.ignored } - pub fn nextest_run_by_name(&self) -> bool { - self.nocapture && self.exact + pub fn nextest_run_by_name(&self) -> Option<&str> { + if self.nocapture { + self.exact.as_deref() + } else { + None + } } pub fn nextest_invalid_args(&self) -> Option { if self.format.is_some() && self.list { Some(anyhow!("`--format` only exists for nextest compatibility and is not supported without `--list`")) - } else if self.nocapture && !self.exact { + } else if self.nocapture && self.exact.is_none() { Some(anyhow!("`--nocapture` only exists for nextest compatibility and is not supported without `--exact`")) - } else if self.exact && !self.nocapture { - Some(anyhow!("`--nocapture` only exists for nextest compatibility and is not supported without `--nocapture`")) + } else if self.exact.is_some() && !self.nocapture { + Some(anyhow!("`--exact` only exists for nextest compatibility and is not supported without `--nocapture`")) } else { None } diff --git a/windsock/src/lib.rs b/windsock/src/lib.rs index 69c38ec7c..6d2005d9b 100644 --- a/windsock/src/lib.rs +++ b/windsock/src/lib.rs @@ -17,11 +17,11 @@ pub use tables::Goal; use anyhow::{anyhow, Result}; use bench::BenchState; -use clap::Parser; -use cli::{Args, Command}; +use clap::{CommandFactory, Parser}; +use cli::{Command, RunArgs, WindsockArgs}; use cloud::{BenchInfo, Cloud}; use filter::Filter; -use std::{path::Path, process::exit}; +use std::process::exit; use tokio::runtime::Runtime; pub struct Windsock { @@ -70,7 +70,7 @@ impl Windsock Result<()> { - let args = cli::Args::parse(); + let args = WindsockArgs::parse(); let running_in_release = self.running_in_release; if let Some(command) = args.command { @@ -93,40 +93,68 @@ impl Windsock tables::results(ignore_baseline, filter.as_deref().unwrap_or(""))?, Command::CompareByName { filter } => tables::compare_by_name(&filter)?, Command::CompareByTags { filter } => tables::compare_by_tags(&filter)?, + Command::CloudSetup { filter } => { + create_runtime(None).block_on(self.save_cloud_to_disk(filter))? 
+ } + Command::CloudRun(args) => { + create_runtime(None).block_on(self.cloud_run(args, running_in_release))?; + } + Command::CloudCleanup => { + create_runtime(None).block_on(self.cleanup_cloud_resources()); + } + Command::CloudSetupRunCleanup(args) => { + create_runtime(None) + .block_on(self.cloud_setup_run_cleanup(args, running_in_release))?; + } + Command::LocalRun(args) => { + create_runtime(None) + .block_on(self.run_filtered_benches_local(args, running_in_release))?; + } + Command::InternalRun(args) => self.internal_run(&args, running_in_release)?, } - return Ok(()); - } - if args.cleanup_cloud_resources { - let rt = create_runtime(None); - rt.block_on(self.cloud.cleanup_resources()); } else if args.nextest_list() { list::nextest_list(&args, &self.benches); - } else if args.nextest_run_by_name() { - create_runtime(None).block_on(self.run_nextest(args, running_in_release))?; + } else if let Some(name) = args.nextest_run_by_name() { + create_runtime(None).block_on(self.run_nextest(name, running_in_release))?; } else if let Some(err) = args.nextest_invalid_args() { return Err(err); - } else if let Some(internal_run) = &args.internal_run { - self.internal_run(&args, internal_run, running_in_release)?; - } else if let Some(name) = args.name.clone() { - create_runtime(None).block_on(self.run_named_bench(args, name, running_in_release))?; - } else if args.cloud { - create_runtime(None) - .block_on(self.run_filtered_benches_cloud(args, running_in_release))?; } else { - create_runtime(None) - .block_on(self.run_filtered_benches_local(args, running_in_release))?; + WindsockArgs::command().print_help().unwrap(); } Ok(()) } - fn internal_run( + async fn cloud_run(&mut self, args: RunArgs, running_in_release: bool) -> Result<()> { + let bench_infos = self.bench_infos(&args.filter())?; + let resources = self.load_cloud_from_disk(&bench_infos).await?; + self.run_filtered_benches_cloud(args, running_in_release, bench_infos, resources) + .await?; + println!("Cloud resources have not been cleaned up."); + println!("Make sure to use `cloud-cleanup` when you are finished with them."); + Ok(()) + } + + async fn cloud_setup_run_cleanup( &mut self, - args: &Args, - internal_run: &str, + args: RunArgs, running_in_release: bool, ) -> Result<()> { - let (name, resources) = internal_run.split_at(internal_run.find(' ').unwrap() + 1); + let bench_infos = self.bench_infos(&args.filter())?; + let resources = self.temp_setup_cloud(&bench_infos).await?; + self.run_filtered_benches_cloud(args, running_in_release, bench_infos, resources) + .await?; + self.cleanup_cloud_resources().await; + Ok(()) + } + + fn internal_run(&mut self, args: &RunArgs, running_in_release: bool) -> Result<()> { + let name_and_resources = args + .filter + .as_ref() + .expect("Filter arg must be provided for internal-run"); + let (name, resources) = + name_and_resources.split_at(name_and_resources.find(' ').unwrap() + 1); let name = name.trim(); match self.benches.iter_mut().find(|x| x.tags.get_name() == name) { Some(bench) => { @@ -147,189 +175,137 @@ impl Windsock Result<()> { - // This is not a real bench we are just testing that it works, - // so set some really minimal runtime values - args.bench_length_seconds = Some(2); - args.operations_per_second = Some(100); + async fn run_nextest(&mut self, name: &str, running_in_release: bool) -> Result<()> { + let args = RunArgs { + profilers: vec![], + // This is not a real bench, we are just testing that it works, + // so set some really minimal runtime values + 
bench_length_seconds: Some(2), + operations_per_second: Some(100), + filter: Some(name.to_string()), + }; - let name = args.filter.as_ref().unwrap().clone(); - self.run_named_bench(args, name, running_in_release).await + self.run_filtered_benches_local(args, running_in_release) + .await } - async fn run_named_bench( + fn bench_infos(&mut self, filter: &str) -> Result>> { + let filter = Filter::from_query(filter) + .map_err(|err| anyhow!("Failed to parse FILTER {filter:?}\n{err}"))?; + let mut bench_infos = vec![]; + for bench in &mut self.benches { + if filter.matches(&bench.tags) { + bench_infos.push(BenchInfo { + resources: bench.required_cloud_resources(), + name: bench.tags.get_name(), + }); + } + } + Ok(self.cloud.order_benches(bench_infos)) + } + + async fn load_cloud_from_disk( &mut self, - args: Args, - name: String, - running_in_release: bool, - ) -> Result<()> { - ReportArchive::clear_last_run(); - let resources_path = cloud_resources_path(); + bench_infos: &[BenchInfo], + ) -> Result { + if !bench_infos.is_empty() { + let resources = bench_infos.iter().map(|x| x.resources.clone()).collect(); + Ok(self + .cloud + .load_resources_file(&cloud_resources_path(), resources) + .await) + } else { + Err(anyhow!("No benches found with the specified filter")) + } + } - let bench = match self.benches.iter_mut().find(|x| x.tags.get_name() == name) { - Some(bench) => { - if args - .profilers - .iter() - .all(|x| bench.supported_profilers.contains(x)) - { - bench - } else { - return Err(anyhow!("Specified bench {name:?} was requested to run with the profilers {:?} but it only supports the profilers {:?}", args.profilers, bench.supported_profilers)); - } - } - None => return Err(anyhow!("Specified bench {name:?} does not exist.")), - }; + async fn save_cloud_to_disk(&mut self, filter: String) -> Result<()> { + let bench_infos = self.bench_infos(&filter)?; - let resources = if args.cloud { - let resources = vec![bench.required_cloud_resources()]; - Some(if args.load_cloud_resources_file { - self.cloud - .load_resources_file(&resources_path, resources) - .await - } else { - self.cloud - .create_resources(resources, !args.store_cloud_resources_file) - .await - }) + let resources = if !bench_infos.is_empty() { + let resources = bench_infos.iter().map(|x| x.resources.clone()).collect(); + self.cloud.create_resources(resources, false).await } else { - None + return Err(anyhow!("No benches found with the specified filter")); }; - if args.store_cloud_resources_file { - println!( - "Cloud resources have been created in preparation for running the bench:\n {name}" - ); - println!("Make sure to use `--cleanup-cloud-resources` when you are finished with these resources."); - } else { - bench - .orchestrate(&args, running_in_release, resources.clone()) - .await; - } + self.cloud + .store_resources_file(&cloud_resources_path(), resources) + .await; - if args.cloud { - self.cleanup_cloud_resources(resources, &args, &resources_path) - .await; + println!( + "Cloud resources have been created in preparation for running the following benches:" + ); + for bench in bench_infos { + println!(" {}", bench.name); } + println!("Make sure to use `--cleanup-cloud-resources` when you are finished with these resources"); + Ok(()) } + async fn temp_setup_cloud( + &mut self, + bench_infos: &[BenchInfo], + ) -> Result { + let resources = if !bench_infos.is_empty() { + let resources = bench_infos.iter().map(|x| x.resources.clone()).collect(); + self.cloud.create_resources(resources, true).await + } else { + return 
Err(anyhow!("No benches found with the specified filter")); + }; + + Ok(resources) + } + async fn run_filtered_benches_cloud( &mut self, - args: Args, + args: RunArgs, running_in_release: bool, + bench_infos: Vec>, + mut resources: Resources, ) -> Result<()> { ReportArchive::clear_last_run(); - let filter = parse_filter(&args)?; - let resources_path = cloud_resources_path(); - let mut bench_infos = vec![]; - for bench in &mut self.benches { - if filter - .as_ref() - .map(|x| { - x.matches(&bench.tags) - && args - .profilers - .iter() - .all(|x| bench.supported_profilers.contains(x)) - }) - .unwrap_or(true) - { - bench_infos.push(BenchInfo { - resources: bench.required_cloud_resources(), - name: bench.tags.get_name(), - }); - } - } - bench_infos = self.cloud.order_benches(bench_infos); - - let mut resources = if !bench_infos.is_empty() { - let resources = bench_infos.iter().map(|x| x.resources.clone()).collect(); - Some(if args.load_cloud_resources_file { - self.cloud - .load_resources_file(&resources_path, resources) - .await - } else { - self.cloud - .create_resources(resources, !args.store_cloud_resources_file) - .await - }) - } else { - None - }; - - if args.store_cloud_resources_file { - println!("Cloud resources have been created in preparation for running the following benches:"); - for bench in bench_infos { - println!(" {}", bench.name); - } - println!("Make sure to use `--cleanup-cloud-resources` when you are finished with these resources"); - } else { - for (i, bench_info) in bench_infos.iter().enumerate() { - for bench in &mut self.benches { - if bench.tags.get_name() == bench_info.name { - if let Some(resources) = &mut resources { - self.cloud - .adjust_resources(&bench_infos, i, resources) - .await; - } - bench - .orchestrate(&args, running_in_release, resources.clone()) - .await; - break; - } + for (i, bench_info) in bench_infos.iter().enumerate() { + for bench in &mut self.benches { + if bench.tags.get_name() == bench_info.name { + self.cloud + .adjust_resources(&bench_infos, i, &mut resources) + .await; + bench + .orchestrate(&args, running_in_release, Some(resources.clone())) + .await; + break; } } } - self.cleanup_cloud_resources(resources, &args, &resources_path) - .await; - Ok(()) } - async fn cleanup_cloud_resources( - &mut self, - resources: Option, - args: &Args, - resources_path: &Path, - ) { - if args.store_cloud_resources_file { - if let Some(resources) = resources { - self.cloud - .store_resources_file(resources_path, resources) - .await; - } - } else if args.load_cloud_resources_file { - println!("Cloud resources have not been cleaned up."); - println!( - "Make sure to use `--cleanup-cloud-resources` when you are finished with them." 
- ); - } else { - std::fs::remove_file(resources_path).ok(); - self.cloud.cleanup_resources().await; - } + async fn cleanup_cloud_resources(&mut self) { + std::fs::remove_file(cloud_resources_path()).ok(); + self.cloud.cleanup_resources().await; } async fn run_filtered_benches_local( &mut self, - args: Args, + args: RunArgs, running_in_release: bool, ) -> Result<()> { ReportArchive::clear_last_run(); - let filter = parse_filter(&args)?; + let filter = args.filter(); + let filter = Filter::from_query(&filter) + .map_err(|err| anyhow!("Failed to parse FILTER {:?}\n{err}", filter))?; + for bench in &mut self.benches { - if filter - .as_ref() - .map(|x| { - x.matches(&bench.tags) - && args - .profilers - .iter() - .all(|x| bench.supported_profilers.contains(x)) - }) - .unwrap_or(true) + if filter.matches(&bench.tags) + && args + .profilers + .iter() + // TODO: reapply? + .all(|x| bench.supported_profilers.contains(x)) { bench.orchestrate(&args, running_in_release, None).await; } @@ -338,16 +314,6 @@ impl Windsock Result> { - args.filter - .as_ref() - .map(|filter| { - Filter::from_query(filter.as_ref()) - .map_err(|err| anyhow!("Failed to parse FILTER {filter:?}\n{err}")) - }) - .transpose() -} - fn create_runtime(worker_threads: Option) -> Runtime { let mut runtime_builder = tokio::runtime::Builder::new_multi_thread(); runtime_builder.enable_all().thread_name("Windsock-Thread"); diff --git a/windsock/src/list.rs b/windsock/src/list.rs index 2f83f66f0..109297f65 100644 --- a/windsock/src/list.rs +++ b/windsock/src/list.rs @@ -1,4 +1,4 @@ -use crate::{bench::BenchState, cli::Args}; +use crate::{bench::BenchState, cli::WindsockArgs}; pub fn list(benches: &[BenchState]) { // regular usage @@ -9,7 +9,7 @@ pub fn list(benches: &[BenchState( - args: &Args, + args: &WindsockArgs, benches: &[BenchState], ) { if args.nextest_list_all() {
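
For anyone trying out the interface introduced by this patch, the new subcommands compose into a session along these lines (a sketch only; the "db=kafka" filter is an illustrative value borrowed from the help text examples, not a command taken from this patch):

  # provision cloud resources, run the matching benches against them, then tear them down
  cargo windsock cloud-setup "db=kafka"
  cargo windsock cloud-run --bench-length-seconds 5 --operations-per-second 100 "db=kafka"
  cargo windsock cloud-cleanup

  # or run everything on the local machine in one go
  cargo windsock local-run --bench-length-seconds 5 --operations-per-second 100 "db=kafka"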