Merge pull request #281 from fitzgen/benchmark-particular-phases
Allow benchmarking just one particular compilation/instantiation/execution phase
fitzgen authored Dec 22, 2024
2 parents e5003d5 + 51e1d7c commit 5ff7bd8
Showing 4 changed files with 199 additions and 95 deletions.
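This change replaces the old --stop-after flag with --benchmark-phase: instead of stopping after a phase, the CLI now measures only the named phase (compilation, instantiation, or execution) and, for post-compilation phases, reuses a single engine and pre-compiled module across iterations. A minimal sketch of driving the new flag from Rust follows; the binary name "sightglass-cli", the "benchmark" subcommand spelling, and the benchmark path are assumptions inferred from the test helpers below, not something this patch defines.

    // Sketch only: binary name, subcommand, and benchmark path are assumptions.
    use std::process::Command;

    fn main() -> std::io::Result<()> {
        let status = Command::new("sightglass-cli")
            .args([
                "benchmark",
                "--processes",
                "2",
                "--iterations-per-process",
                "1",
                // New in this change: measure only the requested phase.
                "--benchmark-phase",
                "execution",
                "benchmarks/noop/benchmark.wasm",
            ])
            .status()?;
        assert!(status.success());
        Ok(())
    }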
118 changes: 81 additions & 37 deletions crates/cli/src/benchmark.rs
@@ -2,9 +2,10 @@ use crate::suite::BenchmarkOrSuite;
use anyhow::{anyhow, Context, Result};
use rand::{rngs::SmallRng, Rng, SeedableRng};
use sightglass_data::{Format, Measurement, Phase};
use sightglass_recorder::bench_api::Engine;
use sightglass_recorder::cpu_affinity::bind_to_single_core;
use sightglass_recorder::measure::Measurements;
use sightglass_recorder::{bench_api::BenchApi, benchmark::benchmark, measure::MeasureType};
use sightglass_recorder::{bench_api::BenchApi, benchmark, measure::MeasureType};
use std::{
fs,
io::{self, BufWriter, Write},
@@ -105,9 +106,10 @@ pub struct BenchmarkCommand {
#[structopt(short("d"), long("working-dir"), parse(from_os_str))]
working_dir: Option<PathBuf>,

/// Stop measuring after the given phase (compilation/instantiation/execution).
#[structopt(long("stop-after"))]
stop_after_phase: Option<Phase>,
/// Benchmark only the given phase (compilation, instantiation, or
/// execution). Benchmarks all phases if omitted.
#[structopt(long("benchmark-phase"))]
benchmark_phase: Option<Phase>,

/// The significance level for confidence intervals. Typical values are 0.01
/// and 0.05, which correspond to 99% and 95% confidence respectively. This
@@ -178,56 +180,98 @@ impl BenchmarkCommand {
let bytes = fs::read(&wasm_file).context("Attempting to read Wasm bytes")?;
log::debug!("Wasm benchmark size: {} bytes", bytes.len());

let wasm_hash = {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let mut hasher = DefaultHasher::new();
wasm_file.hash(&mut hasher);
hasher.finish()
};
let stdout = format!("stdout-{:x}-{}.log", wasm_hash, std::process::id());
let stdout = Path::new(&stdout);
let stderr = format!("stderr-{:x}-{}.log", wasm_hash, std::process::id());
let stderr = Path::new(&stderr);
let stdin = None;

let mut measurements = Measurements::new(this_arch(), engine, wasm_file);
let mut measure = self.measure.build();

// Create the bench API engine and cache it for reuse across all
// iterations of this benchmark.
let engine = Engine::new(
&mut bench_api,
&working_dir,
stdout,
stderr,
stdin,
&mut measurements,
&mut measure,
self.engine_flags.as_deref(),
);
let mut engine = Some(engine);

// And if we are benchmarking just a post-compilation phase,
// then eagerly compile the Wasm module for reuse.
let mut module = None;
if let Some(Phase::Instantiation | Phase::Execution) = self.benchmark_phase {
module = Some(engine.take().unwrap().compile(&bytes));
}

// Run the benchmark (compilation, instantiation, and execution) several times in
// this process.
for i in 0..self.iterations_per_process {
let wasm_hash = {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let mut hasher = DefaultHasher::new();
wasm_file.hash(&mut hasher);
hasher.finish()
};
let stdout = format!("stdout-{:x}-{}-{}.log", wasm_hash, std::process::id(), i);
let stdout = Path::new(&stdout);
let stderr = format!("stderr-{:x}-{}-{}.log", wasm_hash, std::process::id(), i);
let stderr = Path::new(&stderr);
let stdin = None;

benchmark(
&mut bench_api,
&working_dir,
stdout,
stderr,
stdin,
&bytes,
self.stop_after_phase.clone(),
self.engine_flags.as_deref(),
&mut measure,
&mut measurements,
)?;
for _ in 0..self.iterations_per_process {
match self.benchmark_phase {
None => {
let new_engine = benchmark::all(engine.take().unwrap(), &bytes)?;
engine = Some(new_engine);
}
Some(Phase::Compilation) => {
let new_engine =
benchmark::compilation(engine.take().unwrap(), &bytes)?;
engine = Some(new_engine);
}
Some(Phase::Instantiation) => {
let new_module = benchmark::instantiation(module.take().unwrap())?;
module = Some(new_module);
}
Some(Phase::Execution) => {
let new_module = benchmark::execution(module.take().unwrap())?;
module = Some(new_module);
}
}

self.check_output(Path::new(wasm_file), stdout, stderr)?;
measurements.next_iteration();
engine
.as_mut()
.map(|e| e.measurements())
.or_else(|| module.as_mut().map(|m| m.measurements()))
.unwrap()
.next_iteration();
}

drop((engine, module));
all_measurements.extend(measurements.finish());
}
}

// If we are only benchmarking one phase then filter out any
// measurements for other phases. These get included because we have to
// compile at least once to measure instantiation, for example.
if let Some(phase) = self.benchmark_phase {
all_measurements.retain(|m| m.phase == phase);
}

self.write_results(&all_measurements, &mut output_file)?;
Ok(())
}

/// Assert that our actual `stdout` and `stderr` match our expectations.
fn check_output(&self, wasm_file: &Path, stdout: &Path, stderr: &Path) -> Result<()> {
// If we aren't going through all phases and executing the Wasm, then we
// won't have any actual output to check.
if self.stop_after_phase.is_some() {
return Ok(());
match self.benchmark_phase {
None | Some(Phase::Execution) => {}
// If we aren't executing the Wasm, then we won't have any actual
// output to check.
Some(Phase::Compilation | Phase::Instantiation) => return Ok(()),
}

let wasm_file_dir: PathBuf = if let Some(dir) = wasm_file.parent() {
@@ -326,8 +370,8 @@ impl BenchmarkCommand {
command.env("WASM_BENCH_USE_SMALL_WORKLOAD", "1");
}

if let Some(phase) = self.stop_after_phase {
command.arg("--stop-after").arg(phase.to_string());
if let Some(phase) = self.benchmark_phase {
command.arg("--benchmark-phase").arg(phase.to_string());
}

if let Some(flags) = &self.engine_flags {
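Because the benchmark::all, benchmark::compilation, benchmark::instantiation, and benchmark::execution entry points consume the engine or module by value and hand it back, the loop above threads them through Options: take() the cached value out for one iteration, run the phase, and store the result for the next iteration. A self-contained sketch of that pattern, using stand-in Engine and Module types rather than the real sightglass ones:

    struct Engine;
    struct Module;

    fn compile(engine: Engine, _wasm: &[u8]) -> Module {
        // Stand-in for Engine::compile, which consumes the engine.
        let _ = engine;
        Module
    }

    fn bench_execution(module: Module) -> Module {
        // Stand-in for benchmark::execution, which consumes and returns the module.
        module
    }

    fn main() {
        let wasm = b"\0asm\x01\0\0\0";
        let mut engine = Some(Engine);

        // When only a post-compilation phase is measured, compile once up front.
        let mut module = Some(compile(engine.take().unwrap(), wasm));

        for _ in 0..3 {
            // Each iteration takes the cached module out, measures, and puts it back.
            module = Some(bench_execution(module.take().unwrap()));
        }

        drop((engine, module));
    }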
30 changes: 26 additions & 4 deletions crates/cli/tests/all/benchmark.rs
@@ -5,14 +5,14 @@ use sightglass_data::Measurement;
use std::path::PathBuf;

#[test]
fn benchmark_stop_after_compilation() {
fn benchmark_phase_compilation() {
sightglass_cli_benchmark()
.arg("--raw")
.arg("--processes")
.arg("2")
.arg("--iterations-per-process")
.arg("1")
.arg("--stop-after")
.arg("--benchmark-phase")
.arg("compilation")
.arg(benchmark("noop"))
.assert()
@@ -25,25 +25,47 @@ fn benchmark_stop_after_compilation() {
}

#[test]
fn benchmark_stop_after_instantiation() {
fn benchmark_phase_instantiation() {
sightglass_cli_benchmark()
.arg("--raw")
.arg("--processes")
.arg("2")
.arg("--iterations-per-process")
.arg("1")
.arg("--stop-after")
.arg("--benchmark-phase")
.arg("instantiation")
.arg(benchmark("noop"))
.assert()
.success()
.stdout(
predicate::str::contains("Compilation")
.not()
.and(predicate::str::contains("Instantiation"))
.and(predicate::str::contains("Execution").not()),
);
}

#[test]
fn benchmark_phase_execution() {
sightglass_cli_benchmark()
.arg("--raw")
.arg("--processes")
.arg("2")
.arg("--iterations-per-process")
.arg("1")
.arg("--benchmark-phase")
.arg("execution")
.arg(benchmark("noop"))
.assert()
.success()
.stdout(
predicate::str::contains("Compilation")
.not()
.and(predicate::str::contains("Instantiation").not())
.and(predicate::str::contains("Execution")),
);
}

#[test]
fn benchmark_json() {
let assert = sightglass_cli_benchmark()
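The instantiation and execution tests can assert that the other phase names never appear because, even though compilation must run at least once to produce a module, the CLI retains only measurements for the requested phase before reporting (see the retain call in crates/cli/src/benchmark.rs above). A stand-in sketch of that filtering, with simplified Phase and Measurement types rather than the sightglass_data ones:

    #[derive(PartialEq)]
    enum Phase {
        Compilation,
        Instantiation,
        Execution,
    }

    struct Measurement {
        phase: Phase,
        nanos: u64,
    }

    fn main() {
        // Everything that happened to be measured during the run...
        let mut all = vec![
            Measurement { phase: Phase::Compilation, nanos: 1_200 },
            Measurement { phase: Phase::Instantiation, nanos: 300 },
            Measurement { phase: Phase::Execution, nanos: 900 },
        ];

        // ...but only the requested phase survives into the reported results.
        let requested = Phase::Instantiation;
        all.retain(|m| m.phase == requested);

        assert!(all.iter().all(|m| m.phase == Phase::Instantiation));
        let total: u64 = all.iter().map(|m| m.nanos).sum();
        println!("{} instantiation measurements, {} ns total", all.len(), total);
    }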
55 changes: 44 additions & 11 deletions crates/recorder/src/bench_api.rs
@@ -74,10 +74,7 @@ pub struct Engine<'a, 'b, 'c, M> {
engine: *mut c_void,
}

impl<'a, 'b, 'c, M> Engine<'a, 'b, 'c, M>
where
M: Measure,
{
impl<'a, 'b, 'c, M> Engine<'a, 'b, 'c, M> {
/// Construct a new engine from the given `BenchApi`.
// NB: take a mutable reference to the `BenchApi` so that no one else can
// call its API methods out of order.
@@ -90,7 +87,10 @@ where
measurements: &'a mut Measurements<'c>,
measure: &'a mut M,
execution_flags: Option<&'a str>,
) -> Self {
) -> Self
where
M: Measure,
{
let working_dir = working_dir.display().to_string();
let stdout_path = stdout_path.display().to_string();
let stderr_path = stderr_path.display().to_string();
@@ -134,6 +134,11 @@ where
}
}

/// Get this engine's measurements.
pub fn measurements(&mut self) -> &mut Measurements<'c> {
unsafe { (*self.measurement_data).get_mut().1 }
}

/// Compile the Wasm into a module.
pub fn compile(self, wasm: &[u8]) -> Module<'a, 'b, 'c, M> {
let result =
@@ -143,31 +148,43 @@ where
}

/// Bench API callback for the start of compilation.
extern "C" fn compilation_start(data: *mut u8) {
extern "C" fn compilation_start(data: *mut u8)
where
M: Measure,
{
log::debug!("Starting compilation measurement");
let data = data as *mut (*mut M, *mut Measurements<'b>);
let measure = unsafe { data.as_mut().unwrap().0.as_mut().unwrap() };
measure.start(Phase::Compilation);
}

/// Bench API callback for the start of instantiation.
extern "C" fn instantiation_start(data: *mut u8) {
extern "C" fn instantiation_start(data: *mut u8)
where
M: Measure,
{
log::debug!("Starting instantiation measurement");
let data = data as *mut (*mut M, *mut Measurements<'b>);
let measure = unsafe { data.as_mut().unwrap().0.as_mut().unwrap() };
measure.start(Phase::Instantiation);
}

/// Bench API callback for the start of execution.
extern "C" fn execution_start(data: *mut u8) {
extern "C" fn execution_start(data: *mut u8)
where
M: Measure,
{
log::debug!("Starting execution measurement");
let data = data as *mut (*mut M, *mut Measurements<'b>);
let measure = unsafe { data.as_mut().unwrap().0.as_mut().unwrap() };
measure.start(Phase::Execution);
}

/// Bench API callback for the end of compilation.
extern "C" fn compilation_end(data: *mut u8) {
extern "C" fn compilation_end(data: *mut u8)
where
M: Measure,
{
let data = data as *mut (*mut M, *mut Measurements<'b>);
let (measure, measurements) = unsafe {
let data = data.as_mut().unwrap();
@@ -178,7 +195,10 @@ where
}

/// Bench API callback for the end of instantiation.
extern "C" fn instantiation_end(data: *mut u8) {
extern "C" fn instantiation_end(data: *mut u8)
where
M: Measure,
{
let data = data as *mut (*mut M, *mut Measurements<'b>);
let (measure, measurements) = unsafe {
let data = data.as_mut().unwrap();
@@ -189,7 +209,10 @@ where
}

/// Bench API callback for the end of execution.
extern "C" fn execution_end(data: *mut u8) {
extern "C" fn execution_end(data: *mut u8)
where
M: Measure,
{
let data = data as *mut (*mut M, *mut Measurements<'b>);
let (measure, measurements) = unsafe {
let data = data.as_mut().unwrap();
@@ -215,6 +238,16 @@ pub struct Module<'a, 'b, 'c, M> {
}

impl<'a, 'b, 'c, M> Module<'a, 'b, 'c, M> {
/// Turn this module back into an engine.
pub fn into_engine(self) -> Engine<'a, 'b, 'c, M> {
self.engine
}

/// Get this engine's measurements.
pub fn measurements(&mut self) -> &mut Measurements<'c> {
self.engine.measurements()
}

/// Instantiate this module, returning the resulting `Instance`.
pub fn instantiate(self) -> Instance<'a, 'b, 'c, M> {
let result = unsafe { (self.engine.bench_api.wasm_bench_instantiate)(self.engine.engine) };
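The Engine, Module, and Instance wrappers form a small typestate chain: compiling consumes the engine and yields a module, instantiating consumes the module, and the new into_engine and measurements accessors let the CLI recover the engine (and its measurement sink) between iterations. A stand-in sketch of that shape, with simplified placeholder types rather than the real FFI-backed ones:

    struct Engine;
    struct Module { engine: Engine }

    impl Engine {
        // Stand-in for Engine::compile: consumes the engine, yields a module.
        fn compile(self, _wasm: &[u8]) -> Module {
            Module { engine: self }
        }
    }

    impl Module {
        // Stand-in for the Module::into_engine added in this change.
        fn into_engine(self) -> Engine {
            self.engine
        }
    }

    fn main() {
        let mut engine = Engine;
        for _ in 0..3 {
            // Compile (instantiation and execution elided), then recover the engine.
            let module = engine.compile(b"\0asm\x01\0\0\0");
            engine = module.into_engine();
        }
        drop(engine);
    }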