Include processor in {snr, ttest} processor name
TrAyZeN committed Oct 9, 2024
1 parent cfec0b6 commit ca8ad69
Showing 5 changed files with 55 additions and 56 deletions.
16 changes: 8 additions & 8 deletions benches/cpa.rs
@@ -53,21 +53,21 @@ fn bench_cpa(c: &mut Criterion) {
 
     group.measurement_time(std::time::Duration::from_secs(60));
 
-    for nb_traces in [5000, 10000, 25000].into_iter() {
-        let leakages = Array2::random_using((nb_traces, 5000), Uniform::new(-2., 2.), &mut rng);
+    for num_traces in [5000, 10000, 25000].into_iter() {
+        let leakages = Array2::random_using((num_traces, 5000), Uniform::new(-2., 2.), &mut rng);
         let plaintexts = Array2::random_using(
-            (nb_traces, 16),
+            (num_traces, 16),
             Uniform::new_inclusive(0u8, 255u8),
             &mut rng,
         );
 
         group.bench_with_input(
-            BenchmarkId::new("cpa_sequential", nb_traces),
+            BenchmarkId::new("cpa_sequential", num_traces),
             &(&leakages, &plaintexts),
             |b, (leakages, plaintexts)| b.iter(|| cpa_sequential(leakages, plaintexts)),
         );
         group.bench_with_input(
-            BenchmarkId::new("cpa_parallel", nb_traces),
+            BenchmarkId::new("cpa_parallel", num_traces),
             &(&leakages, &plaintexts),
             |b, (leakages, plaintexts)| {
                 b.iter(|| {
@@ -83,15 +83,15 @@ fn bench_cpa(c: &mut Criterion) {
             },
         );
         // For 25000 traces, 60s of measurement_time is too low
-        if nb_traces <= 10000 {
+        if num_traces <= 10000 {
             group.bench_with_input(
-                BenchmarkId::new("cpa_normal_sequential", nb_traces),
+                BenchmarkId::new("cpa_normal_sequential", num_traces),
                 &(&leakages, &plaintexts),
                 |b, (leakages, plaintexts)| b.iter(|| cpa_normal_sequential(leakages, plaintexts)),
             );
         }
         group.bench_with_input(
-            BenchmarkId::new("cpa_normal_parallel", nb_traces),
+            BenchmarkId::new("cpa_normal_parallel", num_traces),
             &(&leakages, &plaintexts),
             |b, (leakages, plaintexts)| {
                 b.iter(|| {
10 changes: 5 additions & 5 deletions benches/dpa.rs
@@ -43,19 +43,19 @@ fn bench_dpa(c: &mut Criterion) {
 
     group.measurement_time(std::time::Duration::from_secs(60));
 
-    for nb_traces in [1000, 2000, 5000].into_iter() {
-        let leakages = Array2::random_using((nb_traces, 5000), Uniform::new(-2., 2.), &mut rng);
+    for num_traces in [1000, 2000, 5000].into_iter() {
+        let leakages = Array2::random_using((num_traces, 5000), Uniform::new(-2., 2.), &mut rng);
         let plaintexts =
-            Array2::random_using((nb_traces, 16), Uniform::new_inclusive(0, 255), &mut rng);
+            Array2::random_using((num_traces, 16), Uniform::new_inclusive(0, 255), &mut rng);
 
         group.bench_with_input(
-            BenchmarkId::new("sequential", nb_traces),
+            BenchmarkId::new("sequential", num_traces),
             &(&leakages, &plaintexts),
             |b, (leakages, plaintexts)| b.iter(|| dpa_sequential(leakages, plaintexts)),
         );
 
         group.bench_with_input(
-            BenchmarkId::new("parallel", nb_traces),
+            BenchmarkId::new("parallel", num_traces),
             &(&leakages, &plaintexts),
             |b, (leakages, plaintexts)| b.iter(|| dpa_parallel(leakages, plaintexts)),
         );
14 changes: 7 additions & 7 deletions benches/snr.rs
@@ -1,12 +1,12 @@
 use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
-use muscat::leakage_detection::{snr, Snr};
+use muscat::leakage_detection::{snr, SnrProcessor};
 use ndarray::{Array1, Array2};
 use ndarray_rand::rand::{rngs::StdRng, SeedableRng};
 use ndarray_rand::rand_distr::Uniform;
 use ndarray_rand::RandomExt;
 
 fn snr_sequential(leakages: &Array2<i64>, plaintexts: &Array2<u8>) -> Array1<f64> {
-    let mut snr = Snr::new(leakages.shape()[1], 256);
+    let mut snr = SnrProcessor::new(leakages.shape()[1], 256);
 
     for i in 0..leakages.shape()[0] {
         snr.process(leakages.row(i), plaintexts.row(i)[0] as usize);
@@ -27,19 +27,19 @@ fn bench_snr(c: &mut Criterion) {
 
     group.measurement_time(std::time::Duration::from_secs(60));
 
-    for nb_traces in [5000, 10000, 25000].into_iter() {
-        let leakages = Array2::random_using((nb_traces, 5000), Uniform::new(-200, 200), &mut rng);
+    for num_traces in [5000, 10000, 25000].into_iter() {
+        let leakages = Array2::random_using((num_traces, 5000), Uniform::new(-200, 200), &mut rng);
         let plaintexts =
-            Array2::random_using((nb_traces, 16), Uniform::new_inclusive(0, 255), &mut rng);
+            Array2::random_using((num_traces, 16), Uniform::new_inclusive(0, 255), &mut rng);
 
         group.bench_with_input(
-            BenchmarkId::new("sequential", nb_traces),
+            BenchmarkId::new("sequential", num_traces),
             &(&leakages, &plaintexts),
             |b, (leakages, plaintexts)| b.iter(|| snr_sequential(leakages, plaintexts)),
         );
 
         group.bench_with_input(
-            BenchmarkId::new("parallel", nb_traces),
+            BenchmarkId::new("parallel", num_traces),
             &(&leakages, &plaintexts),
             |b, (leakages, plaintexts)| b.iter(|| snr_parallel(leakages, plaintexts)),
         );
8 changes: 4 additions & 4 deletions examples/snr.rs
@@ -1,6 +1,6 @@
 use anyhow::Result;
 use indicatif::ProgressIterator;
-use muscat::leakage_detection::Snr;
+use muscat::leakage_detection::SnrProcessor;
 use muscat::quicklog::{BatchIter, Log};
 use muscat::util::{progress_bar, save_array};
 use rayon::prelude::{ParallelBridge, ParallelIterator};
@@ -12,7 +12,7 @@ fn main() -> Result<()> {
     let leakage_size = log.leakage_size();
     let trace_count = log.len();
 
-    let result: Snr = log
+    let result: SnrProcessor = log
         .into_iter()
         .progress_with(progress_bar(trace_count))
         // Process records sharing same leakage numpy files by batches, so batch files get read only
@@ -24,7 +24,7 @@ fn main() -> Result<()> {
         // Use `par_bridge` from rayon crate to make processing multithreaded
         .par_bridge()
        .fold(
-            || Snr::new(leakage_size, 256),
+            || SnrProcessor::new(leakage_size, 256),
             |mut snr, batch| {
                 for trace in batch {
                     // `process` takes an `ArrayView1` argument, which makes possible to pass a
@@ -35,7 +35,7 @@ fn main() -> Result<()> {
             },
         )
         // Merge the results of each processing thread
-        .reduce(|| Snr::new(leakage_size, 256), |a, b| a + b);
+        .reduce(|| SnrProcessor::new(leakage_size, 256), |a, b| a + b);
 
     // Save the resulting SNR trace to a numpy file
     save_array("result.npy", &result.snr())?;
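Aside (not part of this commit): the `.reduce(|| SnrProcessor::new(leakage_size, 256), |a, b| a + b)` step above works because `SnrProcessor` implements `Add` for merging accumulators created with the same parameters. A minimal sketch of that merge on its own, assuming the muscat API exactly as shown in this diff and using arbitrary trace values:

    use muscat::leakage_detection::SnrProcessor;
    use ndarray::array;

    fn main() {
        // Two accumulators over disjoint trace sets, e.g. built on two worker threads.
        let mut a = SnrProcessor::new(4, 256);
        let mut b = SnrProcessor::new(4, 256);
        // `process` takes an ArrayView1 of the trace samples and the class of that trace.
        a.process(array![77i64, 137, 51, 91].view(), 0);
        a.process(array![72i64, 61, 91, 83].view(), 0);
        b.process(array![81i64, 66, 43, 110].view(), 1);
        b.process(array![75i64, 59, 77, 97].view(), 1);
        // Same constructor parameters, so the processors are compatible and `+` merges them.
        let merged = a + b;
        let _snr_trace = merged.snr();
    }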
63 changes: 31 additions & 32 deletions src/leakage_detection.rs
@@ -4,12 +4,12 @@ use ndarray::{s, Array1, Array2, ArrayView1, ArrayView2, Axis};
 use rayon::iter::{ParallelBridge, ParallelIterator};
 use std::{iter::zip, ops::Add};
 
-/// Compute the SNR of the given traces.
+/// Compute the SNR of the given traces using [`SnrProcessor`].
 ///
 /// `get_class` is a function returning the class of the given trace by index.
 ///
 /// # Panics
-/// Panic if `batch_size` is 0.
+/// - Panic if `batch_size` is 0.
 pub fn snr<T, F>(
     leakages: ArrayView2<T>,
     classes: usize,
@@ -28,7 +28,7 @@ where
         .enumerate()
         .par_bridge()
         .fold(
-            || Snr::new(leakages.shape()[1], classes),
+            || SnrProcessor::new(leakages.shape()[1], classes),
             |mut snr, (batch_idx, leakage_batch)| {
                 for i in 0..leakage_batch.shape()[0] {
                     snr.process(leakage_batch.row(i), get_class(batch_idx + i));
@@ -41,23 +41,23 @@
         .snr()
 }
 
-/// Processes traces to calculate the Signal-to-Noise Ratio.
+/// A Processor that computes the Signal-to-Noise Ratio of the given traces
 #[derive(Debug, Clone)]
-pub struct Snr {
+pub struct SnrProcessor {
     mean_var: MeanVar,
     /// Sum of traces per class
     classes_sum: Array2<i64>,
     /// Counts the number of traces per class
     classes_count: Array1<usize>,
 }
 
-impl Snr {
-    /// Create a new SNR processor.
+impl SnrProcessor {
+    /// Create a new [`SnrProcessor`].
     ///
     /// # Arguments
     ///
-    /// * `size` - Size of the input traces
-    /// * `num_classes` - Number of classes
+    /// - `size` - Size of the input traces
+    /// - `num_classes` - Number of classes
     pub fn new(size: usize, num_classes: usize) -> Self {
         Self {
             mean_var: MeanVar::new(size),
@@ -69,7 +69,7 @@ impl Snr {
     /// Process an input trace to update internal accumulators.
     ///
     /// # Panics
-    /// Panics in debug if the length of the trace is different from the size of [`Snr`].
+    /// - Panics in debug if the length of the trace is different from the size of [`SnrProcessor`].
     pub fn process<T: Into<i64> + Copy>(&mut self, trace: ArrayView1<T>, class: usize) {
         debug_assert!(trace.len() == self.size());
 
@@ -82,9 +82,10 @@ impl Snr {
         self.classes_count[class] += 1;
     }
 
-    /// Returns the Signal-to-Noise Ratio of the traces.
-    /// SNR = V[E[L|X]] / E[V[L|X]]
+    /// Finalize the processor computation and return the Signal-to-Noise Ratio.
     pub fn snr(&self) -> Array1<f64> {
+        // SNR = V[E[L|X]] / E[V[L|X]]
+
         let size = self.size();
 
         let mut acc: Array1<f64> = Array1::zeros(size);
@@ -116,23 +117,23 @@ impl Snr {
         self.classes_count.len()
     }
 
-    /// Determine if two [`Snr`] are compatible for addition.
+    /// Determine if two [`SnrProcessor`] are compatible for addition.
     ///
     /// If they were created with the same parameters, they are compatible.
     fn is_compatible_with(&self, other: &Self) -> bool {
         self.size() == other.size() && self.num_classes() == other.num_classes()
     }
 }
 
-impl Add for Snr {
+impl Add for SnrProcessor {
     type Output = Self;
 
-    /// Merge computations of two [`Snr`]. Processors need to be compatible to be merged
+    /// Merge computations of two [`SnrProcessor`]. Processors need to be compatible to be merged
     /// together, otherwise it can panic or yield incoherent result (see
-    /// [`Snr::is_compatible_with`]).
+    /// [`SnrProcessor::is_compatible_with`]).
     ///
     /// # Panics
-    /// Panics in debug if the processors are not compatible.
+    /// - Panics in debug if the processors are not compatible.
     fn add(self, rhs: Self) -> Self::Output {
         debug_assert!(self.is_compatible_with(&rhs));
 
@@ -144,7 +145,7 @@ impl Add for Snr {
     }
 }
 
-/// Compute the Welch's T-test of the given traces.
+/// Compute the Welch's T-test of the given traces using [`TTestProcessor`].
 ///
 /// # Panics
 /// - Panic if `traces.shape()[0] != trace_classes.shape()[0]`
@@ -166,7 +167,7 @@
     )
     .par_bridge()
     .fold(
-        || TTest::new(traces.shape()[1]),
+        || TTestProcessor::new(traces.shape()[1]),
         |mut ttest, (trace_batch, trace_classes_batch)| {
             for i in 0..trace_batch.shape()[0] {
                 ttest.process(trace_batch.row(i), trace_classes_batch[i]);
@@ -179,18 +180,17 @@
     .ttest()
 }
 
-/// Process traces to calculate Welch's T-Test.
+/// A Processor that computes the Welch's T-Test of the given traces.
 #[derive(Debug)]
-pub struct TTest {
+pub struct TTestProcessor {
     mean_var_1: MeanVar,
     mean_var_2: MeanVar,
 }
 
-impl TTest {
-    /// Create a new Welch's T-Test processor.
+impl TTestProcessor {
+    /// Create a new [`TTestProcessor`].
     ///
     /// # Arguments
-    ///
     /// * `size` - Number of samples per trace
     pub fn new(size: usize) -> Self {
         Self {
@@ -202,7 +202,6 @@ impl TTest {
     /// Process an input trace to update internal accumulators.
     ///
     /// # Arguments
-    ///
     /// * `trace` - Input trace.
     /// * `class` - Indicates to which of the two partitions the given trace belongs.
     ///
@@ -235,20 +234,20 @@ impl TTest {
         self.mean_var_1.size()
     }
 
-    /// Determine if two [`TTest`] are compatible for addition.
+    /// Determine if two [`TTestProcessor`] are compatible for addition.
     ///
     /// If they were created with the same parameters, they are compatible.
     fn is_compatible_with(&self, other: &Self) -> bool {
         self.size() == other.size()
     }
 }
 
-impl Add for TTest {
+impl Add for TTestProcessor {
     type Output = Self;
 
-    /// Merge computations of two [`TTest`]. Processors need to be compatible to be merged
+    /// Merge computations of two [`TTestProcessor`]. Processors need to be compatible to be merged
     /// together, otherwise it can panic or yield incoherent result (see
-    /// [`TTest::is_compatible_with`]).
+    /// [`TTestProcessor::is_compatible_with`]).
     ///
     /// # Panics
     /// Panics in debug if the processors are not compatible.
Expand All @@ -264,12 +263,12 @@ impl Add for TTest {

#[cfg(test)]
mod tests {
use super::{ttest, TTest};
use super::{ttest, TTestProcessor};
use ndarray::array;

#[test]
fn test_ttest() {
let mut processor = TTest::new(4);
let mut processor = TTestProcessor::new(4);
let traces = [
array![77, 137, 51, 91],
array![72, 61, 91, 83],
Expand Down Expand Up @@ -298,7 +297,7 @@ mod tests {

#[test]
fn test_ttest_helper() {
let mut processor = TTest::new(4);
let mut processor = TTestProcessor::new(4);
let traces = array![
[77, 137, 51, 91],
[72, 61, 91, 83],
Expand Down
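Aside (not part of this commit): a minimal sketch of driving the renamed `TTestProcessor` directly, mirroring the `test_ttest` case above. It assumes that `process` takes a boolean selecting one of the two partitions (as the `class` doc comment suggests) and that `ttest()` returns one Welch's t value per sample point; the trace values are arbitrary.

    use muscat::leakage_detection::TTestProcessor;
    use ndarray::array;

    fn main() {
        // Four samples per trace, two partitions (e.g. fixed vs. random inputs).
        let mut processor = TTestProcessor::new(4);
        processor.process(array![77i64, 137, 51, 91].view(), false);
        processor.process(array![72i64, 61, 91, 83].view(), false);
        processor.process(array![83i64, 51, 95, 110].view(), true);
        processor.process(array![91i64, 73, 64, 77].view(), true);
        // One t statistic per sample point.
        let t = processor.ttest();
        assert_eq!(t.len(), 4);
    }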
