From 7866475166fb493be1658f4e8588858e632137bf Mon Sep 17 00:00:00 2001 From: TrAyZeN Date: Mon, 28 Oct 2024 12:39:03 +0100 Subject: [PATCH] Rename leakage to trace --- benches/cpa.rs | 40 ++++++++--------- benches/dpa.rs | 22 +++++----- benches/snr.rs | 22 +++++----- benches/ttest.rs | 10 ++--- examples/cpa.rs | 12 ++--- examples/cpa_partioned.rs | 9 ++-- examples/dpa.rs | 20 ++++----- examples/rank.rs | 6 +-- src/distinguishers/cpa.rs | 75 ++++++++++++++++---------------- src/distinguishers/cpa_normal.rs | 58 ++++++++++++------------ src/distinguishers/dpa.rs | 12 ++--- src/leakage_detection.rs | 14 +++--- 12 files changed, 149 insertions(+), 151 deletions(-) diff --git a/benches/cpa.rs b/benches/cpa.rs index c3bd275..04e7581 100644 --- a/benches/cpa.rs +++ b/benches/cpa.rs @@ -12,12 +12,12 @@ pub fn leakage_model(value: usize, guess: usize) -> usize { hw(sbox((value ^ guess) as u8) as usize) } -fn cpa_sequential(leakages: &Array2, plaintexts: &Array2) -> Cpa { - let mut cpa = CpaProcessor::new(leakages.shape()[1], 256, 0, leakage_model); +fn cpa_sequential(traces: &Array2, plaintexts: &Array2) -> Cpa { + let mut cpa = CpaProcessor::new(traces.shape()[1], 256, 0, leakage_model); - for i in 0..leakages.shape()[0] { + for i in 0..traces.shape()[0] { cpa.update( - leakages.row(i).map(|&x| x as usize).view(), + traces.row(i).map(|&x| x as usize).view(), plaintexts.row(i).map(|&y| y as usize).view(), ); } @@ -29,17 +29,17 @@ pub fn leakage_model_normal(value: ArrayView1, guess: usize) -> usize { hw(sbox((value[1] ^ guess) as u8) as usize) } -fn cpa_normal_sequential(leakages: &Array2, plaintexts: &Array2) -> Cpa { +fn cpa_normal_sequential(traces: &Array2, plaintexts: &Array2) -> Cpa { let batch_size = 500; let mut cpa = - cpa_normal::CpaProcessor::new(leakages.shape()[1], batch_size, 256, leakage_model_normal); + cpa_normal::CpaProcessor::new(traces.shape()[1], batch_size, 256, leakage_model_normal); - for (leakage_batch, plaintext_batch) in zip( - leakages.axis_chunks_iter(Axis(0), batch_size), + for (trace_batch, plaintext_batch) in zip( + traces.axis_chunks_iter(Axis(0), batch_size), plaintexts.axis_chunks_iter(Axis(0), batch_size), ) { - cpa.update(leakage_batch.map(|&x| x as f32).view(), plaintext_batch); + cpa.update(trace_batch.map(|&x| x as f32).view(), plaintext_batch); } cpa.finalize() @@ -54,7 +54,7 @@ fn bench_cpa(c: &mut Criterion) { group.measurement_time(std::time::Duration::from_secs(60)); for num_traces in [5000, 10000, 25000].into_iter() { - let leakages = Array2::random_using((num_traces, 5000), Uniform::new(-2., 2.), &mut rng); + let traces = Array2::random_using((num_traces, 5000), Uniform::new(-2., 2.), &mut rng); let plaintexts = Array2::random_using( (num_traces, 16), Uniform::new_inclusive(0u8, 255u8), @@ -63,16 +63,16 @@ fn bench_cpa(c: &mut Criterion) { group.bench_with_input( BenchmarkId::new("cpa_sequential", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| b.iter(|| cpa_sequential(leakages, plaintexts)), + &(&traces, &plaintexts), + |b, (traces, plaintexts)| b.iter(|| cpa_sequential(traces, plaintexts)), ); group.bench_with_input( BenchmarkId::new("cpa_parallel", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| { + &(&traces, &plaintexts), + |b, (traces, plaintexts)| { b.iter(|| { cpa::cpa( - leakages.map(|&x| x as usize).view(), + traces.map(|&x| x as usize).view(), plaintexts.map(|&x| x as usize).view(), 256, 0, @@ -86,17 +86,17 @@ fn bench_cpa(c: &mut Criterion) { if num_traces <= 10000 { group.bench_with_input( 
BenchmarkId::new("cpa_normal_sequential", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| b.iter(|| cpa_normal_sequential(leakages, plaintexts)), + &(&traces, &plaintexts), + |b, (traces, plaintexts)| b.iter(|| cpa_normal_sequential(traces, plaintexts)), ); } group.bench_with_input( BenchmarkId::new("cpa_normal_parallel", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| { + &(&traces, &plaintexts), + |b, (traces, plaintexts)| { b.iter(|| { cpa_normal::cpa( - leakages.map(|&x| x as f32).view(), + traces.map(|&x| x as f32).view(), plaintexts.view(), 256, leakage_model_normal, diff --git a/benches/dpa.rs b/benches/dpa.rs index 7aeb0bb..f5620fe 100644 --- a/benches/dpa.rs +++ b/benches/dpa.rs @@ -10,19 +10,19 @@ fn selection_function(metadata: Array1, guess: usize) -> bool { usize::from(sbox(metadata[1] ^ guess as u8)) & 1 == 1 } -fn dpa_sequential(leakages: &Array2, plaintexts: &Array2) -> Dpa { - let mut dpa = DpaProcessor::new(leakages.shape()[1], 256, selection_function); +fn dpa_sequential(traces: &Array2, plaintexts: &Array2) -> Dpa { + let mut dpa = DpaProcessor::new(traces.shape()[1], 256, selection_function); - for i in 0..leakages.shape()[0] { - dpa.update(leakages.row(i), plaintexts.row(i).to_owned()); + for i in 0..traces.shape()[0] { + dpa.update(traces.row(i), plaintexts.row(i).to_owned()); } dpa.finalize() } -fn dpa_parallel(leakages: &Array2, plaintexts: &Array2) -> Dpa { +fn dpa_parallel(traces: &Array2, plaintexts: &Array2) -> Dpa { dpa( - leakages.view(), + traces.view(), plaintexts .rows() .into_iter() @@ -44,20 +44,20 @@ fn bench_dpa(c: &mut Criterion) { group.measurement_time(std::time::Duration::from_secs(60)); for num_traces in [1000, 2000, 5000].into_iter() { - let leakages = Array2::random_using((num_traces, 5000), Uniform::new(-2., 2.), &mut rng); + let traces = Array2::random_using((num_traces, 5000), Uniform::new(-2., 2.), &mut rng); let plaintexts = Array2::random_using((num_traces, 16), Uniform::new_inclusive(0, 255), &mut rng); group.bench_with_input( BenchmarkId::new("sequential", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| b.iter(|| dpa_sequential(leakages, plaintexts)), + &(&traces, &plaintexts), + |b, (traces, plaintexts)| b.iter(|| dpa_sequential(traces, plaintexts)), ); group.bench_with_input( BenchmarkId::new("parallel", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| b.iter(|| dpa_parallel(leakages, plaintexts)), + &(&traces, &plaintexts), + |b, (traces, plaintexts)| b.iter(|| dpa_parallel(traces, plaintexts)), ); } diff --git a/benches/snr.rs b/benches/snr.rs index 26f8844..2982a03 100644 --- a/benches/snr.rs +++ b/benches/snr.rs @@ -5,18 +5,18 @@ use ndarray_rand::rand::{rngs::StdRng, SeedableRng}; use ndarray_rand::rand_distr::Uniform; use ndarray_rand::RandomExt; -fn snr_sequential(leakages: &Array2, plaintexts: &Array2) -> Array1 { - let mut snr = SnrProcessor::new(leakages.shape()[1], 256); +fn snr_sequential(traces: &Array2, plaintexts: &Array2) -> Array1 { + let mut snr = SnrProcessor::new(traces.shape()[1], 256); - for i in 0..leakages.shape()[0] { - snr.process(leakages.row(i), plaintexts.row(i)[0] as usize); + for i in 0..traces.shape()[0] { + snr.process(traces.row(i), plaintexts.row(i)[0] as usize); } snr.snr() } -fn snr_parallel(leakages: &Array2, plaintexts: &Array2) -> Array1 { - snr(leakages.view(), 256, |i| plaintexts.row(i)[0].into(), 500) +fn snr_parallel(traces: &Array2, plaintexts: &Array2) -> Array1 { + snr(traces.view(), 256, |i| 
plaintexts.row(i)[0].into(), 500) } fn bench_snr(c: &mut Criterion) { @@ -28,20 +28,20 @@ fn bench_snr(c: &mut Criterion) { group.measurement_time(std::time::Duration::from_secs(60)); for num_traces in [5000, 10000, 25000].into_iter() { - let leakages = Array2::random_using((num_traces, 5000), Uniform::new(-200, 200), &mut rng); + let traces = Array2::random_using((num_traces, 5000), Uniform::new(-200, 200), &mut rng); let plaintexts = Array2::random_using((num_traces, 16), Uniform::new_inclusive(0, 255), &mut rng); group.bench_with_input( BenchmarkId::new("sequential", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| b.iter(|| snr_sequential(leakages, plaintexts)), + &(&traces, &plaintexts), + |b, (traces, plaintexts)| b.iter(|| snr_sequential(traces, plaintexts)), ); group.bench_with_input( BenchmarkId::new("parallel", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| b.iter(|| snr_parallel(leakages, plaintexts)), + &(&traces, &plaintexts), + |b, (traces, plaintexts)| b.iter(|| snr_parallel(traces, plaintexts)), ); } diff --git a/benches/ttest.rs b/benches/ttest.rs index 6b9c932..236a796 100644 --- a/benches/ttest.rs +++ b/benches/ttest.rs @@ -28,19 +28,19 @@ fn bench_ttest(c: &mut Criterion) { group.measurement_time(std::time::Duration::from_secs(60)); for num_traces in [5000, 10000, 25000].into_iter() { - let leakages = Array2::random_using((num_traces, 5000), Uniform::new(-200, 200), &mut rng); + let traces = Array2::random_using((num_traces, 5000), Uniform::new(-200, 200), &mut rng); let plaintexts = Array1::random_using(num_traces, Standard, &mut rng); group.bench_with_input( BenchmarkId::new("sequential", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| b.iter(|| ttest_sequential(leakages, plaintexts)), + &(&traces, &plaintexts), + |b, (traces, plaintexts)| b.iter(|| ttest_sequential(traces, plaintexts)), ); group.bench_with_input( BenchmarkId::new("parallel", num_traces), - &(&leakages, &plaintexts), - |b, (leakages, plaintexts)| b.iter(|| ttest_parallel(leakages, plaintexts)), + &(&traces, &plaintexts), + |b, (traces, plaintexts)| b.iter(|| ttest_parallel(traces, plaintexts)), ); } diff --git a/examples/cpa.rs b/examples/cpa.rs index 32ecece..5cf2c33 100644 --- a/examples/cpa.rs +++ b/examples/cpa.rs @@ -27,9 +27,9 @@ fn cpa() -> Result<()> { let folder = String::from("../../data/cw"); let dir_l = format!("{folder}/leakages.npy"); let dir_p = format!("{folder}/plaintexts.npy"); - let leakages = read_array2_from_npy_file::(&dir_l)?; + let traces = read_array2_from_npy_file::(&dir_l)?; let plaintext = read_array2_from_npy_file::(&dir_p)?; - let len_traces = leakages.shape()[0]; + let len_traces = traces.shape()[0]; let cpa_parallel = ((0..len_traces).step_by(batch)) .progress_with(progress_bar(len_traces)) @@ -38,7 +38,7 @@ fn cpa() -> Result<()> { let mut cpa = CpaProcessor::new(size, batch, guess_range, leakage_model); let range_rows = row_number..row_number + batch; let range_samples = start_sample..end_sample; - let sample_traces = leakages + let sample_traces = traces .slice(s![range_rows.clone(), range_samples]) .map(|l| *l as f32); let sample_metadata = plaintext.slice(s![range_rows, ..]).map(|p| *p as usize); @@ -76,13 +76,13 @@ fn success() -> Result<()> { for i in (0..nfiles).progress() { let dir_l = format!("{folder}/l/{i}.npy"); let dir_p = format!("{folder}/p/{i}.npy"); - let leakages = read_array2_from_npy_file::(&dir_l)?; + let traces = read_array2_from_npy_file::(&dir_l)?; let plaintext = 
read_array2_from_npy_file::(&dir_p)?; - for row in (0..leakages.shape()[0]).step_by(batch) { + for row in (0..traces.shape()[0]).step_by(batch) { let range_samples = start_sample..end_sample; let range_rows = row..row + batch; let range_metadata = 0..plaintext.shape()[1]; - let sample_traces = leakages + let sample_traces = traces .slice(s![range_rows.clone(), range_samples]) .map(|l| *l as f32); let sample_metadata = plaintext.slice(s![range_rows, range_metadata]); diff --git a/examples/cpa_partioned.rs b/examples/cpa_partioned.rs index a596838..c0f14f3 100644 --- a/examples/cpa_partioned.rs +++ b/examples/cpa_partioned.rs @@ -19,7 +19,7 @@ fn cpa() -> Result<()> { let size = 5000; // Number of samples let guess_range = 256; // 2**(key length) let target_byte = 1; - let folder = String::from("../../data"); // Directory of leakages and metadata + let folder = String::from("../../data"); // Directory of traces and metadata let nfiles = 5; // Number of files in the directory. TBD: Automating this value /* Parallel operation using multi-threading on batches */ @@ -28,15 +28,14 @@ fn cpa() -> Result<()> { .map(|n| { let dir_l = format!("{folder}/l{n}.npy"); let dir_p = format!("{folder}/p{n}.npy"); - let leakages = read_array2_from_npy_file::(&dir_l).unwrap(); + let traces = read_array2_from_npy_file::(&dir_l).unwrap(); let plaintext = read_array2_from_npy_file::(&dir_p).unwrap(); - (leakages, plaintext) + (traces, plaintext) }) .par_bridge() .map(|batch| { let mut c = CpaProcessor::new(size, guess_range, target_byte, leakage_model); - let len_leakage = batch.0.shape()[0]; - for i in 0..len_leakage { + for i in 0..batch.0.shape()[0] { c.update( batch.0.row(i).map(|x| *x as usize).view(), batch.1.row(i).map(|y| *y as usize).view(), diff --git a/examples/dpa.rs b/examples/dpa.rs index 0941209..1c909fc 100644 --- a/examples/dpa.rs +++ b/examples/dpa.rs @@ -22,12 +22,12 @@ fn dpa() -> Result<()> { let folder = String::from("../../data/cw"); let dir_l = format!("{folder}/leakages.npy"); let dir_p = format!("{folder}/plaintexts.npy"); - let leakages = read_array2_from_npy_file::(&dir_l)?; + let traces = read_array2_from_npy_file::(&dir_l)?; let plaintext = read_array2_from_npy_file::(&dir_p)?; - let len_traces = 20000; //leakages.shape()[0]; + let len_traces = 20000; //traces.shape()[0]; let mut dpa_proc = DpaProcessor::new(size, guess_range, selection_function); for i in (0..len_traces).progress() { - let tmp_trace = leakages + let tmp_trace = traces .row(i) .slice(s![start_sample..end_sample]) .mapv(|t| t as f32); @@ -50,15 +50,15 @@ fn dpa_success() -> Result<()> { let folder = String::from("../../data/cw"); let dir_l = format!("{folder}/leakages.npy"); let dir_p = format!("{folder}/plaintexts.npy"); - let leakages = read_array2_from_npy_file::(&dir_l)?; + let traces = read_array2_from_npy_file::(&dir_l)?; let plaintext = read_array2_from_npy_file::(&dir_p)?; - let len_traces = leakages.shape()[0]; + let len_traces = traces.shape()[0]; let mut dpa_proc = DpaProcessor::new(size, guess_range, selection_function); let rank_traces: usize = 100; let mut rank = Array1::zeros(guess_range); for i in (0..len_traces).progress() { - let tmp_trace = leakages + let tmp_trace = traces .row(i) .slice(s![start_sample..end_sample]) .mapv(|t| t as f32); @@ -87,15 +87,15 @@ fn dpa_parallel() -> Result<()> { let folder = String::from("../../data/cw"); let dir_l = format!("{folder}/leakages.npy"); let dir_p = format!("{folder}/plaintexts.npy"); - let leakages = read_array2_from_npy_file::(&dir_l)?; + let traces = 
read_array2_from_npy_file::(&dir_l)?; let plaintext = read_array2_from_npy_file::(&dir_p)?; - let len_traces = 20000; //leakages.shape()[0]; + let len_traces = 20000; // traces.shape()[0]; let batch = 2500; let dpa = (0..len_traces) .step_by(batch) .par_bridge() .map(|range_rows| { - let tmp_leakages = leakages + let tmp_traces = traces .slice(s![range_rows..range_rows + batch, start_sample..end_sample]) .mapv(|l| l as f32); let tmp_metadata = plaintext @@ -104,7 +104,7 @@ fn dpa_parallel() -> Result<()> { let mut dpa_inner = DpaProcessor::new(size, guess_range, selection_function); for i in 0..batch { - let trace = tmp_leakages.row(i); + let trace = tmp_traces.row(i); let metadata = tmp_metadata.row(i).to_owned(); dpa_inner.update(trace, metadata); } diff --git a/examples/rank.rs b/examples/rank.rs index 7f31bc5..3429b6e 100644 --- a/examples/rank.rs +++ b/examples/rank.rs @@ -26,13 +26,13 @@ fn rank() -> Result<()> { for file in (0..nfiles).progress_with(progress_bar(nfiles)) { let dir_l = format!("{folder}/l{file}.npy"); let dir_p = format!("{folder}/p{file}.npy"); - let leakages = read_array2_from_npy_file::(&dir_l)?; + let traces = read_array2_from_npy_file::(&dir_l)?; let plaintext = read_array2_from_npy_file::(&dir_p)?; - for sample in (0..leakages.shape()[0]).step_by(batch_size) { + for sample in (0..traces.shape()[0]).step_by(batch_size) { let l_sample: ndarray::ArrayBase< ndarray::ViewRepr<&FormatTraces>, ndarray::Dim<[usize; 2]>, - > = leakages.slice(s![sample..sample + batch_size, ..]); + > = traces.slice(s![sample..sample + batch_size, ..]); let p_sample = plaintext.slice(s![sample..sample + batch_size, ..]); let x = (0..batch_size) .par_bridge() diff --git a/src/distinguishers/cpa.rs b/src/distinguishers/cpa.rs index 26790ad..b0c5ba8 100644 --- a/src/distinguishers/cpa.rs +++ b/src/distinguishers/cpa.rs @@ -9,10 +9,10 @@ use std::{iter::zip, ops::Add}; /// Compute the [`Cpa`] of the given traces using [`CpaProcessor`]. /// /// # Panics -/// - Panic if `leakages.shape()[0] != plaintexts.shape()[0]` +/// - Panic if `traces.shape()[0] != plaintexts.shape()[0]` /// - Panic if `batch_size` is 0. 
pub fn cpa( - leakages: ArrayView2, + traces: ArrayView2, plaintexts: ArrayView2, guess_range: usize, target_byte: usize, @@ -23,20 +23,20 @@ where T: Into + Copy + Sync, F: Fn(usize, usize) -> usize + Send + Sync + Copy, { - assert_eq!(leakages.shape()[0], plaintexts.shape()[0]); + assert_eq!(traces.shape()[0], plaintexts.shape()[0]); assert!(batch_size > 0); // From benchmarks fold + reduce_with is faster than map + reduce/reduce_with and fold + reduce zip( - leakages.axis_chunks_iter(Axis(0), batch_size), + traces.axis_chunks_iter(Axis(0), batch_size), plaintexts.axis_chunks_iter(Axis(0), batch_size), ) .par_bridge() .fold( - || CpaProcessor::new(leakages.shape()[1], guess_range, target_byte, leakage_func), - |mut cpa, (leakage_batch, plaintext_batch)| { - for i in 0..leakage_batch.shape()[0] { - cpa.update(leakage_batch.row(i), plaintext_batch.row(i)); + || CpaProcessor::new(traces.shape()[1], guess_range, target_byte, leakage_func), + |mut cpa, (trace_batch, plaintext_batch)| { + for i in 0..trace_batch.shape()[0] { + cpa.update(trace_batch.row(i), plaintext_batch.row(i)); } cpa @@ -96,16 +96,16 @@ where /// Guess range upper excluded bound guess_range: usize, /// Sum of traces - sum_leakages: Array1, + sum_traces: Array1, /// Sum of square of traces - sum_squares_leakages: Array1, + sum_square_traces: Array1, /// Sum of traces per key guess - guess_sum_leakages: Array1, + guess_sum_traces: Array1, /// Sum of square of traces per key guess - guess_sum_squares_leakages: Array1, + guess_sum_squares_traces: Array1, /// Sum of traces per plaintext used /// See 4.3 in - plaintext_sum_leakages: Array2, + plaintext_sum_traces: Array2, /// Leakage model leakage_func: F, /// Number of traces processed @@ -126,11 +126,11 @@ where num_samples, target_byte, guess_range, - sum_leakages: Array1::zeros(num_samples), - sum_squares_leakages: Array1::zeros(num_samples), - guess_sum_leakages: Array1::zeros(guess_range), - guess_sum_squares_leakages: Array1::zeros(guess_range), - plaintext_sum_leakages: Array2::zeros((guess_range, num_samples)), + sum_traces: Array1::zeros(num_samples), + sum_square_traces: Array1::zeros(num_samples), + guess_sum_traces: Array1::zeros(guess_range), + guess_sum_squares_traces: Array1::zeros(guess_range), + plaintext_sum_traces: Array2::zeros((guess_range, num_samples)), leakage_func, num_traces: 0, } @@ -146,16 +146,16 @@ where let partition = plaintext[self.target_byte].into(); for i in 0..self.num_samples { - self.sum_leakages[i] += trace[i].into(); - self.sum_squares_leakages[i] += trace[i].into() * trace[i].into(); + self.sum_traces[i] += trace[i].into(); + self.sum_square_traces[i] += trace[i].into() * trace[i].into(); - self.plaintext_sum_leakages[[partition, i]] += trace[i].into(); + self.plaintext_sum_traces[[partition, i]] += trace[i].into(); } for guess in 0..self.guess_range { let value = (self.leakage_func)(plaintext[self.target_byte].into(), guess); - self.guess_sum_leakages[guess] += value; - self.guess_sum_squares_leakages[guess] += value * value; + self.guess_sum_traces[guess] += value; + self.guess_sum_squares_traces[guess] += value * value; } self.num_traces += 1; @@ -171,26 +171,26 @@ where modeled_leakages[u] = (self.leakage_func)(u, guess); } - let mean_key = self.guess_sum_leakages[guess] as f32 / self.num_traces as f32; + let mean_key = self.guess_sum_traces[guess] as f32 / self.num_traces as f32; let mean_squares_key = - self.guess_sum_squares_leakages[guess] as f32 / self.num_traces as f32; + self.guess_sum_squares_traces[guess] as f32 / 
self.num_traces as f32; let var_key = mean_squares_key - (mean_key * mean_key); let guess_corr: Vec<_> = (0..self.num_samples) .into_par_iter() .map(|u| { - let mean_leakages = self.sum_leakages[u] as f32 / self.num_traces as f32; + let mean_traces = self.sum_traces[u] as f32 / self.num_traces as f32; let cov = self.sum_mult( - self.plaintext_sum_leakages.slice(s![.., u]), + self.plaintext_sum_traces.slice(s![.., u]), modeled_leakages.view(), ); - let cov = cov as f32 / self.num_traces as f32 - (mean_key * mean_leakages); + let cov = cov as f32 / self.num_traces as f32 - (mean_key * mean_traces); - let mean_squares_leakages = - self.sum_squares_leakages[u] as f32 / self.num_traces as f32; - let var_leakages = mean_squares_leakages - (mean_leakages * mean_leakages); - f32::abs(cov / f32::sqrt(var_key * var_leakages)) + let mean_squares_traces = + self.sum_square_traces[u] as f32 / self.num_traces as f32; + let var_traces = mean_squares_traces - (mean_traces * mean_traces); + f32::abs(cov / f32::sqrt(var_key * var_traces)) }) .collect(); @@ -239,12 +239,11 @@ where num_samples: self.num_samples, target_byte: self.target_byte, guess_range: self.guess_range, - sum_leakages: self.sum_leakages + rhs.sum_leakages, - sum_squares_leakages: self.sum_squares_leakages + rhs.sum_squares_leakages, - guess_sum_leakages: self.guess_sum_leakages + rhs.guess_sum_leakages, - guess_sum_squares_leakages: self.guess_sum_squares_leakages - + rhs.guess_sum_squares_leakages, - plaintext_sum_leakages: self.plaintext_sum_leakages + rhs.plaintext_sum_leakages, + sum_traces: self.sum_traces + rhs.sum_traces, + sum_square_traces: self.sum_square_traces + rhs.sum_square_traces, + guess_sum_traces: self.guess_sum_traces + rhs.guess_sum_traces, + guess_sum_squares_traces: self.guess_sum_squares_traces + rhs.guess_sum_squares_traces, + plaintext_sum_traces: self.plaintext_sum_traces + rhs.plaintext_sum_traces, leakage_func: self.leakage_func, num_traces: self.num_traces + rhs.num_traces, } diff --git a/src/distinguishers/cpa_normal.rs b/src/distinguishers/cpa_normal.rs index 1ecfed2..e5c94d9 100644 --- a/src/distinguishers/cpa_normal.rs +++ b/src/distinguishers/cpa_normal.rs @@ -7,10 +7,10 @@ use crate::distinguishers::cpa::Cpa; /// Compute the [`Cpa`] of the given traces using [`CpaProcessor`]. /// /// # Panics -/// - Panic if `leakages.shape()[0] != plaintexts.shape()[0]` +/// - Panic if `traces.shape()[0] != plaintexts.shape()[0]` /// - Panic if `batch_size` is 0. 
pub fn cpa( - leakages: ArrayView2, + traces: ArrayView2, plaintexts: ArrayView2, guess_range: usize, leakage_func: F, @@ -21,18 +21,18 @@ where U: Into + Copy + Sync, F: Fn(ArrayView1, usize) -> usize + Send + Sync + Copy, { - assert_eq!(leakages.shape()[0], plaintexts.shape()[0]); + assert_eq!(traces.shape()[0], plaintexts.shape()[0]); assert!(batch_size > 0); zip( - leakages.axis_chunks_iter(Axis(0), batch_size), + traces.axis_chunks_iter(Axis(0), batch_size), plaintexts.axis_chunks_iter(Axis(0), batch_size), ) .par_bridge() .fold( - || CpaProcessor::new(leakages.shape()[1], batch_size, guess_range, leakage_func), - |mut cpa, (leakage_batch, plaintext_batch)| { - cpa.update(leakage_batch, plaintext_batch); + || CpaProcessor::new(traces.shape()[1], batch_size, guess_range, leakage_func), + |mut cpa, (trace_batch, plaintext_batch)| { + cpa.update(trace_batch, plaintext_batch); cpa }, @@ -54,13 +54,13 @@ where /// Guess range upper excluded bound guess_range: usize, /// Sum of traces - sum_leakages: Array1, + sum_traces: Array1, /// Sum of square of traces - sum2_leakages: Array1, + sum_traces2: Array1, /// Sum of traces per key guess - guess_sum_leakages: Array1, + guess_sum_traces: Array1, /// Sum of square of traces per key guess - guess_sum2_leakages: Array1, + guess_sum_traces2: Array1, values: Array2, cov: Array2, /// Batch size @@ -79,10 +79,10 @@ where Self { num_samples, guess_range, - sum_leakages: Array1::zeros(num_samples), - sum2_leakages: Array1::zeros(num_samples), - guess_sum_leakages: Array1::zeros(guess_range), - guess_sum2_leakages: Array1::zeros(guess_range), + sum_traces: Array1::zeros(num_samples), + sum_traces2: Array1::zeros(num_samples), + guess_sum_traces: Array1::zeros(guess_range), + guess_sum_traces2: Array1::zeros(guess_range), values: Array2::zeros((batch_size, guess_range)), cov: Array2::zeros((guess_range, num_samples)), batch_size, @@ -132,13 +132,13 @@ where fn update_key_leakages(&mut self, trace: ArrayView2, guess_range: usize) { for i in 0..self.num_samples { - self.sum_leakages[i] += trace.column(i).sum(); // trace[i] as usize; - self.sum2_leakages[i] += trace.column(i).dot(&trace.column(i)); // (trace[i] * trace[i]) as usize; + self.sum_traces[i] += trace.column(i).sum(); // trace[i] as usize; + self.sum_traces2[i] += trace.column(i).dot(&trace.column(i)); // (trace[i] * trace[i]) as usize; } for guess in 0..guess_range { - self.guess_sum_leakages[guess] += self.values.column(guess).sum(); //self.values[guess] as usize; - self.guess_sum2_leakages[guess] += + self.guess_sum_traces[guess] += self.values.column(guess).sum(); //self.values[guess] as usize; + self.guess_sum_traces2[guess] += self.values.column(guess).dot(&self.values.column(guess)); // (self.values[guess] * self.values[guess]) as usize; } @@ -147,19 +147,19 @@ where /// Finalize the calculation after feeding the overall traces. 
pub fn finalize(&self) -> Cpa { let cov_n = self.cov.clone() / self.num_traces as f32; - let avg_keys = self.guess_sum_leakages.clone() / self.num_traces as f32; - let std_key = self.guess_sum2_leakages.clone() / self.num_traces as f32; - let avg_leakages = self.sum_leakages.clone() / self.num_traces as f32; - let std_leakages = self.sum2_leakages.clone() / self.num_traces as f32; + let avg_keys = self.guess_sum_traces.clone() / self.num_traces as f32; + let std_key = self.guess_sum_traces2.clone() / self.num_traces as f32; + let avg_traces = self.sum_traces.clone() / self.num_traces as f32; + let std_traces = self.sum_traces2.clone() / self.num_traces as f32; let mut corr = Array2::zeros((self.guess_range, self.num_samples)); for i in 0..self.guess_range { for x in 0..self.num_samples { - let numerator = cov_n[[i, x]] - (avg_keys[i] * avg_leakages[x]); + let numerator = cov_n[[i, x]] - (avg_keys[i] * avg_traces[x]); let denominator_1 = std_key[i] - (avg_keys[i] * avg_keys[i]); - let denominator_2 = std_leakages[x] - (avg_leakages[x] * avg_leakages[x]); + let denominator_2 = std_traces[x] - (avg_traces[x] * avg_traces[x]); if numerator != 0.0 { corr[[i, x]] = f32::abs(numerator / f32::sqrt(denominator_1 * denominator_2)); } @@ -200,10 +200,10 @@ where Self { num_samples: self.num_samples, guess_range: self.guess_range, - sum_leakages: self.sum_leakages + rhs.sum_leakages, - sum2_leakages: self.sum2_leakages + rhs.sum2_leakages, - guess_sum_leakages: self.guess_sum_leakages + rhs.guess_sum_leakages, - guess_sum2_leakages: self.guess_sum2_leakages + rhs.guess_sum2_leakages, + sum_traces: self.sum_traces + rhs.sum_traces, + sum_traces2: self.sum_traces2 + rhs.sum_traces2, + guess_sum_traces: self.guess_sum_traces + rhs.guess_sum_traces, + guess_sum_traces2: self.guess_sum_traces2 + rhs.guess_sum_traces2, values: self.values + rhs.values, cov: self.cov + rhs.cov, batch_size: self.batch_size, diff --git a/src/distinguishers/dpa.rs b/src/distinguishers/dpa.rs index 2a859c1..f038192 100644 --- a/src/distinguishers/dpa.rs +++ b/src/distinguishers/dpa.rs @@ -9,7 +9,7 @@ use crate::util::{argmax_by, argsort_by, max_per_row}; /// # Panics /// Panic if `batch_size` is not strictly positive. pub fn dpa( - leakages: ArrayView2, + traces: ArrayView2, metadata: ArrayView1, guess_range: usize, selection_function: F, @@ -23,15 +23,15 @@ where assert!(batch_size > 0); zip( - leakages.axis_chunks_iter(Axis(0), batch_size), + traces.axis_chunks_iter(Axis(0), batch_size), metadata.axis_chunks_iter(Axis(0), batch_size), ) .par_bridge() .fold( - || DpaProcessor::new(leakages.shape()[1], guess_range, selection_function), - |mut dpa, (leakage_batch, metadata_batch)| { - for i in 0..leakage_batch.shape()[0] { - dpa.update(leakage_batch.row(i), metadata_batch[i].clone()); + || DpaProcessor::new(traces.shape()[1], guess_range, selection_function), + |mut dpa, (trace_batch, metadata_batch)| { + for i in 0..trace_batch.shape()[0] { + dpa.update(trace_batch.row(i), metadata_batch[i].clone()); } dpa diff --git a/src/leakage_detection.rs b/src/leakage_detection.rs index c16d57a..83c04eb 100644 --- a/src/leakage_detection.rs +++ b/src/leakage_detection.rs @@ -11,7 +11,7 @@ use std::{iter::zip, ops::Add}; /// # Panics /// - Panic if `batch_size` is 0. 
pub fn snr( - leakages: ArrayView2, + traces: ArrayView2, classes: usize, get_class: F, batch_size: usize, @@ -23,15 +23,15 @@ where assert!(batch_size > 0); // From benchmarks fold + reduce_with is faster than map + reduce/reduce_with and fold + reduce - leakages + traces .axis_chunks_iter(Axis(0), batch_size) .enumerate() .par_bridge() .fold( - || SnrProcessor::new(leakages.shape()[1], classes), - |mut snr, (batch_idx, leakage_batch)| { - for i in 0..leakage_batch.shape()[0] { - snr.process(leakage_batch.row(i), get_class(batch_idx + i)); + || SnrProcessor::new(traces.shape()[1], classes), + |mut snr, (batch_idx, trace_batch)| { + for i in 0..trace_batch.shape()[0] { + snr.process(trace_batch.row(i), get_class(batch_idx + i)); } snr }, @@ -318,7 +318,7 @@ mod tests { assert_eq!( processor.ttest(), - ttest(traces.view(), trace_classes.view(), 2,) + ttest(traces.view(), trace_classes.view(), 2) ); } }
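
Usage after the rename stays identical to before; callers only rename their local bindings, since no processor signature changes. A minimal sketch, assuming the crate root is `muscat`, that `Cpa`/`CpaProcessor` live under `muscat::distinguishers::cpa` as in the paths touched above, and substituting a plain Hamming-weight model for the sbox-based one used in the benches:

use muscat::distinguishers::cpa::{Cpa, CpaProcessor};
use ndarray::Array2;

// Stand-in leakage model (assumption): Hamming weight of value XOR guess,
// instead of the sbox-based model defined in benches/cpa.rs.
fn leakage_model(value: usize, guess: usize) -> usize {
    (value ^ guess).count_ones() as usize
}

// Feed each trace row and its plaintext row to the processor, then finalize,
// mirroring the sequential loop in benches/cpa.rs with the renamed variables.
fn run_cpa(traces: &Array2<f64>, plaintexts: &Array2<u8>) -> Cpa {
    // 256 key guesses, targeting plaintext byte 0.
    let mut cpa = CpaProcessor::new(traces.shape()[1], 256, 0, leakage_model);
    for i in 0..traces.shape()[0] {
        cpa.update(
            traces.row(i).map(|&x| x as usize).view(),
            plaintexts.row(i).map(|&p| p as usize).view(),
        );
    }
    cpa.finalize()
}

The call sites updated in the benches and examples above follow exactly this pattern, just with `leakages` renamed to `traces`.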