
Commit 2f49a02: Fix clippy warnings
TrAyZeN authored and kingofpayne committed Apr 3, 2024
1 parent 4113764 commit 2f49a02
Showing 6 changed files with 37 additions and 32 deletions.
src/cpa.rs (2 additions, 0 deletions)
@@ -91,6 +91,7 @@ impl Cpa {
             let _sigkeys = self.sig_keys[i] as f32 / self.len_leakages as f32;
             let _sumkeys = self.sum_keys[i] as f32 / self.len_leakages as f32;
             let lower1: f32 = _sigkeys - (_sumkeys * _sumkeys);
+
             /* Parallel operation using multi-threading */
             let tmp: Vec<f32> = (0..self.len_samples)
                 .into_par_iter()
@@ -108,6 +109,7 @@ impl Cpa {
                 })
                 .collect();
 
+            #[allow(clippy::needless_range_loop)]
             for z in 0..self.len_samples {
                 self.corr[[i, z]] = tmp[z];
             }
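
Context on the lint: clippy's needless_range_loop fires on counter loops that only index into slices and suggests iterating directly. A minimal sketch of the flagged pattern and the rewrite, on plain slices with illustrative names (not from this crate):

    fn copy_correlations(dst: &mut [f32], tmp: &[f32]) {
        // Indexed loop that the lint flags:
        //     for z in 0..tmp.len() { dst[z] = tmp[z]; }
        // Iterator form the lint suggests:
        for (d, &t) in dst.iter_mut().zip(tmp) {
            *d = t;
        }
    }

In the code above the destination is a row of an ndarray Array2 addressed as self.corr[[i, z]], where the zip rewrite is less direct, which is presumably why the commit opts out with #[allow] instead.
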
src/cpa_normal.rs (2 additions, 2 deletions)
@@ -32,7 +32,7 @@ impl Cpa {
         Self {
             len_samples: size,
             chunk: patch,
-            guess_range: guess_range,
+            guess_range,
             sum_leakages: Array1::zeros(size),
             sum2_leakages: Array1::zeros(size),
             sum_keys: Array1::zeros(guess_range as usize),
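
This fixes clippy's redundant_field_names lint: when the initializing variable has the same name as the field, Rust's field init shorthand applies. A minimal sketch with a hypothetical struct:

    struct Config {
        guess_range: usize,
    }

    fn make_config(guess_range: usize) -> Config {
        // Before: Config { guess_range: guess_range }
        Config { guess_range }
    }
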
@@ -133,7 +133,7 @@ impl Cpa {
 
                 let denominator_2: f32 = std_leakages[x] - (avg_leakages[x] * avg_leakages[x]);
                 if numerator != 0.0 {
-                    self.corr[[i as usize, x]] =
+                    self.corr[[i, x]] =
                         f32::abs(numerator / f32::sqrt(denominator_1 * denominator_2));
                 }
             }
src/preprocessors.rs (12 additions, 15 deletions)
@@ -6,7 +6,7 @@ use crate::processors::MeanVar;
 
 /// Computes the centered product of "order" leakage samples
 /// Used particularly when performing high-order SCA
-struct CenteredProduct {
+pub struct CenteredProduct {
     /// Sum of traces
     acc: Array1<i64>,
     /// Number of traces processed
@@ -30,7 +30,7 @@ impl CenteredProduct {
         Self {
             acc: Array1::zeros(size),
             count: 0,
-            intervals: intervals,
+            intervals,
             processed: false,
             mean: Array1::zeros(size),
         }
@@ -58,13 +58,13 @@ impl CenteredProduct {
     /// The centered product subtracts the mean of the traces and then performs products between the input time samples
     pub fn apply<T: Into<f64> + Copy>(&mut self, trace: &ArrayView1<T>) -> Array1<f64> {
         // First we subtract the mean trace
-        let centered_trace: Array1<f64> = trace.mapv(|x| f64::from(x.into())) - &self.mean;
+        let centered_trace: Array1<f64> = trace.mapv(|x| x.into()) - &self.mean;
         let length_out_trace: usize = self.intervals.iter().map(|x| x.len()).product();
 
         let mut centered_product_trace = Array1::ones(length_out_trace);
 
         // Then we do the products
-        let mut multi_prod = (0..self.intervals.len())
+        let multi_prod = (0..self.intervals.len())
             .map(|i| self.intervals[i].clone())
             .multi_cartesian_product(); // NOTE/TODO: maybe this can go in the struct parameters, which could improve performance
 
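
The mapv change removes a double conversion: with T: Into<f64>, x.into() already yields an f64, so wrapping it in f64::from(...) converts f64 to f64, which clippy's useless_conversion lint flags. A standalone sketch under the same bound:

    fn to_f64<T: Into<f64> + Copy>(x: T) -> f64 {
        // Before: f64::from(x.into()) -- the outer f64::from is a no-op
        x.into()
    }
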
@@ -75,12 +75,13 @@ impl CenteredProduct {
             }
         }
         println! {"{:?}",centered_product_trace};
-        return centered_product_trace;
+
+        centered_product_trace
     }
 }
 
 /// Elevates parts of a trace to a certain power
-struct Power {
+pub struct Power {
     intervals: Vec<Range<i32>>,
     power: i32,
 }
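
The return change fixes clippy's needless_return lint: the final expression of a function body is already its return value, so a trailing return statement is redundant. Minimal sketch:

    fn centered(x: f64, mean: f64) -> f64 {
        // Before: return x - mean;
        x - mean
    }
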
@@ -90,14 +91,10 @@ impl Power {
     ///
     /// # Arguments
     ///
-    /// * `size` - Number of samples per trace
     /// * `intervals` - Intervals to elevate to the power
     /// * `power` - Power to elevate
-    pub fn new(size: usize, intervals: Vec<Range<i32>>, power: i32) -> Self {
-        Self {
-            intervals: intervals,
-            power: power,
-        }
+    pub fn new(intervals: Vec<Range<i32>>, power: i32) -> Self {
+        Self { intervals, power }
     }
 
     /// Processes an input trace
@@ -115,7 +112,7 @@ impl Power {
 }
 
 /// Standardization of the traces by removing the mean and scaling to unit variance
-struct StandardScaler {
+pub struct StandardScaler {
     /// meanVar processor
     meanvar: MeanVar,
     /// mean
@@ -146,7 +143,7 @@ impl StandardScaler {
 
     /// Apply the processing to an input trace
     pub fn apply<T: Into<f64> + Copy>(&mut self, trace: &ArrayView1<T>) -> Array1<f64> {
-        (trace.mapv(|x| f64::from(x.into())) - &self.mean) / &self.std
+        (trace.mapv(|x| x.into()) - &self.mean) / &self.std
     }
 }
 
@@ -159,7 +156,7 @@ mod tests {
     use ndarray::array;
 
     fn round_to_2_digits(x: f64) -> f64 {
-        return (x * 100 as f64).round() / 100 as f64;
+        (x * 100f64).round() / 100f64
     }
 
     #[test]
src/quicklog.rs (17 additions, 7 deletions)
@@ -2,13 +2,11 @@
 use ndarray::Array1;
 use npyz::{Deserialize, NpyFile};
-use serde_json::map::IntoIter;
 use std::{
     fs::File,
-    io::{BufRead, BufReader, Error, Lines, Seek, SeekFrom},
+    io::{BufRead, BufReader, Lines, Seek, SeekFrom},
     marker::PhantomData,
     path::Path,
-    time::Instant,
 };
 
 use crate::{trace::Trace, util::read_array_1_from_npy_file};
@@ -99,6 +97,7 @@ impl<T: Deserialize> Log<T> {
     }
 
     /// Returns the number of records in the log
+    #[allow(clippy::len_without_is_empty)]
     pub fn len(&self) -> usize {
         self.records.len()
     }
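
The allow silences clippy's len_without_is_empty lint, which expects a type with a public len() to also expose is_empty(). What the lint asks for instead, sketched on a hypothetical container:

    struct Log {
        records: Vec<u32>,
    }

    impl Log {
        pub fn len(&self) -> usize {
            self.records.len()
        }

        // Providing this would satisfy the lint without an allow attribute.
        pub fn is_empty(&self) -> bool {
            self.records.is_empty()
        }
    }
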
@@ -180,7 +179,7 @@ impl<T: Deserialize> Record<T> {
             let buf = BufReader::new(f);
             let npy = NpyFile::new(buf).unwrap();
             Ok(read_array_1_from_npy_file(npy))
-        } else if let Some(tid) = self.tid() {
+        } else if let Some(_tid) = self.tid() {
             // Trace is stored in a single file
             todo!()
         } else {
@@ -254,7 +253,6 @@ impl CachedLoader {
                 self.current_path = Some(path)
             }
             let toff = record.toff();
-            let start = Instant::now();
             let chunk = &self.current_data.as_slice()[toff as usize..];
             let npy = NpyFile::new(chunk).unwrap();
             Ok(read_array_1_from_npy_file(npy))
@@ -264,6 +262,12 @@
     }
 }
 
+impl Default for CachedLoader {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 /// Holds a trace batch file content and an offset list in the file, plus the
 /// data associated to each trace.
 ///
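
These Default impls (here and for Batch below) address clippy's new_without_default lint: a public new() with no arguments should also be reachable through Default. A minimal sketch with a hypothetical type:

    pub struct Cache {
        data: Vec<u8>,
    }

    impl Cache {
        pub fn new() -> Self {
            Self { data: Vec::new() }
        }
    }

    // Delegating keeps new() as the single place that builds the value.
    impl Default for Cache {
        fn default() -> Self {
            Self::new()
        }
    }

When every field already implements Default, #[derive(Default)] is an equally valid fix.
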
@@ -275,7 +279,7 @@ pub struct Batch<T, U> {
 }
 
 impl<T, U> Batch<T, U> {
-    fn new() -> Self {
+    pub fn new() -> Self {
         Self {
             file: Vec::new(),
             toffs_and_values: Vec::new(),
@@ -284,6 +288,12 @@ impl<T, U> Batch<T, U> {
     }
 }
 
+impl<T, U> Default for Batch<T, U> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl<T: Deserialize, U> IntoIterator for Batch<T, U> {
     type Item = Trace<T, U>;
 
@@ -403,7 +413,7 @@ impl<T: Deserialize, U> Iterator for BatchTraceIterator<T, U> {
 }
 
 pub fn array_from_bytes<T: Deserialize>(bytes: &[u8], toff: usize) -> Array1<T> {
-    let chunk = &bytes[toff as usize..];
+    let chunk = &bytes[toff..];
     let npy = NpyFile::new(chunk).unwrap();
     read_array_1_from_npy_file(npy)
 }
src/trace.rs (1 addition, 1 deletion)
@@ -1,5 +1,4 @@
 //! Defines the [`Trace`] storage structure.
 use ndarray::Array1;
 
 /// A side channel leakage record associated to its leakage data.
@@ -18,6 +17,7 @@ impl<T, U> Trace<T, U> {
     }
 
     /// Returns the number of points in the leakage waveform.
+    #[allow(clippy::len_without_is_empty)]
     pub fn len(&self) -> usize {
         self.leakage.len()
     }
src/util.rs (3 additions, 7 deletions)
@@ -1,15 +1,11 @@
 //! Convenient utility functions.
-use std::{
-    fs::File,
-    io::{self, BufWriter},
-    time::Duration,
-};
+use std::{fs::File, io::BufWriter, time::Duration};
 
 use indicatif::{ProgressBar, ProgressStyle};
 use ndarray::{Array, Array1, Array2, ArrayView2};
 use ndarray_npy::{write_npy, ReadNpyExt, ReadableElement, WriteNpyExt};
-use npyz::{Deserialize, NpyFile, WriterBuilder};
+use npyz::{Deserialize, NpyFile};
 
 /// Reads a [`NpyFile`] as a [`Array1`]
 ///
@@ -19,7 +15,7 @@ use npyz::{Deserialize, NpyFile, WriterBuilder};
 pub fn read_array_1_from_npy_file<T: Deserialize, R: std::io::Read>(npy: NpyFile<R>) -> Array1<T> {
     let mut v: Vec<T> = Vec::new();
     v.reserve_exact(npy.shape()[0] as usize);
-    v.extend(npy.data().unwrap().into_iter().map(|x| x.unwrap()));
+    v.extend(npy.data().unwrap().map(|x| x.unwrap()));
     Array::from_vec(v)
 }
 
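
The into_iter() removal is another useless_conversion fix: the value returned by npy.data().unwrap() already implements Iterator (the diff calls .map on it directly), so into_iter() was an identity conversion. Sketch on a plain iterator type:

    fn squares(values: std::vec::IntoIter<i32>) -> Vec<i32> {
        // Before: values.into_iter().map(|x| x * x).collect()
        values.map(|x| x * x).collect()
    }
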