Skip to content

Commit

Permalink
Chore: Cargo clippy cleanups (#4379)
Browse files Browse the repository at this point in the history
This PR cleans up various lint warnings across the codebase.
  • Loading branch information
zbuc authored May 10, 2024
1 parent 2e122d3 commit b168b57
Show file tree
Hide file tree
Showing 21 changed files with 80 additions and 57 deletions.
2 changes: 1 addition & 1 deletion crates/bin/pcli/src/command/init.rs
Original file line number Diff line number Diff line change
Expand Up @@ -361,7 +361,7 @@ impl InitCmd {
let password = ActualTerminal.get_confirmed_password().await?;
CustodyConfig::Encrypted(penumbra_custody::encrypted::Config::create(
&password,
penumbra_custody::encrypted::InnerConfig::SoftKms(spend_key.into()),
penumbra_custody::encrypted::InnerConfig::SoftKms(spend_key),
)?)
}
CustodyConfig::Threshold(c) => {
Expand Down
20 changes: 11 additions & 9 deletions crates/bin/pcli/src/command/query/auction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -100,8 +100,10 @@ pub async fn render_dutch_auction(
|| (Amount::zero(), Amount::zero()),
|lp| {
(
lp.reserves_for(input_id).unwrap(),
lp.reserves_for(output_id).unwrap(),
lp.reserves_for(input_id)
.expect("lp doesn't have reserves for input asset"),
lp.reserves_for(output_id)
.expect("lp doesn't have reserves for output asset"),
)
},
);
Expand Down Expand Up @@ -134,13 +136,13 @@ pub async fn render_dutch_auction(
])
.set_content_arrangement(ContentArrangement::DynamicFullWidth)
.add_row(vec![
Cell::new(&truncate_auction_id(&auction_id)).set_delimiter('.'),
Cell::new(&render_sequence(dutch_auction.state.sequence)),
Cell::new(&format!("{start_height} -> {end_height}")),
Cell::new(&dutch_auction.description.step_count.to_string()),
Cell::new(&format!("{}", start_price)),
Cell::new(&format!("{}", end_price)),
Cell::new(&initial_input.format(asset_cache)),
Cell::new(truncate_auction_id(&auction_id)).set_delimiter('.'),
Cell::new(render_sequence(dutch_auction.state.sequence)),
Cell::new(format!("{start_height} -> {end_height}")),
Cell::new(dutch_auction.description.step_count.to_string()),
Cell::new(format!("{}", start_price)),
Cell::new(format!("{}", end_price)),
Cell::new(initial_input.format(asset_cache)),
Cell::new(format!(
"({}, {})",
&auction_input_reserves.format(asset_cache),
Expand Down
8 changes: 6 additions & 2 deletions crates/bin/pcli/src/command/query/ibc_query.rs
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,9 @@ impl IbcCmd {
"Client ID",
"Client Height",
]);
let mut state_str = State::from_i32(channel.state).unwrap().to_string();
let mut state_str = State::from_i32(channel.state)
.expect("invalid state value")
.to_string();

let current_time: time::OffsetDateTime = SystemTime::now().into();
let current_time_tm: tendermint::Time = current_time.try_into()?;
Expand Down Expand Up @@ -270,7 +272,9 @@ impl IbcCmd {
]);

for info in channel_infos {
let mut state_str = State::from_i32(info.channel.state).unwrap().to_string();
let mut state_str = State::from_i32(info.channel.state)
.expect("invalid state value")
.to_string();
let current_time: time::OffsetDateTime = SystemTime::now().into();
let current_time_tm: tendermint::Time = current_time.try_into()?;

Expand Down
2 changes: 1 addition & 1 deletion crates/bin/pcli/src/command/tx/auction/dutch/gda.rs
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ impl GradualAuction {
);

let mut rng = rand::thread_rng();
let exp_dist = Exp::new(1.0 / lambda).unwrap();
let exp_dist = Exp::new(1.0 / lambda).expect("lambda too small");
let mut current_height = start_height as f64;

let mut auction_starts = Vec::with_capacity(num_auctions as usize);
Expand Down
2 changes: 1 addition & 1 deletion crates/bin/pcli/src/terminal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ impl Terminal for ActualTerminal {
}
// Store the byte we read and print it back to the terminal.
bytes.push(b);
stdout.write(&[b]).unwrap();
stdout.write_all(&[b]).expect("stdout write failed");
// Flushing may not be the most efficient but performance isn't critical here.
stdout.flush()?;
}
Expand Down
6 changes: 2 additions & 4 deletions crates/bin/pd/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,6 @@ pub mod zipserve;
pub use crate::metrics::register_metrics;
pub use penumbra_app::app::App;

// NOTE(review): the diff rendering duplicated the pre- and post-change const
// declarations; only the clippy-clean `&[u8]` forms are kept (the `'static`
// lifetime on a const reference is redundant: clippy::redundant_static_lifetimes).

/// ZIP archive bytes for the minifront frontend, embedded at build time.
pub const MINIFRONT_ARCHIVE_BYTES: &[u8] = include_bytes!("../../../../assets/minifront.zip");

/// ZIP archive bytes for the node status page, embedded at build time.
pub const NODE_STATUS_ARCHIVE_BYTES: &[u8] = include_bytes!("../../../../assets/node-status.zip");
2 changes: 1 addition & 1 deletion crates/bin/pd/src/migrate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ impl Migration {
let post_upgrade_root_hash = storage.commit_in_place(delta).await?;
tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");

let migration_duration = start_time.elapsed().unwrap();
let migration_duration = start_time.elapsed().expect("start time not set");

// Reload storage so we can make reads against its migrated state:
storage.release().await;
Expand Down
5 changes: 4 additions & 1 deletion crates/bin/pd/src/migrate/testnet72.rs
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,10 @@ pub async fn migrate(
let post_upgrade_root_hash = storage.commit_in_place(delta).await?;
tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");

(start_time.elapsed().unwrap(), post_upgrade_root_hash)
(
start_time.elapsed().expect("start time not set"),
post_upgrade_root_hash,
)
};

storage.release().await;
Expand Down
5 changes: 4 additions & 1 deletion crates/bin/pd/src/migrate/testnet74.rs
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,10 @@ pub async fn migrate(
let post_upgrade_root_hash = storage.commit_in_place(delta).await?;
tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");

(start_time.elapsed().unwrap(), post_upgrade_root_hash)
(
start_time.elapsed().expect("start time not set"),
post_upgrade_root_hash,
)
};

tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");
Expand Down
1 change: 1 addition & 0 deletions crates/bin/pd/src/zipserve.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ pub fn router(prefix: &str, archive_bytes: &'static [u8]) -> axum::Router {
.route(&path2, get(handler2))
}

#[allow(clippy::unwrap_used)]
async fn serve_zip(
archive_bytes: &'static [u8],
Path(mut path): Path<String>,
Expand Down
5 changes: 4 additions & 1 deletion crates/cnidarium/src/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ use std::{path::PathBuf, sync::Arc};
use anyhow::{bail, ensure, Result};
use parking_lot::RwLock;
use rocksdb::{Options, DB};
// HashMap is okay here because we don't care about ordering of substore roots.
#[allow(clippy::disallowed_types)]
use std::collections::HashMap;
use tokio::sync::watch;
use tracing::Span;
Expand Down Expand Up @@ -304,6 +306,7 @@ impl Storage {
let changes = Arc::new(cache.clone_changes());

let mut changes_by_substore = cache.shard_by_prefix(&self.0.multistore_config);
#[allow(clippy::disallowed_types)]
let mut substore_roots = HashMap::new();
let mut multistore_versions =
multistore::MultistoreCache::from_config(self.0.multistore_config.clone());
Expand Down Expand Up @@ -510,7 +513,7 @@ impl Storage {
}

let old_substore_version = snapshot
.substore_version(&substore_config)
.substore_version(substore_config)
.expect("substores must be initialized at startup");

// if the substore exists in `substore_roots`, there have been updates to the substore.
Expand Down
4 changes: 2 additions & 2 deletions crates/cnidarium/src/store/multistore.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ impl MultistoreConfig {
// If the key does not contain a delimiter, we return the original key
// routed to the main store. This is because we do not want to allow
// collisions e.g. `prefix_a/key` and `prefix_akey`.
let Some(matching_key) = truncated_key.strip_prefix("/") else {
let Some(matching_key) = truncated_key.strip_prefix('/') else {
return (key, self.main_store.clone());
};

Expand Down Expand Up @@ -155,7 +155,7 @@ impl MultistoreConfig {
.expect("key has the prefix of the matched substore");

let truncated_prefix = truncated_prefix
.strip_prefix("/")
.strip_prefix('/')
.unwrap_or(truncated_prefix);
(truncated_prefix, config)
}
Expand Down
50 changes: 30 additions & 20 deletions crates/cnidarium/src/store/substore.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,42 +75,52 @@ impl SubstoreConfig {

/// Returns a handle to the JMT column family for this substore.
///
/// # Panics
/// Panics if the column family is not registered with the database,
/// which indicates the store was not initialized at startup.
pub fn cf_jmt<'s>(&self, db_handle: &'s Arc<rocksdb::DB>) -> &'s ColumnFamily {
    let column = self.cf_jmt.as_str();
    // `unwrap_or_else` + `panic!` instead of `expect(&format!(...))` so the
    // panic message is only formatted on the failure path (clippy::expect_fun_call).
    db_handle.cf_handle(column).unwrap_or_else(|| {
        panic!(
            "jmt column family not found for prefix: {}, substore: {}",
            column, self.prefix
        )
    })
}

/// Returns a handle to the JMT values column family for this substore.
///
/// # Panics
/// Panics if the column family is not registered with the database,
/// which indicates the store was not initialized at startup.
pub fn cf_jmt_values<'s>(&self, db_handle: &'s Arc<rocksdb::DB>) -> &'s ColumnFamily {
    let column = self.cf_jmt_values.as_str();
    // Lazily format the panic message on the failure path (clippy::expect_fun_call).
    db_handle.cf_handle(column).unwrap_or_else(|| {
        panic!(
            "jmt-values column family not found for prefix: {}, substore: {}",
            column, self.prefix
        )
    })
}

/// Returns a handle to the JMT keys-by-keyhash column family for this substore.
///
/// # Panics
/// Panics if the column family is not registered with the database,
/// which indicates the store was not initialized at startup.
pub fn cf_jmt_keys_by_keyhash<'s>(&self, db_handle: &'s Arc<rocksdb::DB>) -> &'s ColumnFamily {
    let column = self.cf_jmt_keys_by_keyhash.as_str();
    // Lazily format the panic message on the failure path (clippy::expect_fun_call).
    db_handle.cf_handle(column).unwrap_or_else(|| {
        panic!(
            "jmt-keys-by-keyhash column family not found for prefix: {}, substore: {}",
            column, self.prefix
        )
    })
}

/// Returns a handle to the JMT keys column family for this substore.
///
/// # Panics
/// Panics if the column family is not registered with the database,
/// which indicates the store was not initialized at startup.
pub fn cf_jmt_keys<'s>(&self, db_handle: &'s Arc<rocksdb::DB>) -> &'s ColumnFamily {
    let column = self.cf_jmt_keys.as_str();
    // Lazily format the panic message on the failure path (clippy::expect_fun_call).
    db_handle.cf_handle(column).unwrap_or_else(|| {
        panic!(
            "jmt-keys column family not found for prefix: {}, substore: {}",
            column, self.prefix
        )
    })
}

/// Returns a handle to the nonverifiable column family for this substore.
///
/// # Panics
/// Panics if the column family is not registered with the database,
/// which indicates the store was not initialized at startup.
pub fn cf_nonverifiable<'s>(&self, db_handle: &'s Arc<rocksdb::DB>) -> &'s ColumnFamily {
    let column = self.cf_nonverifiable.as_str();
    // Lazily format the panic message on the failure path (clippy::expect_fun_call).
    db_handle.cf_handle(column).unwrap_or_else(|| {
        panic!(
            "nonverifiable column family not found for prefix: {}, substore: {}",
            column, self.prefix
        )
    })
}

pub fn latest_version_from_db(
Expand Down
3 changes: 3 additions & 0 deletions crates/cnidarium/src/write_batch.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
use std::sync::Arc;

// HashMap is okay here because we don't care about ordering of substore roots.
#[allow(clippy::disallowed_types)]
use std::collections::HashMap;

use crate::{
Expand All @@ -22,6 +24,7 @@ pub struct StagedWriteBatch {
pub(crate) root_hash: RootHash,
/// The configs, root hashes, and new versions of each substore
/// that was updated in this batch.
#[allow(clippy::disallowed_types)]
pub(crate) substore_roots: HashMap<Arc<SubstoreConfig>, (RootHash, u64)>,
/// Whether or not to perform a migration.
pub(crate) perform_migration: bool,
Expand Down
2 changes: 1 addition & 1 deletion crates/core/component/dex/src/component/rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -524,7 +524,7 @@ impl SimulationService for Server {

let state = self.storage.latest_snapshot();

let mut routing_params = state.routing_params().await.unwrap();
let mut routing_params = state.routing_params().await.expect("routing params unset");
match routing_strategy {
Setting::SingleHop(_) => {
routing_params.max_hops = 1;
Expand Down
1 change: 1 addition & 0 deletions crates/core/component/dex/src/params.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ impl From<DexParameters> for pb::DexParameters {
}
}

#[allow(clippy::unwrap_used)]
impl Default for DexParameters {
fn default() -> Self {
// This will get used for generating default chain parameters; put some
Expand Down
1 change: 1 addition & 0 deletions crates/crypto/proof-params/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
#![deny(clippy::unwrap_used)]
#![allow(clippy::redundant_static_lifetimes)]
// Requires nightly.
#![cfg_attr(docsrs, feature(doc_auto_cfg))]

Expand Down
2 changes: 1 addition & 1 deletion crates/custody/src/threshold/sign.rs
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ pub fn no_signature_response(
request: &SigningRequest,
) -> Result<Option<SigningResponse>> {
match request {
SigningRequest::TransactionPlan(plan) if required_signatures(request) <= 0 => {
SigningRequest::TransactionPlan(plan) if required_signatures(request) == 0 => {
Ok(Some(SigningResponse::Transaction(AuthorizationData {
effect_hash: Some(plan.effect_hash(fvk)?),
spend_auths: Vec::new(),
Expand Down
4 changes: 2 additions & 2 deletions crates/test/mock-consensus/src/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ impl<C> TestNode<C> {
/// By default, signatures for all of the validators currently within the keyring will be
/// included in the block. Use [`Builder::with_signatures()`] to set a different set of
/// validator signatures.
pub fn block<'e>(&'e mut self) -> Builder<'e, C> {
pub fn block(&mut self) -> Builder<'_, C> {
let signatures = self.generate_signatures().collect();
Builder {
test_node: self,
Expand Down Expand Up @@ -150,7 +150,7 @@ where

let height = {
let height = test_node.height.increment();
test_node.height = height.clone();
test_node.height = height;
tracing::Span::current().record("height", height.value());
height
};
Expand Down
11 changes: 3 additions & 8 deletions crates/test/mock-consensus/src/block/signature.rs
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,8 @@ impl<C> TestNode<C> {
/// Returns an [`Iterator`] of signatures for validators in the keyring.
pub(super) fn generate_signatures(&self) -> impl Iterator<Item = CommitSig> + '_ {
self.keyring
.iter()
// Compute the address of this validator.
.map(|(vk, _)| -> [u8; 20] {
.keys()
.map(|vk| {
<Sha256 as Digest>::digest(vk).as_slice()[0..20]
.try_into()
.expect("")
Expand Down Expand Up @@ -86,11 +85,7 @@ impl<'e, C: 'e> Builder<'e, C> {

CommitInfo {
round,
votes: signatures
.into_iter()
.map(Self::vote)
.filter_map(|v| v)
.collect(),
votes: signatures.into_iter().filter_map(Self::vote).collect(),
}
}

Expand Down
1 change: 0 additions & 1 deletion crates/view/src/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1043,7 +1043,6 @@ impl Storage {
auction_state: u64,
) -> anyhow::Result<()> {
let auction_id = auction_id.0.to_vec();
let auction_state = auction_state;

let pool = self.pool.clone();

Expand Down

0 comments on commit b168b57

Please sign in to comment.