Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: Remove DeserializeWithEpoch trait #3

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions libsigner/src/messages.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,8 @@ use clarity::vm::types::QualifiedContractIdentifier;
use hashbrown::{HashMap, HashSet};
use serde::{Deserialize, Serialize};
use stacks_common::codec::{
read_next, read_next_at_most, read_next_at_most_with_epoch, read_next_exact, write_next,
Error as CodecError, StacksMessageCodec, MAX_MESSAGE_LEN,
read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError,
StacksMessageCodec, MAX_MESSAGE_LEN,
};
use stacks_common::types::StacksEpochId;
use stacks_common::util::hash::Sha512Trunc256Sum;
Expand Down Expand Up @@ -366,7 +366,7 @@ impl StacksMessageCodec for SignerMessage {
// I don't think these messages are stored on the blockchain, so `StacksEpochId::latest()` should be fine
let transactions: Vec<StacksTransaction> = {
let mut bound_read = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64);
read_next_at_most_with_epoch(&mut bound_read, u32::MAX, StacksEpochId::latest())
read_next_at_most(&mut bound_read, u32::MAX)
}?;
SignerMessage::Transactions(transactions)
}
Expand Down Expand Up @@ -1224,7 +1224,7 @@ impl StacksMessageCodec for RejectCode {
// I don't think these messages are stored on the blockchain, so `StacksEpochId::latest()` should be fine
let transactions: Vec<StacksTransaction> = {
let mut bound_read = BoundReader::from_reader(fd, MAX_MESSAGE_LEN as u64);
read_next_at_most_with_epoch(&mut bound_read, u32::MAX, StacksEpochId::latest())
read_next_at_most(&mut bound_read, u32::MAX)
}?;
RejectCode::MissingTransactions(transactions)
}
Expand Down
69 changes: 0 additions & 69 deletions stacks-common/src/codec/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,15 +87,6 @@ pub trait StacksMessageCodec {
}
}

/// Trait for types whose consensus (wire-format) deserialization depends on
/// the Stacks epoch in effect, since the accepted encoding can vary by epoch.
/// NOTE(review): this trait is being deleted by this change in favor of the
/// epoch-independent `StacksMessageCodec::consensus_deserialize`.
pub trait DeserializeWithEpoch {
    /// Deserialize a value of `Self` from `fd`, applying the parsing rules
    /// for `epoch_id`. Returns a codec `Error` on malformed input.
    fn consensus_deserialize_with_epoch<R: Read>(
        fd: &mut R,
        epoch_id: StacksEpochId,
    ) -> Result<Self, Error>
    where
        Self: Sized;
}

// impl_byte_array_message_codec!(MARFValue, 40);
impl_byte_array_message_codec!(SortitionId, 32);

Expand Down Expand Up @@ -193,66 +184,6 @@ pub fn read_next_exact<R: Read, T: StacksMessageCodec + Sized>(
read_next_vec::<T, R>(fd, num_items, 0)
}

/// Read a single `T` from `fd`, deserializing it with the parsing rules
/// for `epoch_id`. Thin wrapper over `T::consensus_deserialize_with_epoch`.
pub fn read_next_with_epoch<T: DeserializeWithEpoch, R: Read>(
    fd: &mut R,
    epoch_id: StacksEpochId,
) -> Result<T, Error> {
    T::consensus_deserialize_with_epoch(fd, epoch_id)
}

/// Read a length-prefixed vector of `T` from `fd`, deserializing each item
/// with the parsing rules for `epoch_id`.
///
/// Length validation: if `max_items > 0`, the encoded length must be at most
/// `max_items`; otherwise it must equal `num_items` exactly. The conservative
/// in-memory size estimate (`size_of::<T>() * len`) must not exceed
/// `MAX_MESSAGE_LEN`, so a malicious length prefix cannot force a huge
/// allocation before any item is read.
fn read_next_vec_with_epoch<T: DeserializeWithEpoch + Sized, R: Read>(
    fd: &mut R,
    num_items: u32,
    max_items: u32,
    epoch_id: StacksEpochId,
) -> Result<Vec<T>, Error> {
    let len = u32::consensus_deserialize(fd)?;

    if max_items > 0 {
        if len > max_items {
            // too many items
            // (fixed: closing paren was missing from this error message)
            return Err(Error::DeserializeError(format!(
                "Array has too many items ({} > {})",
                len, max_items
            )));
        }
    } else if len != num_items {
        // inexact item count
        return Err(Error::DeserializeError(format!(
            "Array has incorrect number of items ({} != {})",
            len, num_items
        )));
    }

    // Reject before allocating: the prefix alone must not imply a buffer
    // larger than the maximum message size.
    if (mem::size_of::<T>() as u128) * (len as u128) > MAX_MESSAGE_LEN as u128 {
        return Err(Error::DeserializeError(format!(
            "Message occupies too many bytes (tried to allocate {}*{}={})",
            mem::size_of::<T>() as u128,
            len,
            (mem::size_of::<T>() as u128) * (len as u128)
        )));
    }

    let mut ret = Vec::with_capacity(len as usize);
    for _i in 0..len {
        let next_item = T::consensus_deserialize_with_epoch(fd, epoch_id)?;
        ret.push(next_item);
    }

    Ok(ret)
}

/// Read a vector of at most `max_items` elements of `T` from `fd`,
/// deserializing each item with the parsing rules for `epoch_id`.
pub fn read_next_at_most_with_epoch<R: Read, T: DeserializeWithEpoch + Sized>(
    fd: &mut R,
    max_items: u32,
    epoch_id: StacksEpochId,
) -> Result<Vec<T>, Error> {
    // `num_items == 0` tells the vector reader to enforce only the upper bound.
    let items = read_next_vec_with_epoch::<T, R>(fd, 0, max_items, epoch_id)?;
    Ok(items)
}

impl<T> StacksMessageCodec for Vec<T>
where
T: StacksMessageCodec + Sized,
Expand Down
16 changes: 5 additions & 11 deletions stackslib/src/blockstack_cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ use clarity::vm::errors::{Error as ClarityError, RuntimeErrorType};
use clarity::vm::types::PrincipalData;
use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value};
use stacks_common::address::{b58, AddressHashMode};
use stacks_common::codec::{DeserializeWithEpoch, Error as CodecError, StacksMessageCodec};
use stacks_common::codec::{Error as CodecError, StacksMessageCodec};
use stacks_common::types::chainstate::StacksAddress;
use stacks_common::types::StacksEpochId;
use stacks_common::util::hash::{hex_bytes, to_hex};
Expand Down Expand Up @@ -315,10 +315,8 @@ fn sign_transaction_single_sig_standard(
transaction: &str,
secret_key: &StacksPrivateKey,
) -> Result<StacksTransaction, CliError> {
let transaction = StacksTransaction::consensus_deserialize_with_epoch(
&mut io::Cursor::new(&hex_bytes(transaction)?),
StacksEpochId::latest(),
)?;
let transaction =
StacksTransaction::consensus_deserialize(&mut io::Cursor::new(&hex_bytes(transaction)?))?;

let mut tx_signer = StacksTransactionSigner::new(&transaction);
tx_signer.sign_origin(secret_key)?;
Expand Down Expand Up @@ -665,10 +663,7 @@ fn decode_transaction(args: &[String], _version: TransactionVersion) -> Result<S
let mut cursor = io::Cursor::new(&tx_str);
let mut debug_cursor = LogReader::from_reader(&mut cursor);

match StacksTransaction::consensus_deserialize_with_epoch(
&mut debug_cursor,
StacksEpochId::latest(),
) {
match StacksTransaction::consensus_deserialize(&mut debug_cursor) {
Ok(tx) => Ok(serde_json::to_string(&tx).expect("Failed to serialize transaction to JSON")),
Err(e) => {
let mut ret = String::new();
Expand Down Expand Up @@ -744,8 +739,7 @@ fn decode_block(args: &[String], _version: TransactionVersion) -> Result<String,
let mut cursor = io::Cursor::new(&block_data);
let mut debug_cursor = LogReader::from_reader(&mut cursor);

match StacksBlock::consensus_deserialize_with_epoch(&mut debug_cursor, StacksEpochId::latest())
{
match StacksBlock::consensus_deserialize(&mut debug_cursor) {
Ok(block) => Ok(serde_json::to_string(&block).expect("Failed to serialize block to JSON")),
Err(e) => {
let mut ret = String::new();
Expand Down
15 changes: 3 additions & 12 deletions stackslib/src/chainstate/nakamoto/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,8 @@ use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAM
use sha2::{Digest as Sha2Digest, Sha512_256};
use stacks_common::bitvec::BitVec;
use stacks_common::codec::{
read_next, read_next_at_most_with_epoch, write_next, DeserializeWithEpoch, Error as CodecError,
StacksMessageCodec, MAX_MESSAGE_LEN, MAX_PAYLOAD_LEN,
read_next, read_next_at_most, write_next, Error as CodecError, StacksMessageCodec,
MAX_MESSAGE_LEN, MAX_PAYLOAD_LEN,
};
use stacks_common::consts::{
FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, MINER_REWARD_MATURITY,
Expand Down Expand Up @@ -3351,21 +3351,12 @@ impl StacksMessageCodec for NakamotoBlock {
}

fn consensus_deserialize<R: std::io::Read>(fd: &mut R) -> Result<Self, CodecError> {
panic!("NakamotoBlock should be deserialized with consensus_deserialize_with_epoch instead")
}
}

impl DeserializeWithEpoch for NakamotoBlock {
fn consensus_deserialize_with_epoch<R: std::io::Read>(
fd: &mut R,
epoch_id: StacksEpochId,
) -> Result<NakamotoBlock, CodecError> {
let header: NakamotoBlockHeader = read_next(fd)?;

let txs: Vec<StacksTransaction> = {
let mut bound_read = BoundReader::from_reader(fd, u64::from(MAX_MESSAGE_LEN));
// The latest epoch where StacksMicroblock exist is Epoch25
read_next_at_most_with_epoch(&mut bound_read, u32::MAX, epoch_id)
read_next_at_most(&mut bound_read, u32::MAX)
}?;

// all transactions are unique
Expand Down
14 changes: 4 additions & 10 deletions stackslib/src/chainstate/nakamoto/staging_blocks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState};
use crate::chainstate::stacks::db::StacksChainState;
use crate::chainstate::stacks::index::marf::MarfConnection;
use crate::chainstate::stacks::{Error as ChainstateError, StacksBlock, StacksBlockHeader};
use crate::stacks_common::codec::{DeserializeWithEpoch, StacksMessageCodec};
use crate::stacks_common::codec::StacksMessageCodec;
use crate::stacks_common::types::StacksEpochId;
use crate::util_lib::db::{
query_int, query_row, query_row_panic, query_rows, sqlite_open, tx_begin_immediate, u64_to_sql,
Expand Down Expand Up @@ -218,10 +218,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> {
let Some(block_bytes) = data else {
return Ok(None);
};
let block = NakamotoBlock::consensus_deserialize_with_epoch(
&mut block_bytes.as_slice(),
StacksEpochId::latest(),
)?;
let block = NakamotoBlock::consensus_deserialize(&mut block_bytes.as_slice())?;
if &block.header.consensus_hash != consensus_hash {
error!(
"Staging DB corruption: expected {}, got {}",
Expand Down Expand Up @@ -258,10 +255,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> {
let Some(block_bytes) = res else {
return Ok(None);
};
let block = NakamotoBlock::consensus_deserialize_with_epoch(
&mut block_bytes.as_slice(),
StacksEpochId::latest(),
)?;
let block = NakamotoBlock::consensus_deserialize(&mut block_bytes.as_slice())?;
if &block.header.block_id() != index_block_hash {
error!(
"Staging DB corruption: expected {}, got {}",
Expand Down Expand Up @@ -311,7 +305,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> {
self
.query_row_and_then(query, NO_PARAMS, |row| {
let data: Vec<u8> = row.get("data")?;
let block = NakamotoBlock::consensus_deserialize_with_epoch(&mut data.as_slice(), StacksEpochId::latest())?;
let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?;
Ok(Some((
block,
u64::try_from(data.len()).expect("FATAL: block is bigger than a u64"),
Expand Down
7 changes: 2 additions & 5 deletions stackslib/src/chainstate/nakamoto/tests/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ use rand::{thread_rng, RngCore};
use rusqlite::{Connection, ToSql};
use stacks_common::address::AddressHashMode;
use stacks_common::bitvec::BitVec;
use stacks_common::codec::{DeserializeWithEpoch, StacksMessageCodec};
use stacks_common::codec::StacksMessageCodec;
use stacks_common::consts::{
CHAIN_ID_MAINNET, CHAIN_ID_TESTNET, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH,
};
Expand Down Expand Up @@ -101,10 +101,7 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> {
let block_data: Vec<Vec<u8>> = query_rows(self, qry, args)?;
let mut blocks = Vec::with_capacity(block_data.len());
for data in block_data.into_iter() {
let block = NakamotoBlock::consensus_deserialize_with_epoch(
&mut data.as_slice(),
StacksEpochId::latest(),
)?;
let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?;
blocks.push(block);
}
Ok(blocks)
Expand Down
Loading
Loading