diff --git a/node/src/components/block_validator/tests.rs b/node/src/components/block_validator/tests.rs index 6a60c086f1..1fc678d0e9 100644 --- a/node/src/components/block_validator/tests.rs +++ b/node/src/components/block_validator/tests.rs @@ -1,17 +1,13 @@ -use std::{ - collections::{HashSet, VecDeque}, - sync::Arc, - time::Duration, -}; +use std::{collections::VecDeque, sync::Arc, time::Duration}; use derive_more::From; use itertools::Itertools; use rand::Rng; use casper_types::{ - bytesrepr::Bytes, runtime_args, system::standard_payment::ARG_AMOUNT, testing::TestRng, - Chainspec, ChainspecRawBytes, Deploy, DeployHash, ExecutableDeployItem, RuntimeArgs, SecretKey, - TimeDiff, Transaction, TransactionHash, U512, + bytesrepr::Bytes, runtime_args, system::standard_payment::ARG_AMOUNT, testing::TestRng, Block, + BlockSignatures, Chainspec, ChainspecRawBytes, Deploy, ExecutableDeployItem, RuntimeArgs, + SecretKey, TestBlockBuilder, TimeDiff, Transaction, U512, }; use crate::{ @@ -19,7 +15,7 @@ use crate::{ consensus::BlockContext, fetcher::{self, FetchItem}, }, - effect::announcements::FatalAnnouncement, + effect::{announcements::FatalAnnouncement, requests::StorageRequest}, reactor::{EventQueueHandle, QueueKind, Scheduler}, types::{BlockPayload, ValidatorMatrix}, utils::{self, Loadable}, @@ -53,10 +49,13 @@ struct MockReactor { } impl MockReactor { - fn new(rng: &mut TestRng) -> Self { + fn new>( + our_secret_key: Arc, + public_keys: I, + ) -> Self { MockReactor { scheduler: utils::leak(Scheduler::new(QueueKind::weights(), None)), - validator_matrix: ValidatorMatrix::new_with_validator(Arc::new(SecretKey::random(rng))), + validator_matrix: ValidatorMatrix::new_with_validators(our_secret_key, public_keys), } } @@ -69,56 +68,76 @@ impl MockReactor { } } - async fn expect_fetch_deploys( - &self, - mut deploys_to_fetch: Vec, - mut deploys_to_not_fetch: HashSet, - ) { - while !deploys_to_fetch.is_empty() || !deploys_to_not_fetch.is_empty() { - let ((_ancestor, 
reactor_event), _) = self.scheduler.pop().await; - if let ReactorEvent::TransactionFetcher(FetcherRequest { - id, - peer, - validation_metadata: _, - responder, - }) = reactor_event - { - if let Some((position, _)) = deploys_to_fetch - .iter() - .find_position(|&deploy| Transaction::Deploy(deploy.clone()).fetch_id() == id) - { - let deploy = deploys_to_fetch.remove(position); - let response = FetchedData::FromPeer { - item: Box::new(Transaction::Deploy(deploy)), - peer, - }; - responder.respond(Ok(response)).await; - } else if deploys_to_not_fetch.remove(&match id.transaction_hash() { - TransactionHash::Deploy(deploy_hash) => deploy_hash, - TransactionHash::V1(_) => unreachable!("only fetching deploys for now"), - }) { - responder - .respond(Err(fetcher::Error::Absent { - id: Box::new(id), + async fn handle_requests(&self, context: &ValidationContext) { + while let Ok(((_ancestor, event), _)) = + tokio::time::timeout(Duration::from_millis(100), self.scheduler.pop()).await + { + match event { + ReactorEvent::TransactionFetcher(FetcherRequest { + id, + peer, + validation_metadata: _, + responder, + }) => { + if let Some(deploy) = context.get_deploy(id) { + let response = FetchedData::FromPeer { + item: Box::new(Transaction::Deploy(deploy)), peer, - })) - .await - } else { - panic!("unexpected fetch request: {}", id); + }; + responder.respond(Ok(response)).await; + } else { + responder + .respond(Err(fetcher::Error::Absent { + id: Box::new(id), + peer, + })) + .await; + } } - } else { - panic!("unexpected event: {:?}", reactor_event); - } + ReactorEvent::Storage(StorageRequest::GetBlockAndMetadataByHeight { + block_height, + only_from_available_block_range: _, + responder, + }) => { + let maybe_block = context.get_block_with_metadata(block_height); + responder.respond(maybe_block).await; + } + ReactorEvent::FinalitySigFetcher(FetcherRequest { + id, + peer, + validation_metadata: _, + responder, + }) => { + if let Some(signature) = context.get_signature(&id) { + let 
response = FetchedData::FromPeer { + item: Box::new(signature), + peer, + }; + responder.respond(Ok(response)).await; + } else { + responder + .respond(Err(fetcher::Error::Absent { + id: Box::new(id), + peer, + })) + .await; + } + } + reactor_event => { + panic!("unexpected event: {:?}", reactor_event); + } + }; } } } -pub(super) fn new_proposed_block( +pub(super) fn new_proposed_block_with_cited_signatures( timestamp: Timestamp, transfer: Vec, staking: Vec, install_upgrade: Vec, standard: Vec, + cited_signatures: RewardedSignatures, ) -> ProposedBlock { // Accusations and ancestors are empty, and the random bit is always true: // These values are not checked by the block validator. @@ -129,12 +148,29 @@ pub(super) fn new_proposed_block( install_upgrade, standard, vec![], - Default::default(), + cited_signatures, true, ); ProposedBlock::new(Arc::new(block_payload), block_context) } +pub(super) fn new_proposed_block( + timestamp: Timestamp, + transfer: Vec, + staking: Vec, + install_upgrade: Vec, + standard: Vec, +) -> ProposedBlock { + new_proposed_block_with_cited_signatures( + timestamp, + transfer, + staking, + install_upgrade, + standard, + Default::default(), + ) +} + pub(super) fn new_deploy(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Deploy { let secret_key = SecretKey::random(rng); let chain_name = "chain".to_string(); @@ -188,99 +224,454 @@ pub(super) fn new_transfer(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDif ) } -/// Validates a block using a `BlockValidator` component, and returns the result. -async fn validate_block( - rng: &mut TestRng, - timestamp: Timestamp, - deploys: Vec, - transfers: Vec, -) -> bool { - // Assemble the block to be validated. 
- let transfers_for_block = transfers - .iter() - .map(|deploy| TransactionHashWithApprovals::from(&Transaction::Deploy(deploy.clone()))) - .collect_vec(); - let standard_for_block = deploys - .iter() - .map(|deploy| TransactionHashWithApprovals::from(&Transaction::Deploy(deploy.clone()))) - .collect_vec(); - let proposed_block = new_proposed_block( - timestamp, - transfers_for_block, - vec![], - vec![], - standard_for_block, - ); +type SecretKeys = BTreeMap>; - // Create the reactor and component. - let reactor = MockReactor::new(rng); - let effect_builder = EffectBuilder::new(EventQueueHandle::without_shutdown(reactor.scheduler)); - let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); - let mut block_validator = BlockValidator::new( - Arc::new(chainspec), - reactor.validator_matrix.clone(), - Config::default(), - ); +struct ValidationContext { + chainspec: Arc, + // Validators + secret_keys: SecretKeys, + // map of height → block + past_blocks: HashMap, + // blocks that will be "stored" during validation + delayed_blocks: HashMap, + deploys: HashMap, + transfers: HashMap, + // map of block height → signatures for the block + signatures: HashMap>, + // map of signatures that aren't stored, but are fetchable + fetchable_signatures: HashMap, - // Pass the block to the component. This future will eventually resolve to the result, i.e. - // whether the block is valid or not. - let bob_node_id = NodeId::random(rng); - let block_height = rng.gen_range(0..1000); - let validation_result = tokio::spawn(effect_builder.validate_block( - bob_node_id, - block_height, - proposed_block.clone(), - )); - let event = reactor.expect_block_validator_event().await; - let effects = block_validator.handle_event(effect_builder, rng, event); - - // If validity could already be determined, the effect will be the validation response. 
- if block_validator - .validation_states - .values() - .all(BlockValidationState::completed) - { - assert_eq!(1, effects.len()); - for effect in effects { - tokio::spawn(effect).await.unwrap(); // Response. + // fields defining the proposed block that will be validated + deploys_to_include: Vec, + transfers_to_include: Vec, + signatures_to_include: HashMap>, + proposed_block_height: Option, +} + +impl ValidationContext { + fn new() -> Self { + let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + Self { + chainspec: Arc::new(chainspec), + secret_keys: BTreeMap::new(), + past_blocks: HashMap::new(), + delayed_blocks: HashMap::new(), + deploys: HashMap::new(), + transfers: HashMap::new(), + fetchable_signatures: HashMap::new(), + signatures: HashMap::new(), + deploys_to_include: vec![], + transfers_to_include: vec![], + signatures_to_include: HashMap::new(), + proposed_block_height: None, + } + } + + fn with_num_validators(mut self, rng: &mut TestRng, num_validators: usize) -> Self { + for _ in 0..num_validators { + let validator_key = Arc::new(SecretKey::random(rng)); + self.secret_keys + .insert(PublicKey::from(&*validator_key), validator_key.clone()); + } + self + } + + fn get_validators(&self) -> Vec { + self.secret_keys.keys().cloned().collect() + } + + fn with_past_blocks( + mut self, + rng: &mut TestRng, + min_height: u64, + max_height: u64, + era: EraId, + ) -> Self { + self.past_blocks + .extend((min_height..=max_height).into_iter().map(|height| { + let block = TestBlockBuilder::new().height(height).era(era).build(rng); + (height, block.into()) + })); + self.proposed_block_height = self + .proposed_block_height + .map(|height| height.max(max_height + 1)) + .or(Some(max_height + 1)); + self + } + + fn with_delayed_blocks( + mut self, + rng: &mut TestRng, + min_height: u64, + max_height: u64, + era: EraId, + ) -> Self { + self.delayed_blocks + .extend((min_height..=max_height).into_iter().map(|height| { + let block = 
TestBlockBuilder::new().height(height).era(era).build(rng); + (height, block.into()) + })); + self.proposed_block_height = self + .proposed_block_height + .map(|height| height.max(max_height + 1)) + .or(Some(max_height + 1)); + self + } + + fn get_delayed_blocks(&mut self) -> Vec { + let heights = self.delayed_blocks.keys().cloned().collect(); + self.past_blocks + .extend(std::mem::take(&mut self.delayed_blocks)); + heights + } + + fn with_signatures_for_block<'a, I: IntoIterator>( + mut self, + min_height: u64, + max_height: u64, + validators: I, + ) -> Self { + for validator in validators { + for height in min_height..=max_height { + let block = self + .past_blocks + .get(&height) + .or_else(|| self.delayed_blocks.get(&height)) + .expect("should have block"); + let secret_key = self + .secret_keys + .get(validator) + .expect("should have validator"); + let signature = + FinalitySignature::create(*block.hash(), block.era_id(), secret_key); + self.signatures + .entry(height) + .or_default() + .insert(validator.clone(), signature); + } + } + self + } + + fn with_fetchable_signatures<'a, I: IntoIterator>( + mut self, + min_height: u64, + max_height: u64, + validators: I, + ) -> Self { + for validator in validators { + for height in min_height..=max_height { + let block = self.past_blocks.get(&height).expect("should have block"); + let secret_key = self + .secret_keys + .get(validator) + .expect("should have validator"); + let signature = + FinalitySignature::create(*block.hash(), block.era_id(), secret_key); + self.fetchable_signatures + .insert(*signature.fetch_id(), signature); + } + } + self + } + + fn include_signatures<'a, I: IntoIterator>( + mut self, + min_height: u64, + max_height: u64, + validators: I, + ) -> Self { + for validator in validators { + for height in min_height..=max_height { + self.signatures_to_include + .entry(height) + .or_default() + .insert(validator.clone()); + } } - return validation_result.await.unwrap(); + self + } + + fn 
with_deploys(mut self, deploys: Vec) -> Self { + self.deploys.extend( + deploys + .into_iter() + .map(|deploy| (Transaction::Deploy(deploy.clone()).fetch_id(), deploy)), + ); + self + } + + fn with_transfers(mut self, transfers: Vec) -> Self { + self.transfers.extend( + transfers + .into_iter() + .map(|deploy| (Transaction::Deploy(deploy.clone()).fetch_id(), deploy)), + ); + self + } + + fn include_all_deploys(mut self) -> Self { + self.deploys_to_include + .extend(self.deploys.values().map(|deploy| { + TransactionHashWithApprovals::from(&Transaction::Deploy(deploy.clone())) + })); + self + } + + fn include_all_transfers(mut self) -> Self { + self.transfers_to_include + .extend(self.transfers.values().map(|deploy| { + TransactionHashWithApprovals::from(&Transaction::Deploy(deploy.clone())) + })); + self + } + + fn include_deploys>( + mut self, + deploys: I, + ) -> Self { + self.deploys_to_include.extend(deploys); + self } - // Otherwise the effects must be requests to fetch the block's deploys. 
- let fetch_results: Vec<_> = effects.into_iter().map(tokio::spawn).collect(); + fn include_transfers>( + mut self, + transfers: I, + ) -> Self { + self.transfers_to_include.extend(transfers); + self + } + + fn get_deploy(&self, id: TransactionId) -> Option { + self.deploys + .get(&id) + .cloned() + .or_else(|| self.transfers.get(&id).cloned()) + } + + fn get_signature(&self, id: &FinalitySignatureId) -> Option { + self.fetchable_signatures.get(id).cloned() + } - // We make our mock reactor answer with the expected deploys and transfers: - reactor - .expect_fetch_deploys( - deploys.into_iter().chain(transfers).collect(), - HashSet::new(), + fn get_block_with_metadata(&self, block_height: u64) -> Option { + self.past_blocks.get(&block_height).map(|block| { + let empty_hashmap = HashMap::new(); + let signatures = self.signatures.get(&block_height).unwrap_or(&empty_hashmap); + let mut block_signatures = BlockSignatures::new(*block.hash(), block.era_id()); + for signature in signatures.values() { + block_signatures.insert_signature(signature.clone()); + } + BlockWithMetadata { + block: block.clone(), + block_signatures, + } + }) + } + + fn proposed_block(&self, timestamp: Timestamp) -> ProposedBlock { + let rewards_window = self.chainspec.core_config.signature_rewards_max_delay; + let rewarded_signatures = self + .proposed_block_height + .map(|proposed_block_height| { + RewardedSignatures::new( + (1..=rewards_window) + .into_iter() + .filter_map(|height_diff| proposed_block_height.checked_sub(height_diff)) + .map(|height| { + let signing_validators = self + .signatures_to_include + .get(&height) + .cloned() + .unwrap_or_default(); + SingleBlockRewardedSignatures::from_validator_set( + &signing_validators, + self.secret_keys.keys(), + ) + }), + ) + }) + .unwrap_or_default(); + new_proposed_block_with_cited_signatures( + timestamp, + self.transfers_to_include.to_vec(), + vec![], + vec![], + self.deploys_to_include.to_vec(), + rewarded_signatures, ) - .await; - - // The 
resulting `FetchResult`s are passed back into the component. When any deploy turns out - // to be invalid, or once all of them have been validated, the component will respond. - let mut effects = Effects::new(); - for fetch_result in fetch_results { - let events = fetch_result.await.unwrap(); - assert_eq!(1, events.len()); - effects.extend(events.into_iter().flat_map(|found_deploy| { - block_validator.handle_event(effect_builder, rng, found_deploy) - })); } - // We expect exactly one effect: the validation response. This will resolve the result. - assert_eq!(1, effects.len()); - for effect in effects { - tokio::spawn(effect).await.unwrap(); // Response. + /// Validates a block using a `BlockValidator` component, and returns the result. + async fn validate_block(&mut self, rng: &mut TestRng, timestamp: Timestamp) -> bool { + let proposed_block = self.proposed_block(timestamp); + + // Create the reactor and component. + let our_secret_key = self + .secret_keys + .values() + .next() + .expect("should have a secret key") + .clone(); + let reactor = MockReactor::new(our_secret_key, self.secret_keys.keys().cloned()); + let effect_builder = + EffectBuilder::new(EventQueueHandle::without_shutdown(reactor.scheduler)); + let mut block_validator = BlockValidator::new( + self.chainspec.clone(), + reactor.validator_matrix.clone(), + Config::default(), + ); + + // Pass the block to the component. This future will eventually resolve to the result, i.e. + // whether the block is valid or not. + let bob_node_id = NodeId::random(rng); + let block_height = rng.gen_range(0..1000); + let validation_result = tokio::spawn(effect_builder.validate_block( + bob_node_id, + self.proposed_block_height.unwrap_or(block_height), + proposed_block.clone(), + )); + let event = reactor.expect_block_validator_event().await; + let effects = block_validator.handle_event(effect_builder, rng, event); + + // If validity could already be determined, the effect will be the validation response. 
+ if !block_validator.validation_states.is_empty() + && block_validator + .validation_states + .values() + .all(BlockValidationState::completed) + { + assert_eq!(1, effects.len()); + for effect in effects { + tokio::spawn(effect).await.unwrap(); // Response. + } + return validation_result.await.unwrap(); + } + + // Otherwise the effects are either requests to fetch the block's deploys, or to fetch past + // blocks for the purpose of signature validation. + let event_futures: Vec<_> = effects.into_iter().map(tokio::spawn).collect(); + + // We make our mock reactor answer with the expected blocks and/or deploys and transfers: + reactor.handle_requests(self).await; + + // At this point we either responded with requested deploys, or the past blocks. This + // should generate other events (`GotPastBlocksWithMetadata` in the case of past blocks, or + // a bunch of `TransactionFetched` in the case of deploys). We have to handle them. + let mut effects = Effects::new(); + for future in event_futures { + let events = future.await.unwrap(); + effects.extend( + events + .into_iter() + .flat_map(|event| block_validator.handle_event(effect_builder, rng, event)), + ); + } + + // If there are no effects - some blocks have been missing from storage. Announce the + // finalization of the blocks we have in the context. + if effects.is_empty() { + for block_height in self.get_delayed_blocks() { + effects.extend(block_validator.handle_event( + effect_builder, + rng, + Event::BlockStored(block_height), + )); + } + } + + // If there are still no effects, something went wrong. + assert!(!effects.is_empty()); + + // If there were no signatures in the block, the validity of the block should be determined + // at this point. In such a case, return the result. 
+ if !block_validator.validation_states.is_empty() + && block_validator + .validation_states + .values() + .all(BlockValidationState::completed) + { + assert_eq!(1, effects.len()); + for effect in effects { + tokio::spawn(effect).await.unwrap(); + } + return validation_result.await.unwrap(); + } + + // Otherwise, we have more effects to handle. After the blocks have been returned, the + // validator should now ask for the deploys and signatures. + // If some blocks have been delayed, this can be another request for past blocks. + // Let's handle those requests. + let event_futures: Vec<_> = effects.into_iter().map(tokio::spawn).collect(); + + // We make our mock reactor answer with the expected items. + reactor.handle_requests(self).await; + + // Again, we'll have a bunch of events to handle, so we handle them. + let mut effects = Effects::new(); + for future in event_futures { + let events = future.await.unwrap(); + effects.extend( + events + .into_iter() + .flat_map(|event| block_validator.handle_event(effect_builder, rng, event)), + ); + } + + // If there are no effects at this point, something went wrong. + assert!(!effects.is_empty()); + + // If no blocks were delayed, we just returned all the fetched items, so now the validity + // should have been resolved. Return the result if it is so. + if !block_validator.validation_states.is_empty() + && block_validator + .validation_states + .values() + .all(BlockValidationState::completed) + { + assert_eq!(1, effects.len()); + for effect in effects { + tokio::spawn(effect).await.unwrap(); + } + return validation_result.await.unwrap(); + } + + // Otherwise, we have more effects to handle. At this point, all the delayed blocks should + // have been stored and returned, so we just have a bunch of fetch requests to handle. + let event_futures: Vec<_> = effects.into_iter().map(tokio::spawn).collect(); + + // We make our mock reactor answer with the expected items. 
+ reactor.handle_requests(self).await; + + // Again, we'll have a bunch of events to handle. At this point we should have a bunch of + // `TransactionFetched` or `FinalitySignatureFetched` events. We handle them. + let mut effects = Effects::new(); + for future in event_futures { + let events = future.await.unwrap(); + effects.extend( + events + .into_iter() + .flat_map(|event| block_validator.handle_event(effect_builder, rng, event)), + ); + } + + // Nothing more should be requested, so we expect at most one effect: the validation + // response. Zero effects is possible if block validator responded with false before, but + // hasn't marked the state invalid (it can happen when peers are exhausted). In any case, + // the result should be resolved now. + assert!(effects.len() < 2); + for effect in effects { + tokio::spawn(effect).await.unwrap(); // Response. + } + validation_result.await.unwrap() } - validation_result.await.unwrap() } /// Verifies that a block without any deploys or transfers is valid. #[tokio::test] async fn empty_block() { - assert!(validate_block(&mut TestRng::new(), 1000.into(), vec![], vec![]).await); + let mut rng = TestRng::new(); + let mut empty_context = ValidationContext::new().with_num_validators(&mut rng, 1); + assert!(empty_context.validate_block(&mut rng, 1000.into()).await); } /// Verifies that the block validator checks deploy and transfer timestamps and ttl. 
@@ -299,19 +690,38 @@ async fn ttl() { new_transfer(&mut rng, 900.into(), ttl), ]; + let mut deploys_context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_deploys(deploys.clone()) + .include_all_deploys(); + let mut transfers_context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transfers(transfers.clone()) + .include_all_transfers(); + let mut both_context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_deploys(deploys) + .with_transfers(transfers) + .include_all_deploys() + .include_all_transfers(); + // Both 1000 and 1100 are timestamps compatible with the deploys and transfers. - assert!(validate_block(&mut rng, 1000.into(), deploys.clone(), transfers.clone()).await); - assert!(validate_block(&mut rng, 1100.into(), deploys.clone(), transfers.clone()).await); + assert!(both_context.validate_block(&mut rng, 1000.into()).await); + assert!(both_context.validate_block(&mut rng, 1100.into()).await); // A block with timestamp 999 can't contain a transfer or deploy with timestamp 1000. - assert!(!validate_block(&mut rng, 999.into(), deploys.clone(), vec![]).await); - assert!(!validate_block(&mut rng, 999.into(), vec![], transfers.clone()).await); - assert!(!validate_block(&mut rng, 999.into(), deploys.clone(), transfers.clone()).await); + assert!(!deploys_context.validate_block(&mut rng, 999.into()).await); + assert!(!transfers_context.validate_block(&mut rng, 999.into()).await); + assert!(!both_context.validate_block(&mut rng, 999.into()).await); // At time 1101, the deploy and transfer from time 900 have expired. 
- assert!(!validate_block(&mut rng, 1101.into(), deploys.clone(), vec![]).await); - assert!(!validate_block(&mut rng, 1101.into(), vec![], transfers.clone()).await); - assert!(!validate_block(&mut rng, 1101.into(), deploys, transfers).await); + assert!(!deploys_context.validate_block(&mut rng, 1101.into()).await); + assert!( + !transfers_context + .validate_block(&mut rng, 1101.into()) + .await + ); + assert!(!both_context.validate_block(&mut rng, 1101.into()).await); } /// Verifies that a block is invalid if it contains a transfer in the `deploy_hashes` or a @@ -329,27 +739,63 @@ async fn transfer_deploy_mixup_and_replay() { // First we make sure that our transfers and deploys would normally be valid. let deploys = vec![deploy1.clone(), deploy2.clone()]; let transfers = vec![transfer1.clone(), transfer2.clone()]; - assert!(validate_block(&mut rng, timestamp, deploys, transfers).await); + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_deploys(deploys) + .with_transfers(transfers) + .include_all_deploys() + .include_all_transfers(); + assert!(context.validate_block(&mut rng, timestamp).await); // Now we hide a transfer in the deploys section. This should be invalid. let deploys = vec![deploy1.clone(), deploy2.clone(), transfer2.clone()]; let transfers = vec![transfer1.clone()]; - assert!(!validate_block(&mut rng, timestamp, deploys, transfers).await); + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_deploys(deploys) + .with_transfers(transfers) + .include_all_deploys() + .include_all_transfers(); + assert!(!context.validate_block(&mut rng, timestamp).await); // A regular deploy in the transfers section is also invalid. 
let deploys = vec![deploy2.clone()]; let transfers = vec![transfer1.clone(), deploy1.clone(), transfer2.clone()]; - assert!(!validate_block(&mut rng, timestamp, deploys, transfers).await); + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_deploys(deploys) + .with_transfers(transfers) + .include_all_deploys() + .include_all_transfers(); + assert!(!context.validate_block(&mut rng, timestamp).await); // Each deploy must be unique - let deploys = vec![deploy1.clone(), deploy2.clone(), deploy1.clone()]; + let deploys = vec![deploy1.clone(), deploy2.clone()]; let transfers = vec![transfer1.clone(), transfer2.clone()]; - assert!(!validate_block(&mut rng, timestamp, deploys, transfers).await); + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_deploys(deploys) + .with_transfers(transfers) + .include_all_deploys() + .include_all_transfers() + .include_deploys(vec![TransactionHashWithApprovals::from( + &Transaction::Deploy(deploy1.clone()), + )]); + assert!(!context.validate_block(&mut rng, timestamp).await); // And each transfer must be unique, too. let deploys = vec![deploy1.clone(), deploy2.clone()]; - let transfers = vec![transfer1.clone(), transfer2.clone(), transfer2.clone()]; - assert!(!validate_block(&mut rng, timestamp, deploys, transfers).await); + let transfers = vec![transfer1.clone(), transfer2.clone()]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_deploys(deploys) + .with_transfers(transfers) + .include_all_deploys() + .include_all_transfers() + .include_transfers(vec![TransactionHashWithApprovals::from( + &Transaction::Deploy(transfer2.clone()), + )]); + assert!(!context.validate_block(&mut rng, timestamp).await); } /// Verifies that the block validator fetches from multiple peers. @@ -385,7 +831,9 @@ async fn should_fetch_from_multiple_peers() { ); // Create the reactor and component. 
- let reactor = MockReactor::new(&mut rng); + let secret_key = Arc::new(SecretKey::random(&mut rng)); + let public_key = PublicKey::from(&*secret_key); + let reactor = MockReactor::new(secret_key, vec![public_key]); let effect_builder = EffectBuilder::new(EventQueueHandle::without_shutdown(reactor.scheduler)); let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); @@ -426,18 +874,11 @@ async fn should_fetch_from_multiple_peers() { let fetch_results = fetch_effects.drain(..).map(tokio::spawn).collect_vec(); // Provide the first deploy and transfer on first asking. - let deploys_to_fetch = vec![deploys[0].clone(), transfers[0].clone()]; - let deploys_to_not_fetch = vec![ - *deploys[1].hash(), - *deploys[2].hash(), - *transfers[1].hash(), - *transfers[2].hash(), - ] - .into_iter() - .collect(); - reactor - .expect_fetch_deploys(deploys_to_fetch, deploys_to_not_fetch) - .await; + let context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_deploys(vec![deploys[0].clone()]) + .with_transfers(vec![transfers[0].clone()]); + reactor.handle_requests(&context).await; let mut missing = vec![]; for fetch_result in fetch_results { @@ -465,18 +906,10 @@ async fn should_fetch_from_multiple_peers() { // Provide the first and second deploys and transfers which haven't already been fetched on // second asking. 
- let deploys_to_fetch = vec![&deploys[0], &deploys[1], &transfers[0], &transfers[1]] - .into_iter() - .filter(|deploy| missing.contains(deploy.hash())) - .cloned() - .collect(); - let deploys_to_not_fetch = vec![*deploys[2].hash(), *transfers[2].hash()] - .into_iter() - .filter(|deploy_hash| missing.contains(deploy_hash)) - .collect(); - reactor - .expect_fetch_deploys(deploys_to_fetch, deploys_to_not_fetch) - .await; + let context = context + .with_deploys(vec![deploys[1].clone()]) + .with_transfers(vec![transfers[1].clone()]); + reactor.handle_requests(&context).await; missing.clear(); for fetch_result in fetch_results { @@ -503,15 +936,10 @@ async fn should_fetch_from_multiple_peers() { let fetch_results = fetch_effects.into_iter().map(tokio::spawn).collect_vec(); // Provide all deploys and transfers not already fetched on third asking. - let deploys_to_fetch = deploys - .iter() - .chain(transfers.iter()) - .filter(|deploy| missing.contains(deploy.hash())) - .cloned() - .collect(); - reactor - .expect_fetch_deploys(deploys_to_fetch, HashSet::new()) - .await; + let context = context + .with_deploys(vec![deploys[2].clone()]) + .with_transfers(vec![transfers[2].clone()]); + reactor.handle_requests(&context).await; let mut effects = Effects::new(); for fetch_result in fetch_results { @@ -536,3 +964,117 @@ async fn should_fetch_from_multiple_peers() { .await .expect("should not hang"); } + +#[tokio::test] +async fn should_validate_block_with_signatures() { + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let timestamp = Timestamp::from(1000); + let deploy1 = new_deploy(&mut rng, timestamp, ttl); + let deploy2 = new_deploy(&mut rng, timestamp, ttl); + let transfer1 = new_transfer(&mut rng, timestamp, ttl); + let transfer2 = new_transfer(&mut rng, timestamp, ttl); + + let context = ValidationContext::new() + .with_num_validators(&mut rng, 3) + .with_past_blocks(&mut rng, 0, 5, 0.into()) + .with_deploys(vec![deploy1, deploy2]) + 
.with_transfers(vec![transfer1, transfer2]) + .include_all_deploys() + .include_all_transfers(); + + let validators = context.get_validators(); + + let mut context = context + .with_signatures_for_block(3, 5, &validators) + .include_signatures(3, 5, &validators); + + assert!(context.validate_block(&mut rng, timestamp).await); +} + +#[tokio::test] +async fn should_fetch_missing_signature() { + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let timestamp = Timestamp::from(1000); + let deploy1 = new_deploy(&mut rng, timestamp, ttl); + let deploy2 = new_deploy(&mut rng, timestamp, ttl); + let transfer1 = new_transfer(&mut rng, timestamp, ttl); + let transfer2 = new_transfer(&mut rng, timestamp, ttl); + + let context = ValidationContext::new() + .with_num_validators(&mut rng, 3) + .with_past_blocks(&mut rng, 0, 5, 0.into()) + .with_deploys(vec![deploy1, deploy2]) + .with_transfers(vec![transfer1, transfer2]) + .include_all_deploys() + .include_all_transfers(); + + let validators = context.get_validators(); + let mut signing_validators = context.get_validators(); + let leftover = signing_validators.pop().unwrap(); // one validator will be missing from the set that signed + + let mut context = context + .with_signatures_for_block(3, 5, &signing_validators) + .with_fetchable_signatures(3, 5, &[leftover]) + .include_signatures(3, 5, &validators); + + assert!(context.validate_block(&mut rng, timestamp).await); +} + +#[tokio::test] +async fn should_fail_if_unable_to_fetch_signature() { + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let timestamp = Timestamp::from(1000); + let deploy1 = new_deploy(&mut rng, timestamp, ttl); + let deploy2 = new_deploy(&mut rng, timestamp, ttl); + let transfer1 = new_transfer(&mut rng, timestamp, ttl); + let transfer2 = new_transfer(&mut rng, timestamp, ttl); + + let context = ValidationContext::new() + .with_num_validators(&mut rng, 3) + .with_past_blocks(&mut rng, 0, 5, 0.into()) + 
.with_deploys(vec![deploy1, deploy2]) + .with_transfers(vec![transfer1, transfer2]) + .include_all_deploys() + .include_all_transfers(); + + let validators = context.get_validators(); + let mut signing_validators = context.get_validators(); + let _ = signing_validators.pop(); // one validator will be missing from the set that signed + + let mut context = context + .with_signatures_for_block(3, 5, &signing_validators) + .include_signatures(3, 5, &validators); + + assert!(!context.validate_block(&mut rng, timestamp).await); +} + +#[tokio::test] +async fn should_validate_with_delayed_block() { + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let timestamp = Timestamp::from(1000); + let deploy1 = new_deploy(&mut rng, timestamp, ttl); + let deploy2 = new_deploy(&mut rng, timestamp, ttl); + let transfer1 = new_transfer(&mut rng, timestamp, ttl); + let transfer2 = new_transfer(&mut rng, timestamp, ttl); + + let context = ValidationContext::new() + .with_num_validators(&mut rng, 3) + .with_past_blocks(&mut rng, 0, 4, 0.into()) + .with_delayed_blocks(&mut rng, 5, 5, 0.into()) + .with_deploys(vec![deploy1, deploy2]) + .with_transfers(vec![transfer1, transfer2]) + .include_all_deploys() + .include_all_transfers(); + + let validators = context.get_validators(); + + let mut context = context + .with_signatures_for_block(3, 5, &validators) + .include_signatures(3, 5, &validators); + + assert!(context.validate_block(&mut rng, timestamp).await); +} diff --git a/node/src/types/validator_matrix.rs b/node/src/types/validator_matrix.rs index 4dbb04cfb2..386b2d909e 100644 --- a/node/src/types/validator_matrix.rs +++ b/node/src/types/validator_matrix.rs @@ -96,6 +96,35 @@ impl ValidatorMatrix { } } + /// Creates a new validator matrix with just a single validator. 
+    #[cfg(test)]
+    pub(crate) fn new_with_validators<I: IntoIterator<Item = PublicKey>>(
+        secret_signing_key: Arc<SecretKey>,
+        public_keys: I,
+    ) -> Self {
+        let public_signing_key = PublicKey::from(&*secret_signing_key);
+        let finality_threshold_fraction = Ratio::new(1, 3);
+        let era_id = EraId::new(0);
+        let weights = EraValidatorWeights::new(
+            era_id,
+            public_keys
+                .into_iter()
+                .map(|pub_key| (pub_key, 100.into()))
+                .collect(),
+            finality_threshold_fraction,
+        );
+        ValidatorMatrix {
+            inner: Arc::new(RwLock::new(iter::once((era_id, weights)).collect())),
+            chainspec_validators: None,
+            chainspec_activation_era: EraId::from(0),
+            finality_threshold_fraction,
+            public_signing_key,
+            secret_signing_key,
+            auction_delay: 1,
+            retrograde_latch: None,
+        }
+    }
+
     #[cfg(test)]
     pub(crate) fn public_keys(&self, era_id: &EraId) -> Vec<PublicKey> {
         let mut ret = vec![];