From 9ff993c6ef6e8bbaed4234736136b2ff22d08cc7 Mon Sep 17 00:00:00 2001 From: Chris Czub Date: Wed, 11 Sep 2024 14:12:33 -0400 Subject: [PATCH] Basic ICS23 Transfer Test (#4850) ## Describe your changes Replaces the existing IBC handshake test with a full ICS23 transfer test. Only the "happy path" (no timeouts or any other error cases) is tested in this initial spike. Also introduces some improvements to the underlying `MockClient` to support spent note tracking. The transfer implementation in the `MockRelayer` is currently hardcoded to transfer 50% of the first chain's client's first note to the second chain; this can be fleshed out further in response to future test requirements. ## Issue ticket number and link Closes #4846 ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > tests only --- .../app/tests/common/ibc_tests/relayer.rs | 414 +++++++++++++++++- .../{ibc_handshake.rs => ics23_transfer.rs} | 72 ++- crates/test/mock-client/src/lib.rs | 73 ++- crates/test/mock-consensus/src/block.rs | 42 +- 4 files changed, 584 insertions(+), 17 deletions(-) rename crates/core/app/tests/{ibc_handshake.rs => ics23_transfer.rs} (52%) diff --git a/crates/core/app/tests/common/ibc_tests/relayer.rs b/crates/core/app/tests/common/ibc_tests/relayer.rs index 05a73e1f59..488e68dcca 100644 --- a/crates/core/app/tests/common/ibc_tests/relayer.rs +++ b/crates/core/app/tests/common/ibc_tests/relayer.rs @@ -11,9 +11,12 @@ use { channel::{ channel::{Order, State as ChannelState}, msgs::{ - MsgChannelOpenAck, MsgChannelOpenConfirm, MsgChannelOpenInit, MsgChannelOpenTry, + MsgAcknowledgement, MsgChannelOpenAck, MsgChannelOpenConfirm, + MsgChannelOpenInit, MsgChannelOpenTry, MsgRecvPacket, }, - IdentifiedChannelEnd, Version as ChannelVersion, + packet::Sequence, + ChannelId, IdentifiedChannelEnd, Packet, PortId, TimeoutHeight, + Version as ChannelVersion, }, client::{ msgs::{MsgCreateClient, MsgUpdateClient}, @@ -35,20 +38,31 @@ use { header::Header as TendermintHeader, TrustThreshold, }, + timestamp::Timestamp, DomainType as _, }, + penumbra_asset::{asset::Cache, Value}, penumbra_ibc::{ component::{ChannelStateReadExt as _, ConnectionStateReadExt as _}, - IbcRelay, IBC_COMMITMENT_PREFIX, IBC_PROOF_SPECS, + IbcRelay, IbcToken, IBC_COMMITMENT_PREFIX, IBC_PROOF_SPECS, }, + penumbra_keys::keys::AddressIndex, + penumbra_num::Amount, penumbra_proto::{util::tendermint_proxy::v1::GetBlockByHeightRequest, DomainType}, + penumbra_shielded_pool::{Ics20Withdrawal, OutputPlan, SpendPlan}, penumbra_stake::state_key::chain, - penumbra_transaction::{TransactionParameters, TransactionPlan}, + penumbra_transaction::{ + memo::MemoPlaintext, plan::MemoPlan, TransactionParameters, TransactionPlan, + }, prost::Message as _, + rand::SeedableRng as _, rand_chacha::ChaCha12Core, sha2::Digest, - std::time::Duration, - tendermint::Time, + std::{ + str::FromStr as _, + time::{Duration, SystemTime, UNIX_EPOCH}, + }, + tendermint::{abci::Event, Time}, }; #[allow(unused)] pub struct MockRelayer { @@ -1380,6 +1394,389 @@ impl MockRelayer { Ok(()) } + + /// Sends an IBC transfer from chain A to chain B. + /// + /// Currently hardcoded to send 50% of the first note's value + /// on chain A. 
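+ ///
+ /// The happy-path relay performed here is:
+ /// 1. Plan an `Ics20Withdrawal` (plus the balancing `Spend` and `Output`
+ ///    actions) and execute it in a block on chain A.
+ /// 2. For each `send_packet` event emitted by chain A, fetch the packet
+ ///    commitment proof from chain A's storage, update the clients, and
+ ///    deliver a `MsgRecvPacket` to chain B.
+ /// 3. For each `write_acknowledgement` event emitted by chain B, fetch the
+ ///    acknowledgement proof from chain B's storage, update the clients, and
+ ///    deliver a `MsgAcknowledgement` back to chain A.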
+ pub async fn transfer_from_a_to_b(&mut self) -> Result<()> { + // Ensure chain A has balance to transfer + let chain_a_client = self.chain_a_ibc.client().await?; + let chain_b_client = self.chain_b_ibc.client().await?; + + let chain_a_note = chain_a_client + .notes + .values() + .cloned() + .next() + .ok_or_else(|| anyhow!("mock client had no note"))?; + + // Get the balance of that asset on chain A + let pretransfer_balance_a: Amount = chain_a_client + .spendable_notes_by_asset(chain_a_note.asset_id()) + .map(|n| n.value().amount) + .sum(); + + // Get the balance of that asset on chain B + // The asset ID of the IBC transferred asset on chain B + // needs to be computed. + let asset_cache = Cache::with_known_assets(); + let denom = asset_cache + .get(&chain_a_note.asset_id()) + .expect("asset ID should exist in asset cache") + .clone(); + let ibc_token = IbcToken::new( + &self.chain_b_ibc.channel_id, + &self.chain_b_ibc.port_id, + &denom.to_string(), + ); + let pretransfer_balance_b: Amount = chain_b_client + .spendable_notes_by_asset(ibc_token.id()) + .map(|n| n.value().amount) + .sum(); + + // We will transfer 50% of the `chain_a_note`'s value to the same address on chain B + let transfer_value = Value { + amount: (chain_a_note.amount().value() / 2).into(), + asset_id: chain_a_note.asset_id(), + }; + + // Prepare and perform the transfer from chain A to chain B + let destination_chain_address = chain_b_client.fvk.payment_address(AddressIndex::new(0)).0; + let denom = asset_cache + .get(&transfer_value.asset_id) + .expect("asset ID should exist in asset cache") + .clone(); + let amount = transfer_value.amount; + // TODO: test timeouts + // For this sunny path test, we'll set the timeouts very far in the future + let timeout_height = Height { + revision_height: 1_000_000, + revision_number: 0, + }; + // get the current time on the local machine + let current_time_ns = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_nanos() as u64; + + // add 2 days to current time + let mut timeout_time = current_time_ns + 1.728e14 as u64; + + // round to the nearest 10 minutes + timeout_time += 600_000_000_000 - (timeout_time % 600_000_000_000); + + let return_address = chain_a_client + .fvk + .ephemeral_address( + rand_chacha::ChaChaRng::seed_from_u64(1312), + AddressIndex::new(0), + ) + .0; + let withdrawal = Ics20Withdrawal { + destination_chain_address: destination_chain_address.to_string(), + denom, + amount, + timeout_height, + timeout_time, + return_address, + // TODO: this is fine to hardcode for now but should ultimately move + // to the mock relayer and be based on the handshake + source_channel: ChannelId::from_str("channel-0")?, + // Penumbra <-> Penumbra so false + use_compat_address: false, + }; + // There will need to be `Spend` and `Output` actions + // within the transaction in order for it to balance + let spend_plan = SpendPlan::new( + &mut rand_chacha::ChaChaRng::seed_from_u64(1312), + chain_a_note.clone(), + chain_a_client + .position(chain_a_note.commit()) + .expect("note should be in mock client's tree"), + ); + let output_plan = OutputPlan::new( + &mut rand_chacha::ChaChaRng::seed_from_u64(1312), + // half the note is being withdrawn, so we can use `transfer_value` both for the withdrawal action + // and the change output + transfer_value.clone(), + chain_a_client.fvk.payment_address(AddressIndex::new(0)).0, + ); + + let plan = { + let ics20_msg = withdrawal.into(); + TransactionPlan { + actions: vec![ics20_msg, spend_plan.into(), 
output_plan.into()], + // Now fill out the remaining parts of the transaction needed for verification: + memo: Some(MemoPlan::new( + &mut rand_chacha::ChaChaRng::seed_from_u64(1312), + MemoPlaintext::blank_memo( + chain_a_client.fvk.payment_address(AddressIndex::new(0)).0, + ), + )), + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + .with_populated_detection_data( + rand_chacha::ChaChaRng::seed_from_u64(1312), + Default::default(), + ) + }; + let tx = self + .chain_a_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + let (_end_block_events, deliver_tx_events) = self + .chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + self._sync_chains().await?; + + // Since multiple send_packet events can occur in a single deliver tx response, + // we accumulate all the events and process them in a loop. + let mut recv_tx_deliver_tx_events: Vec = Vec::new(); + // Now that the withdrawal has been processed on Chain A, the relayer + // tells chain B to process the transfer. It does this by forwarding a + // MsgRecvPacket to chain B. + // + // The relayer needs to extract the event that chain A emitted: + for event in deliver_tx_events.iter() { + if event.kind == "send_packet" { + let mut packet_data_hex = None; + let mut sequence = None; + let mut port_on_a = None; + let mut chan_on_a = None; + let mut port_on_b = None; + let mut chan_on_b = None; + let mut timeout_height_on_b = None; + let mut timeout_timestamp_on_b = None; + for attr in &event.attributes { + match attr.key.as_str() { + "packet_data_hex" => packet_data_hex = Some(attr.value.clone()), + "packet_sequence" => sequence = Some(attr.value.clone()), + "packet_src_port" => port_on_a = Some(attr.value.clone()), + "packet_src_channel" => chan_on_a = Some(attr.value.clone()), + "packet_dst_port" => port_on_b = Some(attr.value.clone()), + "packet_dst_channel" => chan_on_b = Some(attr.value.clone()), + "packet_timeout_height" => timeout_height_on_b = Some(attr.value.clone()), + "packet_timeout_timestamp" => { + timeout_timestamp_on_b = Some(attr.value.clone()) + } + _ => (), + } + } + + let port_on_a = port_on_a.expect("port_on_a attribute should be present"); + let chan_on_a = chan_on_a.expect("chan_on_a attribute should be present"); + let port_on_b = port_on_b.expect("port_on_b attribute should be present"); + let chan_on_b = chan_on_b.expect("chan_on_b attribute should be present"); + let sequence = sequence.expect("sequence attribute should be present"); + let timeout_height_on_b = + timeout_height_on_b.expect("timeout_height_on_b attribute should be present"); + let timeout_timestamp_on_b = timeout_timestamp_on_b + .expect("timeout_timestamp_on_b attribute should be present"); + let packet_data_hex = + packet_data_hex.expect("packet_data_hex attribute should be present"); + + // The relayer must fetch the packet commitment proof from chain A + // to include in the MsgRecvPacket + // For a real relayer this would be done with an abci request, but + // since we don't have a real cometbft node, we will just grab it + // from storage + let chain_a_snapshot = self.chain_a_ibc.storage.latest_snapshot(); + let (_commitment, proof_commitment_on_a) = chain_a_snapshot.get_with_proof(format!("ibc-data/commitments/ports/{port_on_a}/channels/{chan_on_a}/sequences/{sequence}").as_bytes().to_vec()).await?; + + // Now update the chains + let _chain_b_height 
= self._build_and_send_update_client_a().await?; + let chain_a_height = self._build_and_send_update_client_b().await?; + + let proof_height = chain_a_height; + + let msg_recv_packet = MsgRecvPacket { + packet: Packet { + sequence: Sequence::from_str(&sequence)?, + port_on_a: PortId::from_str(&port_on_a)?, + chan_on_a: ChannelId::from_str(&chan_on_a)?, + port_on_b: PortId::from_str(&port_on_b)?, + chan_on_b: ChannelId::from_str(&chan_on_b)?, + data: hex::decode(packet_data_hex)?, + timeout_height_on_b: TimeoutHeight::from_str(&timeout_height_on_b)?, + timeout_timestamp_on_b: Timestamp::from_str(&timeout_timestamp_on_b)?, + }, + proof_commitment_on_a, + proof_height_on_a: Height { + revision_height: proof_height.revision_height, + revision_number: 0, + }, + signer: self.chain_a_ibc.signer.clone(), + }; + + let plan = { + let ics20_msg = penumbra_transaction::ActionPlan::IbcAction( + IbcRelay::RecvPacket(msg_recv_packet), + ) + .into(); + TransactionPlan { + actions: vec![ics20_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_b_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + + let tx = self + .chain_b_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + let (_end_block_events, dtx_events) = self + .chain_b_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + recv_tx_deliver_tx_events.extend(dtx_events.0.into_iter()); + } + } + + self._sync_chains().await?; + + // Now that the transfer packet has been processed by chain B, + // the relayer tells chain A to process the acknowledgement. + for event in recv_tx_deliver_tx_events.iter() { + if event.kind == "write_acknowledgement" { + let mut packet_data_hex = None; + let mut sequence = None; + let mut port_on_a = None; + let mut chan_on_a = None; + let mut port_on_b = None; + let mut chan_on_b = None; + let mut timeout_height_on_b = None; + let mut timeout_timestamp_on_b = None; + let mut packet_ack_hex = None; + for attr in &event.attributes { + match attr.key.as_str() { + "packet_data_hex" => packet_data_hex = Some(attr.value.clone()), + "packet_sequence" => sequence = Some(attr.value.clone()), + "packet_src_port" => port_on_a = Some(attr.value.clone()), + "packet_src_channel" => chan_on_a = Some(attr.value.clone()), + "packet_dst_port" => port_on_b = Some(attr.value.clone()), + "packet_dst_channel" => chan_on_b = Some(attr.value.clone()), + "packet_timeout_height" => timeout_height_on_b = Some(attr.value.clone()), + "packet_timeout_timestamp" => { + timeout_timestamp_on_b = Some(attr.value.clone()) + } + "packet_ack_hex" => packet_ack_hex = Some(attr.value.clone()), + _ => (), + } + } + + let port_on_a = port_on_a.expect("port_on_a attribute should be present"); + let chan_on_a = chan_on_a.expect("chan_on_a attribute should be present"); + let port_on_b = port_on_b.expect("port_on_b attribute should be present"); + let chan_on_b = chan_on_b.expect("chan_on_b attribute should be present"); + let sequence = sequence.expect("sequence attribute should be present"); + let timeout_height_on_b = + timeout_height_on_b.expect("timeout_height_on_b attribute should be present"); + let timeout_timestamp_on_b = timeout_timestamp_on_b + .expect("timeout_timestamp_on_b attribute should be present"); + let packet_data_hex = + packet_data_hex.expect("packet_data_hex attribute should be present"); + let 
packet_ack_hex = + packet_ack_hex.expect("packet_ack_hex attribute should be present"); + + let chain_b_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + let (_commitment, proof_acked_on_b) = chain_b_snapshot + .get_with_proof( + format!( + "ibc-data/acks/ports/{port_on_b}/channels/{chan_on_b}/sequences/{sequence}" + ) + .as_bytes() + .to_vec(), + ) + .await?; + + // Now update the chains + let _chain_a_height = self._build_and_send_update_client_b().await?; + let chain_b_height = self._build_and_send_update_client_a().await?; + + let proof_height = chain_b_height; + + let msg_ack = MsgAcknowledgement { + signer: self.chain_a_ibc.signer.clone(), + packet: Packet { + sequence: Sequence::from_str(&sequence)?, + port_on_a: PortId::from_str(&port_on_a)?, + chan_on_a: ChannelId::from_str(&chan_on_a)?, + port_on_b: PortId::from_str(&port_on_b)?, + chan_on_b: ChannelId::from_str(&chan_on_b)?, + data: hex::decode(packet_data_hex)?, + timeout_height_on_b: TimeoutHeight::from_str(&timeout_height_on_b)?, + timeout_timestamp_on_b: Timestamp::from_str(&timeout_timestamp_on_b)?, + }, + acknowledgement: hex::decode(packet_ack_hex)?, + proof_acked_on_b, + proof_height_on_b: Height { + revision_height: proof_height.revision_height, + revision_number: 0, + }, + }; + + let plan = { + let ics20_msg = penumbra_transaction::ActionPlan::IbcAction( + IbcRelay::Acknowledgement(msg_ack), + ) + .into(); + TransactionPlan { + actions: vec![ics20_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + + let tx = self + .chain_a_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + self.chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + } + } + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + Ok(()) + } } // tell chain A about chain B. returns the height of chain b on chain a after update. @@ -1456,5 +1853,8 @@ async fn _build_and_send_update_client( .execute() .await?; - Ok(chain_b_height) + Ok(Height { + revision_height: chain_b_new_height as u64, + revision_number: 0, + }) } diff --git a/crates/core/app/tests/ibc_handshake.rs b/crates/core/app/tests/ics23_transfer.rs similarity index 52% rename from crates/core/app/tests/ibc_handshake.rs rename to crates/core/app/tests/ics23_transfer.rs index 2068d9d6b8..14dc459db2 100644 --- a/crates/core/app/tests/ibc_handshake.rs +++ b/crates/core/app/tests/ics23_transfer.rs @@ -1,6 +1,10 @@ use { + anyhow::anyhow, common::ibc_tests::{MockRelayer, TestNodeWithIBC, ValidatorKeys}, once_cell::sync::Lazy, + penumbra_asset::{asset::Cache, Value}, + penumbra_ibc::IbcToken, + penumbra_num::Amount, std::time::Duration, tap::Tap as _, }; @@ -11,9 +15,11 @@ pub static MAIN_STORE_PROOF_SPEC: Lazy> = mod common; -/// Exercises that the IBC handshake succeeds. +/// Exercises that the IBC handshake succeeds, and that +/// funds can be sent between the two chains successfully, +/// without any testing of error conditions. #[tokio::test] -async fn ibc_handshake() -> anyhow::Result<()> { +async fn ics20_transfer_no_timeouts() -> anyhow::Result<()> { // Install a test logger, and acquire some temporary storage. 
let guard = common::set_tracing_subscriber(); @@ -73,5 +79,67 @@ async fn ibc_handshake() -> anyhow::Result<()> { // TODO: some testing of failure cases of the handshake process would be good relayer.handshake().await?; + // Grab the note that will be spent during the transfer. + let chain_a_client = relayer.chain_a_ibc.client().await?; + let chain_a_note = chain_a_client + .notes + .values() + .cloned() + .next() + .ok_or_else(|| anyhow!("mock client had no note"))?; + + // Get the balance of that asset on chain A + let pretransfer_balance_a: Amount = chain_a_client + .spendable_notes_by_asset(chain_a_note.asset_id()) + .map(|n| n.value().amount) + .sum(); + + // Get the balance of that asset on chain B + // The asset ID of the IBC transferred asset on chain B + // needs to be computed. + let asset_cache = Cache::with_known_assets(); + let denom = asset_cache + .get(&chain_a_note.asset_id()) + .expect("asset ID should exist in asset cache") + .clone(); + let ibc_token = IbcToken::new( + &relayer.chain_b_ibc.channel_id, + &relayer.chain_b_ibc.port_id, + &denom.to_string(), + ); + let chain_b_client = relayer.chain_b_ibc.client().await?; + let pretransfer_balance_b: Amount = chain_b_client + .spendable_notes_by_asset(ibc_token.id()) + .map(|n| n.value().amount) + .sum(); + + // We will transfer 50% of the `chain_a_note`'s value to the same address on chain B + let transfer_value = Value { + amount: (chain_a_note.amount().value() / 2).into(), + asset_id: chain_a_note.asset_id(), + }; + + // Tell the relayer to process the transfer. + // TODO: currently this just transfers 50% of the first note + // but it'd be nice to have an API with a little more flexibility + relayer.transfer_from_a_to_b().await?; + + // Transfer complete, validate the balances: + let chain_a_client = relayer.chain_a_ibc.client().await?; + let chain_b_client = relayer.chain_b_ibc.client().await?; + let posttransfer_balance_a: Amount = chain_a_client + .spendable_notes_by_asset(chain_a_note.asset_id()) + .map(|n| n.value().amount) + .sum(); + + let posttransfer_balance_b: Amount = chain_b_client + .spendable_notes_by_asset(ibc_token.id()) + .map(|n| n.value().amount) + .sum(); + + assert!(posttransfer_balance_a < pretransfer_balance_a); + assert!(posttransfer_balance_b > pretransfer_balance_b); + assert_eq!(posttransfer_balance_b, transfer_value.amount); + Ok(()).tap(|_| drop(relayer)).tap(|_| drop(guard)) } diff --git a/crates/test/mock-client/src/lib.rs b/crates/test/mock-client/src/lib.rs index a46a29e86c..d8920c0a0a 100644 --- a/crates/test/mock-client/src/lib.rs +++ b/crates/test/mock-client/src/lib.rs @@ -3,7 +3,10 @@ use cnidarium::StateRead; use penumbra_compact_block::{component::StateReadExt as _, CompactBlock, StatePayload}; use penumbra_dex::{swap::SwapPlaintext, swap_claim::SwapClaimPlan}; use penumbra_keys::{keys::SpendKey, FullViewingKey}; -use penumbra_sct::component::{clock::EpochRead, tree::SctRead}; +use penumbra_sct::{ + component::{clock::EpochRead, tree::SctRead}, + Nullifier, +}; use penumbra_shielded_pool::{note, Note, SpendPlan}; use penumbra_tct as tct; use penumbra_transaction::{AuthorizationData, Transaction, TransactionPlan, WitnessData}; @@ -15,7 +18,11 @@ pub struct MockClient { latest_height: u64, sk: SpendKey, pub fvk: FullViewingKey, + /// All notes, whether spent or not. pub notes: BTreeMap, + pub nullifiers: BTreeMap, + /// Whether a note was spent or not. 
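+ /// Keyed by note commitment; the map is used as a set, so a commitment is
+ /// present if and only if its nullifier has been observed in a scanned block.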
+ pub spent_notes: BTreeMap, swaps: BTreeMap, pub sct: penumbra_tct::Tree, } @@ -27,6 +34,8 @@ impl MockClient { fvk: sk.full_viewing_key().clone(), sk, notes: Default::default(), + spent_notes: Default::default(), + nullifiers: Default::default(), sct: Default::default(), swaps: Default::default(), } @@ -103,8 +112,12 @@ impl MockClient { StatePayload::Note { note: payload, .. } => { match payload.trial_decrypt(&self.fvk) { Some(note) => { - self.notes.insert(payload.note_commitment, note.clone()); self.sct.insert(Keep, payload.note_commitment)?; + let nullifier = self + .nullifier(payload.note_commitment) + .expect("newly inserted note should be present in sct"); + self.notes.insert(payload.note_commitment, note.clone()); + self.nullifiers.insert(payload.note_commitment, nullifier); } None => { self.sct.insert(Forget, payload.note_commitment)?; @@ -128,8 +141,18 @@ impl MockClient { let (output_1, output_2) = swap.output_notes(batch_data); // Pre-insert the output notes into our notes table, so that // we can notice them when we scan the block where they are claimed. - self.notes.insert(output_1.commit(), output_1); - self.notes.insert(output_2.commit(), output_2); + // TODO: We should handle tracking the nullifiers for these notes, + // however they aren't inserted into the SCT at this point. + // let nullifier_1 = self + // .nullifier(output_1.commit()) + // .expect("newly inserted swap should be present in sct"); + // let nullifier_2 = self + // .nullifier(output_2.commit()) + // .expect("newly inserted swap should be present in sct"); + self.notes.insert(output_1.commit(), output_1.clone()); + // self.nullifiers.insert(output_1.commit(), nullifier_1); + self.notes.insert(output_2.commit(), output_2.clone()); + // self.nullifiers.insert(output_2.commit(), nullifier_2); } None => { self.sct.insert(Forget, payload.commitment)?; @@ -147,6 +170,24 @@ impl MockClient { } } } + + // Mark spent nullifiers + for nullifier in block.nullifiers { + // skip if we don't know about this nullifier + if !self.nullifiers.values().any(move |n| *n == nullifier) { + continue; + } + + self.spent_notes.insert( + *self + .nullifiers + .iter() + .find_map(|(k, v)| if *v == nullifier { Some(k) } else { None }) + .unwrap(), + (), + ); + } + self.sct.end_block()?; if block.epoch_root.is_some() { self.sct.end_epoch()?; @@ -173,6 +214,17 @@ impl MockClient { self.sct.witness(commitment).map(|proof| proof.position()) } + pub fn nullifier(&self, commitment: note::StateCommitment) -> Option { + let position = self.position(commitment); + + if position.is_none() { + return None; + } + let nk = self.fvk.nullifier_key(); + + Some(Nullifier::derive(&nk, position.unwrap(), &commitment)) + } + pub fn witness_commitment( &self, commitment: note::StateCommitment, @@ -224,4 +276,17 @@ impl MockClient { .values() .filter(move |n| n.asset_id() == asset_id) } + + pub fn spent_note(&self, commitment: ¬e::StateCommitment) -> bool { + self.spent_notes.contains_key(commitment) + } + + pub fn spendable_notes_by_asset( + &self, + asset_id: penumbra_asset::asset::Id, + ) -> impl Iterator + '_ { + self.notes + .values() + .filter(move |n| n.asset_id() == asset_id && !self.spent_note(&n.commit())) + } } diff --git a/crates/test/mock-consensus/src/block.rs b/crates/test/mock-consensus/src/block.rs index 76a8a77c93..7eb45b10a6 100644 --- a/crates/test/mock-consensus/src/block.rs +++ b/crates/test/mock-consensus/src/block.rs @@ -6,8 +6,10 @@ use { crate::TestNode, prost::Message, sha2::{Digest, Sha256}, + std::ops::Deref, tap::Tap, 
     tendermint::{
+        abci::Event,
         account,
         block::{self, header::Version, Block, Commit, Header, Round},
         evidence,
@@ -114,7 +116,7 @@ where
     /// included in the block. Use [`Builder::without_signatures()`] to disable producing
     /// validator signatures.
     #[instrument(level = "info", skip_all, fields(height, time))]
-    pub async fn execute(self) -> Result<(), anyhow::Error> {
+    pub async fn execute(self) -> Result<(EndBlockEvents, DeliverTxEvents), anyhow::Error> {
         // Calling `finish` finishes the previous block
         // and prepares the current block.
         let (test_node, block) = self.finish()?;
@@ -136,11 +138,21 @@ where
         trace!("sending block");
         test_node.begin_block(header, last_commit_info).await?;
+        let mut deliver_tx_responses = Vec::new();
         for tx in data {
             let tx = tx.into();
-            test_node.deliver_tx(tx).await?;
+            // The caller may want to access the DeliverTx responses.
+            deliver_tx_responses.push(test_node.deliver_tx(tx).await?);
         }
-        test_node.end_block().await?;
+
+        // The CheckTx, BeginBlock, DeliverTx, and EndBlock responses each carry an
+        // `events` field; the mock consensus code only surfaces the EndBlock and
+        // DeliverTx events. Extract the events emitted during end_block.
+        let events = test_node.end_block().await?.events;
+        let deliver_tx_events = deliver_tx_responses
+            .iter()
+            .flat_map(|response| response.events.clone())
+            .collect::<Vec<Event>>();

         // the commit call will set test_node.last_app_hash, preparing
         // for the next block to begin execution
@@ -160,7 +172,7 @@ where
         // If an `on_block` callback was set, call it now.
         test_node.on_block.as_mut().map(move |f| f(block));

-        Ok(())
+        Ok((EndBlockEvents(events), DeliverTxEvents(deliver_tx_events)))
     }

     /// Consumes this builder, returning its [`TestNode`] reference and a [`Block`].
@@ -337,3 +349,25 @@ impl CommitHashingExt for Commit {
         }
     }
 }
+
+#[derive(Debug, Clone)]
+pub struct EndBlockEvents(pub Vec<Event>);
+
+#[derive(Debug, Clone)]
+pub struct DeliverTxEvents(pub Vec<Event>);
+
+impl Deref for DeliverTxEvents {
+    type Target = Vec<Event>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl Deref for EndBlockEvents {
+    type Target = Vec<Event>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
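
For reference, a minimal sketch of how a test might consume the events now returned by `Builder::execute()`, mirroring the relayer code above. This is a fragment that would live inside an async test body; `test_node` (a mock-consensus `TestNode`) and `tx_bytes` (an encoded transaction) are assumed to be provided by the test harness:

```rust
// Sketch only: `test_node` and `tx_bytes` are assumed to exist in the test.
let (_end_block_events, deliver_tx_events) = test_node
    .block()
    .with_data(vec![tx_bytes])
    .execute()
    .await?;

// `DeliverTxEvents` derefs to `Vec<Event>`, so it can be iterated directly.
for event in deliver_tx_events.iter() {
    if event.kind == "send_packet" {
        // Read packet attributes the same way the relayer does, e.g.:
        let _packet_data_hex = event
            .attributes
            .iter()
            .find(|attr| attr.key.as_str() == "packet_data_hex")
            .map(|attr| attr.value.clone());
    }
}
```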