Skip to content

Commit

Permalink
dex: correct swap claim bsod check
Browse files Browse the repository at this point in the history
This is a cherry-pick of PR #4239, by:
Author:    Lucas Meier <[email protected]>
Date:      Thu Apr 18 14:55:50 2024 -0700

This PR:
1. fixes a bug in the swap claim circuit
2. includes a migration routine for testnet 71

Swap claim proofs take a `BatchSwapOutputData` as part of their
public inputs. This data is used to compute a user's pro-rated share
of a batch swap at a given block.

For that reason, it is important the output data used in the proof
be correct. A swap claim must be bound to the specific BSOD that
was produced by that user's swap.

The swap claim circuit's validation only checked that the supplied
BSOD was produced at the same relative block height as the user's
swap. It did not check that the epochs were the same.  As a result,
it was possible to generate swap claim proofs using a BSOD produced
during a different epoch, on potentially much more advantageous
terms.
  • Loading branch information
cronokirby authored and erwanor committed Apr 29, 2024
1 parent 2c160ad commit 6ed3bd9
Show file tree
Hide file tree
Showing 22 changed files with 337 additions and 71 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion crates/bench/benches/swap_claim.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ fn swap_claim_proving_time(c: &mut Criterion) {
unfilled_2: Amount::from(50u64),
height: height.into(),
trading_pair: swap_plaintext.trading_pair,
epoch_starting_height: (epoch_duration * position.epoch()).into(),
sct_position_prefix: position,
};
let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i));

Expand Down
2 changes: 1 addition & 1 deletion crates/bin/pcli/tests/proof.rs
Original file line number Diff line number Diff line change
Expand Up @@ -278,7 +278,7 @@ fn swap_claim_parameters_vs_current_swap_claim_circuit() {
unfilled_2: Amount::from(50u64),
height: height.into(),
trading_pair: swap_plaintext.trading_pair,
epoch_starting_height: (epoch_duration * position.epoch()).into(),
sct_position_prefix: position,
};
let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i));

Expand Down
1 change: 1 addition & 0 deletions crates/bin/pd/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,7 @@ penumbra-proto = { workspace = true, default-features = true }
penumbra-sct = { workspace = true, default-features = true }
penumbra-shielded-pool = { workspace = true, features = ["parallel"], default-features = true }
penumbra-stake = { workspace = true, features = ["parallel"], default-features = true }
penumbra-tct = { workspace = true, default-features = true }
penumbra-tendermint-proxy = { path = "../../util/tendermint-proxy" }
penumbra-tower-trace = { path = "../../util/tower-trace" }
penumbra-transaction = { workspace = true, default-features = true }
Expand Down
4 changes: 2 additions & 2 deletions crates/bin/pd/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use cnidarium::{StateDelta, Storage};
use metrics_exporter_prometheus::PrometheusBuilder;
use pd::{
cli::{Opt, RootCommand, TestnetCommand},
migrate::Migration::Testnet70,
migrate::Migration::Testnet72,
testnet::{
config::{get_testnet_dir, parse_tm_address, url_has_necessary_parts},
generate::TestnetConfig,
Expand Down Expand Up @@ -432,7 +432,7 @@ async fn main() -> anyhow::Result<()> {
migrate_archive,
} => {
tracing::info!("migrating state in {}", target_directory.display());
Testnet70
Testnet72
.migrate(target_directory.clone(), genesis_start)
.await
.context("failed to upgrade state")?;
Expand Down
12 changes: 10 additions & 2 deletions crates/bin/pd/src/migrate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
//! node operators must coordinate to perform a chain upgrade.
//! This module declares how local `pd` state should be altered, if at all,
//! in order to be compatible with the network post-chain-upgrade.
mod testnet72;

use anyhow::Context;
use futures::StreamExt as _;
use std::path::PathBuf;
Expand All @@ -28,6 +30,9 @@ pub enum Migration {
SimpleMigration,
/// Testnet-70 migration: move swap executions from the jmt to nv-storage.
Testnet70,
/// Testnet-72 migration:
/// - Migrate `BatchSwapOutputData` to new protobuf, replacing epoch height with index.
Testnet72,
}

impl Migration {
Expand All @@ -37,7 +42,7 @@ impl Migration {
genesis_start: Option<tendermint::time::Time>,
) -> anyhow::Result<()> {
match self {
Migration::Noop => (),
Migration::Noop => Ok(()),
Migration::SimpleMigration => {
let rocksdb_dir = path_to_export.join("rocksdb");
let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
Expand Down Expand Up @@ -101,6 +106,7 @@ impl Migration {
crate::testnet::generate::TestnetValidator::initial_state();
std::fs::write(validator_state_path, fresh_validator_state)
.expect("can write validator state");
Ok(())
}
Migration::Testnet70 => {
// Our goal is to fetch all swap executions from the jmt and store them in nv-storage.
Expand Down Expand Up @@ -189,9 +195,11 @@ impl Migration {
duration = migration_duration.as_secs(),
"successful migration!"
);

Ok(())
}
Migration::Testnet72 => testnet72::migrate(path_to_export, genesis_start).await,
}
Ok(())
}
}

Expand Down
206 changes: 206 additions & 0 deletions crates/bin/pd/src/migrate/testnet72.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,206 @@
//! Contains functions related to the migration script of Testnet72
use anyhow;
use cnidarium::{Snapshot, StateDelta, StateRead, StateWrite, Storage};
use futures::StreamExt as _;
use jmt::RootHash;
use penumbra_app::app::StateReadExt as _;
use penumbra_app::SUBSTORE_PREFIXES;
use penumbra_proto::core::component::sct::v1::query_service_server::QueryService;
use penumbra_proto::penumbra::core::component as pb;
use penumbra_proto::StateWriteProto;
use penumbra_sct::component::clock::{EpochManager, EpochRead};
use penumbra_sct::component::rpc::Server as SctServer;
use penumbra_tct::Position;
use prost::Message;
use std::path::PathBuf;
use std::sync::Arc;
use tonic::IntoRequest;

use crate::testnet::generate::TestnetConfig;

/// The context holding various query services we need to help perform the migration.
#[derive(Clone)]
struct Context {
    /// RPC server over the SCT component, used to resolve an epoch's starting
    /// height to its epoch index while translating BSODs.
    sct_server: Arc<SctServer>,
}

impl Context {
    /// Create a new context from the state storage.
    fn new(storage: Storage) -> Self {
        Self {
            sct_server: Arc::new(SctServer::new(storage)),
        }
    }

    /// Use storage to look up the index of an epoch based on its starting height.
    ///
    /// Returns an error if no epoch is recorded at that height — this function is
    /// already fallible, so we surface a proper error instead of panicking (the
    /// previous `expect(&format!(..))` also allocated its message eagerly on
    /// every call; see clippy's `expect_fun_call`).
    async fn epoch_height_to_index(&self, epoch_starting_height: u64) -> anyhow::Result<u64> {
        self.sct_server
            .epoch_by_height(
                pb::sct::v1::EpochByHeightRequest {
                    height: epoch_starting_height,
                }
                .into_request(),
            )
            .await?
            .into_inner()
            .epoch
            .map(|epoch| epoch.index)
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "epoch at height {} should be present",
                    epoch_starting_height
                )
            })
    }

    /// Translate the protobuf for a BSOD by populating the correct data and emptying the
    /// deprecated field.
    ///
    /// The new `sct_position_prefix` packs (epoch index, block-within-epoch, 0)
    /// into a TCT `Position`; the deprecated `epoch_starting_height` is cleared.
    #[allow(deprecated)]
    async fn translate_bsod(
        &self,
        bsod: pb::dex::v1::BatchSwapOutputData,
    ) -> anyhow::Result<pb::dex::v1::BatchSwapOutputData> {
        let epoch = self
            .epoch_height_to_index(bsod.epoch_starting_height)
            .await?;
        // Both components of the position prefix are 16-bit. Migration input is
        // historical chain data, so report out-of-range values as errors with
        // context rather than panicking mid-migration.
        let epoch_index = u16::try_from(epoch)
            .map_err(|_| anyhow::anyhow!("epoch {} should fit in 16 bits", epoch))?;
        // Guard the subtraction: a BSOD height below its own epoch start would
        // previously have underflowed (panic in debug, wraparound in release).
        let relative_height = bsod
            .height
            .checked_sub(bsod.epoch_starting_height)
            .ok_or_else(|| {
                anyhow::anyhow!(
                    "BSOD height {} is below its epoch starting height {}",
                    bsod.height,
                    bsod.epoch_starting_height
                )
            })?;
        let block_index = u16::try_from(relative_height).map_err(|_| {
            anyhow::anyhow!("block index {} should fit in 16 bits", relative_height)
        })?;
        let sct_position_prefix: u64 = Position::from((epoch_index, block_index, 0)).into();
        Ok(pb::dex::v1::BatchSwapOutputData {
            sct_position_prefix,
            // Empty out the deprecated field so no stale epoch height survives.
            epoch_starting_height: Default::default(),
            ..bsod
        })
    }

    /// Translate every BSOD embedded in a compact block, leaving the rest of the
    /// block untouched.
    async fn translate_compact_block(
        &self,
        compact_block: pb::compact_block::v1::CompactBlock,
    ) -> anyhow::Result<pb::compact_block::v1::CompactBlock> {
        let mut swap_outputs = Vec::with_capacity(compact_block.swap_outputs.len());
        for bsod in compact_block.swap_outputs {
            swap_outputs.push(self.translate_bsod(bsod).await?);
        }
        Ok(pb::compact_block::v1::CompactBlock {
            swap_outputs,
            ..compact_block
        })
    }
}

/// Translate all of the BSODs inside dex storage to the new format.
///
/// Walks every value stored under the dex output prefix, decodes it as a
/// `BatchSwapOutputData`, rewrites it via the migration context, and writes it
/// back under the same key.
async fn translate_dex_storage(
    ctx: Context,
    delta: &mut StateDelta<Snapshot>,
) -> anyhow::Result<()> {
    let mut entries = delta.prefix_raw("dex/output/");
    while let Some(entry) = entries.next().await {
        let (state_key, raw_value) = entry?;
        let old_bsod = pb::dex::v1::BatchSwapOutputData::decode(raw_value.as_slice())?;
        let new_bsod = ctx.translate_bsod(old_bsod).await?;
        delta.put_proto(state_key, new_bsod);
    }
    Ok(())
}

/// Translate all of the compact block storage to hold the new BSOD data inside the compact blocks.
///
/// Iterates the nonverifiable compact-block keyspace, rewrites each block's
/// swap outputs via the migration context, and stores the re-encoded block
/// back under its original key.
async fn translate_compact_block_storage(
    ctx: Context,
    delta: &mut StateDelta<Snapshot>,
) -> anyhow::Result<()> {
    let mut entries = delta.nonverifiable_prefix_raw("compactblock/".as_bytes());
    while let Some(entry) = entries.next().await {
        let (nv_key, raw_block) = entry?;
        let old_block = pb::compact_block::v1::CompactBlock::decode(raw_block.as_slice())?;
        let new_block = ctx.translate_compact_block(old_block).await?;
        delta.nonverifiable_put_raw(nv_key, new_block.encode_to_vec());
    }
    Ok(())
}

/// Run the full migration, given an export path and a start time for genesis.
///
/// Loads the exported RocksDB state, rewrites every `BatchSwapOutputData`
/// (both in dex storage and inside compact blocks) to the new
/// `sct_position_prefix` format, commits the migrated state in place, then
/// writes a fresh `genesis.json` (checkpointed on the post-upgrade root hash)
/// and a reset `priv_validator_state.json` into the export directory.
pub async fn migrate(
    path_to_export: PathBuf,
    genesis_start: Option<tendermint::time::Time>,
) -> anyhow::Result<()> {
    let rocksdb_dir = path_to_export.join("rocksdb");
    let storage = Storage::load(rocksdb_dir.clone(), SUBSTORE_PREFIXES.to_vec()).await?;
    let export_state = storage.latest_snapshot();
    // Capture the pre-upgrade root hash and height for the summary log below.
    let root_hash = export_state.root_hash().await.expect("can get root hash");
    let pre_upgrade_root_hash: RootHash = root_hash.into();
    let pre_upgrade_height = export_state
        .get_block_height()
        .await
        .expect("can get block height");
    // The chain resumes at the height after the halt; this becomes the
    // `initial_height` of the generated genesis.
    let post_upgrade_height = pre_upgrade_height.wrapping_add(1);

    let mut delta = StateDelta::new(export_state);
    let (migration_duration, post_upgrade_root_hash) = {
        let start_time = std::time::SystemTime::now();
        let ctx = Context::new(storage.clone());

        // Translate inside dex storage.
        translate_dex_storage(ctx.clone(), &mut delta).await?;
        // Translate inside compact block storage.
        translate_compact_block_storage(ctx.clone(), &mut delta).await?;

        // NOTE(review): the stored block height is reset to 0 before committing;
        // presumably the effective chain height is carried by `initial_height`
        // in the generated genesis below — confirm against the upgrade runbook.
        delta.put_block_height(0u64);
        let post_upgrade_root_hash = storage.commit_in_place(delta).await?;
        tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");

        (start_time.elapsed().unwrap(), post_upgrade_root_hash)
    };

    // Release and re-open storage so the snapshot below reflects the
    // migrated, committed state rather than the pre-commit view.
    storage.release().await;
    let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
    let migrated_state = storage.latest_snapshot();

    // The migration is complete, now we need to generate a genesis file. To do this, we need
    // to lookup a validator view from the chain, and specify the post-upgrade app hash and
    // initial height.
    let chain_id = migrated_state.get_chain_id().await?;
    let app_state = penumbra_genesis::Content {
        chain_id,
        ..Default::default()
    };
    let mut genesis = TestnetConfig::make_genesis(app_state.clone()).expect("can make genesis");
    genesis.app_hash = post_upgrade_root_hash
        .0
        .to_vec()
        .try_into()
        .expect("infaillible conversion");
    genesis.initial_height = post_upgrade_height as i64;
    // Use the provided genesis time, or "now" for ad-hoc/testing setups.
    genesis.genesis_time = genesis_start.unwrap_or_else(|| {
        let now = tendermint::time::Time::now();
        tracing::info!(%now, "no genesis time provided, detecting a testing setup");
        now
    });
    // Embed the post-upgrade root hash as a checkpoint in the genesis config.
    let checkpoint = post_upgrade_root_hash.0.to_vec();
    let genesis = TestnetConfig::make_checkpoint(genesis, Some(checkpoint));

    let genesis_json = serde_json::to_string(&genesis).expect("can serialize genesis");
    tracing::info!("genesis: {}", genesis_json);
    let genesis_path = path_to_export.join("genesis.json");
    std::fs::write(genesis_path, genesis_json).expect("can write genesis");

    // Reset the validator signing state to a fresh initial state —
    // presumably so the node does not reuse pre-upgrade signing history; confirm.
    let validator_state_path = path_to_export.join("priv_validator_state.json");
    let fresh_validator_state = crate::testnet::generate::TestnetValidator::initial_state();
    std::fs::write(validator_state_path, fresh_validator_state).expect("can write validator state");

    tracing::info!(
        pre_upgrade_height,
        post_upgrade_height,
        ?pre_upgrade_root_hash,
        ?post_upgrade_root_hash,
        duration = migration_duration.as_secs(),
        "successful migration!"
    );

    Ok(())
}
2 changes: 1 addition & 1 deletion crates/core/app/src/app/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -634,7 +634,7 @@ impl App {
///
/// Increment this manually after fixing the root cause for a chain halt: updated nodes will then be
/// able to proceed past the block height of the halt.
const TOTAL_HALT_COUNT: u64 = 1;
const TOTAL_HALT_COUNT: u64 = 2;

#[async_trait]
pub trait StateReadExt: StateRead {
Expand Down
Loading

0 comments on commit 6ed3bd9

Please sign in to comment.