auction: credit VCB when ending an auction #4394

Merged · 9 commits · May 21, 2024
4 changes: 2 additions & 2 deletions crates/bin/pd/src/main.rs
@@ -12,7 +12,7 @@ use cnidarium::Storage;
use metrics_exporter_prometheus::PrometheusBuilder;
use pd::{
cli::{Opt, RootCommand, TestnetCommand},
migrate::Migration::ReadyToStart,
migrate::Migration::Testnet76,
testnet::{
config::{get_testnet_dir, parse_tm_address, url_has_necessary_parts},
generate::TestnetConfig,
@@ -439,7 +439,7 @@ async fn main() -> anyhow::Result<()> {
let pd_migrate_span = tracing::error_span!("pd_migrate");
pd_migrate_span
.in_scope(|| tracing::info!("migrating pd state in {}", pd_home.display()));
ReadyToStart
Testnet76
.migrate(pd_home.clone(), comet_home, Some(genesis_start), force)
.instrument(pd_migrate_span)
.await
7 changes: 7 additions & 0 deletions crates/bin/pd/src/migrate.rs
@@ -8,6 +8,7 @@ mod reset_halt_bit;
mod simple;
mod testnet72;
mod testnet74;
mod testnet76;

use anyhow::{ensure, Context};
use penumbra_governance::StateReadExt;
@@ -38,6 +39,9 @@ pub enum Migration {
/// - Update arb executions to include the amount of filled input in the output
/// - Add `AuctionParameters` to the consensus state
Testnet74,
/// Testnet-76 migration:
/// - Heal the auction component's VCB tally.
Testnet76,
}

impl Migration {
@@ -87,6 +91,9 @@ impl Migration {
Migration::Testnet74 => {
testnet74::migrate(storage, pd_home.clone(), genesis_start).await?
}
Migration::Testnet76 => {
testnet76::migrate(storage, pd_home.clone(), genesis_start).await?
}
};

if let Some(comet_home) = comet_home {
147 changes: 147 additions & 0 deletions crates/bin/pd/src/migrate/testnet76.rs
@@ -0,0 +1,147 @@
//! Contains functions related to the migration script of Testnet76

use anyhow;
use cnidarium::{Snapshot, StateDelta, Storage};
use futures::TryStreamExt;
use jmt::RootHash;
use pbjson_types::Any;
use penumbra_app::app::StateReadExt as _;
use penumbra_asset::Balance;
use penumbra_auction::auction::dutch::DutchAuction;
use penumbra_proto::{DomainType, StateReadProto, StateWriteProto};
use penumbra_sct::component::clock::{EpochManager, EpochRead};
use std::path::PathBuf;
use tracing::instrument;

use crate::testnet::generate::TestnetConfig;

#[instrument(skip_all)]
/// Reconstruct a correct tally of the auction component's VCB balance.
/// This is achieved by:
/// 1. Iterating over all auctions in the chain state.
/// 2. Summing the input and output reserves of each auction.
/// NB: This is sufficient because auctions with deployed LPs have value that is
/// *outside* of the auction component, and recorded in the DEX VCB instead.
/// 3. Writing the aggregate VCB balance for each asset to the chain state.
async fn heal_auction_vcb(delta: &mut StateDelta<Snapshot>) -> anyhow::Result<()> {
let key_prefix_auctions = penumbra_auction::state_key::auction_store::prefix();
let all_auctions = delta
.prefix_proto::<Any>(&key_prefix_auctions)
.map_ok(|(_, v)| DutchAuction::decode(v.value).expect("only dutch auctions"))
.try_collect::<Vec<DutchAuction>>()
.await?;

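// Only auctions that are opened (sequence 0) or closed (sequence 1) still hold
// reserves inside the auction component; once an auction is withdrawn its reserves
// have been returned to the owner, hence the `sequence <= 1` filter below.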
let total_vcb = all_auctions
.into_iter()
.filter(|auction| auction.state.sequence <= 1)
.fold(Balance::zero(), |acc, auction| {
let input_reserves = penumbra_asset::Value {
asset_id: auction.description.input.asset_id,
amount: auction.state.input_reserves,
};

let output_reserves = penumbra_asset::Value {
asset_id: auction.description.output_id,
amount: auction.state.output_reserves,
};

tracing::debug!(id = ?auction.description.id(), ?input_reserves, ?output_reserves, "aggregating auction into the component's VCB balance");

acc + Balance::from(input_reserves) + Balance::from(output_reserves)
});

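// Summing reserves only ever credits the balance, so iterating over `provided()`
// (the positive components of the `Balance`) visits every asset in the tally.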
for value in total_vcb.provided() {
tracing::debug!(?value, "writing aggregate VCB balance for asset");
let key_vcb_balance =
penumbra_auction::state_key::value_balance::for_asset(&value.asset_id);
delta.put(key_vcb_balance, value.amount);
}

Ok(())
}

/// Run the full migration, given an export path and a start time for genesis.
///
/// Menu:
/// - Reconstruct a correct VCB balance for the auction component.
#[instrument]
pub async fn migrate(
storage: Storage,
pd_home: PathBuf,
genesis_start: Option<tendermint::time::Time>,
) -> anyhow::Result<()> {
// Setup:
let snapshot = storage.latest_snapshot();
let chain_id = snapshot.get_chain_id().await?;
let root_hash = snapshot.root_hash().await.expect("can get root hash");
let pre_upgrade_root_hash: RootHash = root_hash.into();
let pre_upgrade_height = snapshot
.get_block_height()
.await
.expect("can get block height");
let post_upgrade_height = pre_upgrade_height.wrapping_add(1);

// We initialize a `StateDelta` over the latest snapshot, heal the auction component's
// VCB tally, and then commit the changes in-place.
let mut delta = StateDelta::new(snapshot);
let (migration_duration, post_upgrade_root_hash) = {
let start_time = std::time::SystemTime::now();

// Reconstruct a VCB balance for the auction component.
heal_auction_vcb(&mut delta).await?;

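// Reset the recorded block height to zero; the post-upgrade starting height is
// carried by the generated genesis file instead.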
delta.put_block_height(0u64);
let post_upgrade_root_hash = storage.commit_in_place(delta).await?;
tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");

(
start_time.elapsed().expect("start time is set"),
post_upgrade_root_hash,
)
};

storage.release().await;

// The migration is complete, now we need to generate a genesis file. To do this, we need
// to lookup a validator view from the chain, and specify the post-upgrade app hash and
// initial height.
let app_state = penumbra_app::genesis::Content {
chain_id,
..Default::default()
};
let mut genesis = TestnetConfig::make_genesis(app_state.clone()).expect("can make genesis");
genesis.app_hash = post_upgrade_root_hash
.0
.to_vec()
.try_into()
.expect("infaillible conversion");
genesis.initial_height = post_upgrade_height as i64;
genesis.genesis_time = genesis_start.unwrap_or_else(|| {
let now = tendermint::time::Time::now();
tracing::info!(%now, "no genesis time provided, detecting a testing setup");
now
});

let checkpoint = post_upgrade_root_hash.0.to_vec();
let genesis = TestnetConfig::make_checkpoint(genesis, Some(checkpoint));

let genesis_json = serde_json::to_string(&genesis).expect("can serialize genesis");
tracing::info!("genesis: {}", genesis_json);
let genesis_path = pd_home.join("genesis.json");
std::fs::write(genesis_path, genesis_json).expect("can write genesis");

let validator_state_path = pd_home.join("priv_validator_state.json");
let fresh_validator_state = crate::testnet::generate::TestnetValidator::initial_state();
std::fs::write(validator_state_path, fresh_validator_state).expect("can write validator state");

tracing::info!(
pre_upgrade_height,
post_upgrade_height,
?pre_upgrade_root_hash,
?post_upgrade_root_hash,
duration = migration_duration.as_secs(),
"successful migration!"
);

Ok(())
}
3 changes: 2 additions & 1 deletion crates/cnidarium/src/lib.rs
@@ -52,10 +52,11 @@
//!
//! With the `rpc` feature enabled, this crate also provides a GRPC interface to
//! the key-value store using Tonic.

#![deny(clippy::unwrap_used)]
// Requires nightly.
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
// We use `HashMap`s opportunistically.
#![allow(clippy::disallowed_types)]

mod cache;
mod delta;
6 changes: 3 additions & 3 deletions crates/cnidarium/src/snapshot.rs
@@ -270,7 +270,7 @@ impl StateRead for Snapshot {
let db = self.0.db.clone();

let (prefix_truncated, config) = self.0.multistore_cache.config.match_prefix_str(prefix);
tracing::debug!(substore_key = prefix_truncated, substore_prefix = config.prefix, prefix_supplied = ?prefix, "matched prefix, fetching substore");
tracing::trace!(substore_key = prefix_truncated, substore_prefix = config.prefix, prefix_supplied = ?prefix, "matched prefix, fetching substore");

let version = self
.substore_version(&config)
@@ -332,7 +332,7 @@ impl StateRead for Snapshot {
let db = self.0.db.clone();

let (prefix_truncated, config) = self.0.multistore_cache.config.match_prefix_str(prefix);
tracing::debug!(substore_key = prefix_truncated, substore_prefix = config.prefix, prefix_supplied = ?prefix, "matched prefix, fetching substore");
tracing::trace!(substore_key = prefix_truncated, substore_prefix = config.prefix, prefix_supplied = ?prefix, "matched prefix, fetching substore");

let version = self
.substore_version(&config)
@@ -378,7 +378,7 @@ impl StateRead for Snapshot {
let db = self.0.db.clone();

let (truncated_prefix, config) = self.0.multistore_cache.config.match_prefix_bytes(prefix);
tracing::debug!(substore_key = ?truncated_prefix, substore_prefix = config.prefix, prefix_supplied = ?prefix, "matched prefix, fetching substore");
tracing::trace!(substore_key = ?truncated_prefix, substore_prefix = config.prefix, prefix_supplied = ?prefix, "matched prefix, fetching substore");
let version = self
.substore_version(&config)
.expect("the substore exists and has been initialized");
2 changes: 0 additions & 2 deletions crates/cnidarium/src/storage.rs
@@ -3,8 +3,6 @@ use std::{path::PathBuf, sync::Arc};
use anyhow::{bail, ensure, Result};
use parking_lot::RwLock;
use rocksdb::{Options, DB};
// HashMap is okay here because we don't care about ordering of substore roots.
#[allow(clippy::disallowed_types)]
use std::collections::HashMap;
use tokio::sync::watch;
use tracing::Span;
1 change: 0 additions & 1 deletion crates/cnidarium/src/write_batch.rs
@@ -1,7 +1,6 @@
use std::sync::Arc;

// HashMap is okay here because we don't care about ordering of substore roots.
#[allow(clippy::disallowed_types)]
use std::collections::HashMap;

use crate::{
18 changes: 9 additions & 9 deletions crates/core/app/Cargo.toml
@@ -1,16 +1,16 @@
[package]
name = "penumbra-app"
version = {workspace = true}
authors = {workspace = true}
edition = {workspace = true}
repository = {workspace = true}
homepage = {workspace = true}
license = {workspace = true}
publish = false
name = "penumbra-app"
version = { workspace = true }
authors = { workspace = true }
edition = { workspace = true }
repository = { workspace = true }
homepage = { workspace = true }
license = { workspace = true }
publish = false

[features]
default = ["std"]
std = ["ark-ff/std", "ibc-types/std"]
std = ["ark-ff/std", "ibc-types/std"]

[dependencies]
anyhow = { workspace = true }