diff --git a/.cargo/config b/.cargo/config
deleted file mode 100644
index c5d8c8e32e..0000000000
--- a/.cargo/config
+++ /dev/null
@@ -1,5 +0,0 @@
-[build]
-# Enable Tokio's `tracing` support for `tokio-console`
-# rustflags = ["--cfg", "tokio_unstable"]
-# Note(erwan): We decided to disable it for the time being,
-# I'm keeping this around to be able to reactivate it on a whim.
diff --git a/.dockerignore b/.dockerignore
index f6b007af47..1a64da5fd6 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -6,6 +6,7 @@
 !Cargo.toml
 !Cargo.lock
 !.cargo/
+!rust-toolchain.toml
 # testnets for 'pd testnet generate' defaults
 !testnets/
diff --git a/.github/workflows/buf-pull-request.yml b/.github/workflows/buf-pull-request.yml
index 627c668fe7..37ca86722e 100644
--- a/.github/workflows/buf-pull-request.yml
+++ b/.github/workflows/buf-pull-request.yml
@@ -54,9 +54,6 @@ jobs:
         with:
           lfs: true

-      - name: Install rust toolchain
-        uses: dtolnay/rust-toolchain@stable
-
       - uses: bufbuild/buf-setup-action@v1
         with:
           buf_api_token: ${{ secrets.BUF_TOKEN }}
diff --git a/.github/workflows/docs-lint.yml b/.github/workflows/docs-lint.yml
index 1cae9f27de..90a98b5c81 100644
--- a/.github/workflows/docs-lint.yml
+++ b/.github/workflows/docs-lint.yml
@@ -13,9 +13,9 @@ jobs:
         with:
           lfs: false

-      - name: Install rust toolchain
+      - name: Install nightly rust toolchain
         # The script for rustdoc build requires nightly toolchain.
-        uses: dtolnay/rust-toolchain@nightly
+        run: rustup toolchain install nightly

       # Loading cache takes ~15s, but saves us minutes of build.
       - name: Load rust cache
@@ -36,9 +36,6 @@ jobs:
         with:
           lfs: false

-      - name: Install rust toolchain
-        uses: dtolnay/rust-toolchain@stable
-
       - name: Load rust cache
         uses: astriaorg/buildjet-rust-cache@v2.5.1
diff --git a/.github/workflows/notes.yml b/.github/workflows/notes.yml
index 1a9eff4789..d6f3de83e3 100644
--- a/.github/workflows/notes.yml
+++ b/.github/workflows/notes.yml
@@ -17,8 +17,10 @@ jobs:
         uses: actions/checkout@v4
         with:
           lfs: true
+
       - name: Install rust toolchain
-        uses: dtolnay/rust-toolchain@nightly
+        run: rustup toolchain install nightly
+
       - name: Load Rust caching
         uses: astriaorg/buildjet-rust-cache@v2.5.1
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 381227be8f..85d07c04ba 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,4 +1,4 @@
-# Copyright 2022-2023, axodotdev
+# Copyright 2022-2024, axodotdev
 # SPDX-License-Identifier: MIT or Apache-2.0
 #
 # CI that:
@@ -6,10 +6,11 @@
 # * checks for a Git Tag that looks like a release
 # * builds artifacts with cargo-dist (archives, installers, hashes)
 # * uploads those artifacts to temporary workflow zip
-# * on success, uploads the artifacts to a Github Release™
+# * on success, uploads the artifacts to a GitHub Release
 #
-# Note that the Github Release™ will be created with a generated
+# Note that the GitHub Release will be created with a generated
 # title/body based on your changelogs.
+
 name: Release

 permissions:
@@ -21,28 +22,29 @@ permissions:
 # PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION
 # must be a Cargo-style SemVer Version (must have at least major.minor.patch).
 #
-# If PACKAGE_NAME is specified, then the release will be for that
+# If PACKAGE_NAME is specified, then the announcement will be for that
 # package (erroring out if it doesn't have the given version or isn't cargo-dist-able).
 #
-# If PACKAGE_NAME isn't specified, then the release will be for all
+# If PACKAGE_NAME isn't specified, then the announcement will be for all
 # (cargo-dist-able) packages in the workspace with that version (this mode is
 # intended for workspaces with only one dist-able package, or with all dist-able
 # packages versioned/released in lockstep).
 #
 # If you push multiple tags at once, separate instances of this workflow will
-# spin up, creating an independent Github Release™ for each one. However Github
+# spin up, creating an independent announcement for each one. However, GitHub
 # will hard limit this to 3 tags per commit, as it will assume more tags is a
 # mistake.
 #
-# If there's a prerelease-style suffix to the version, then the Github Release™
+# If there's a prerelease-style suffix to the version, then the release(s)
 # will be marked as a prerelease.

 on:
   push:
     tags:
       - '**[0-9]+.[0-9]+.[0-9]+*'
+  pull_request:

 jobs:
-  # Run 'cargo dist plan' to determine what tasks we need to do
+  # Run 'cargo dist plan' (or host) to determine what tasks we need to do
   plan:
     runs-on: ubuntu-latest
     outputs:
@@ -56,65 +58,72 @@ jobs:
       - uses: actions/checkout@v4
         with:
          submodules: recursive
-      - name: Install Rust
-        run: rustup update "1.75" --no-self-update && rustup default "1.75"
       - name: Install cargo-dist
-        run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.5.0/cargo-dist-installer.sh | sh"
+        # we specify bash to get pipefail; it guards against the `curl` command
+        # failing. otherwise `sh` won't catch that `curl` returned non-0
+        shell: bash
+        run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.13.3/cargo-dist-installer.sh | sh"
+      # sure would be cool if github gave us proper conditionals...
+      # so here's a doubly-nested ternary-via-truthiness to try to provide the best possible
+      # functionality based on whether this is a pull_request, and whether it's from a fork.
+      # (PRs run on the *source* but secrets are usually on the *target* -- that's *good*
+      # but also really annoying to build CI around when it needs secrets to work right.)
       - id: plan
         run: |
-          cargo dist plan ${{ !github.event.pull_request && format('--tag={0}', github.ref_name) || '' }} --output-format=json > dist-manifest.json
-          echo "cargo dist plan ran successfully"
-          cat dist-manifest.json
-          echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
+          cargo dist ${{ (!github.event.pull_request && format('host --steps=create --tag={0}', github.ref_name)) || 'plan' }} --output-format=json > plan-dist-manifest.json
+          echo "cargo dist ran successfully"
+          cat plan-dist-manifest.json
+          echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
       - name: "Upload dist-manifest.json"
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: artifacts
-          path: dist-manifest.json
+          name: artifacts-plan-dist-manifest
+          path: plan-dist-manifest.json

   # Build and packages all the platform-specific things
-  upload-local-artifacts:
+  build-local-artifacts:
+    name: build-local-artifacts (${{ join(matrix.targets, ', ') }})
     # Let the initial task tell us to not run (currently very blunt)
-    needs: plan
-    if: ${{ fromJson(needs.plan.outputs.val).releases != null && (needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload') }}
+    needs:
+      - plan
+    if: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix.include != null && (needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload') }}
     strategy:
       fail-fast: false
-      # We override the generated `matrix` so we can specify custom runners,
-      # for faster build times. This works for Linux & macOS. To generate the base template, run:
-      # `cargo dist plan --output-format json`. That JSON content has been adapted to YAML below.
-      # matrix: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix }}
-      matrix:
-        include:
-          - runner: buildjet-16vcpu-ubuntu-2204
-            dist_args: --artifacts=local --target=x86_64-unknown-linux-gnu
-            install_dist: curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.5.0/cargo-dist-installer.sh | sh
-            targets:
-              - x86_64-unknown-linux-gnu
-          - runner: macos-12-xl
-            dist_args: --artifacts=local --target=aarch64-apple-darwin
-            install_dist: curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.5.0/cargo-dist-installer.sh | sh
-            targets:
-              - aarch64-apple-darwin
-          - runner: macos-12-xl
-            dist_args: --artifacts=local --target=x86_64-apple-darwin
-            install_dist: curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.5.0/cargo-dist-installer.sh | sh
-            targets:
-              - x86_64-apple-darwin
-
+      # Target platforms/runners are computed by cargo-dist in create-release.
+      # Each member of the matrix has the following arguments:
+      #
+      # - runner: the github runner
+      # - dist-args: cli flags to pass to cargo dist
+      # - install-dist: expression to run to install cargo-dist on the runner
+      #
+      # Typically there will be:
+      # - 1 "global" task that builds universal installers
+      # - N "local" tasks that build each platform's binaries and platform-specific installers
+      matrix: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix }}
     runs-on: ${{ matrix.runner }}
     env:
       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       BUILD_MANIFEST_NAME: target/distrib/${{ join(matrix.targets, '-') }}-dist-manifest.json
-      RUSTFLAGS: "--cfg tokio_unstable"
     steps:
+      - name: enable windows longpaths
+        run: |
+          git config --global core.longpaths true
       - uses: actions/checkout@v4
         with:
+          submodules: recursive
           lfs: true
-      - name: Install Rust
-        run: rustup update "1.75" --no-self-update && rustup default "1.75"
       - uses: swatinem/rust-cache@v2
+        with:
+          key: ${{ join(matrix.targets, '-') }}
       - name: Install cargo-dist
         run: ${{ matrix.install_dist }}
+      # Get the dist-manifest
+      - name: Fetch local artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: artifacts-*
+          path: target/distrib/
+          merge-multiple: true
       - name: Install dependencies
         run: |
          ${{ matrix.packages_install }}
@@ -130,54 +139,135 @@ jobs:
        # inconsistent syntax between shell and powershell.
        shell: bash
        run: |
-          # Parse out what we just built and upload it to the Github Release™
+          # Parse out what we just built and upload it to scratch storage
          echo "paths<<EOF" >> "$GITHUB_OUTPUT"
-          jq --raw-output ".artifacts[]?.path | select( . != null )" dist-manifest.json >> "$GITHUB_OUTPUT"
+          jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT"
          echo "EOF" >> "$GITHUB_OUTPUT"

          cp dist-manifest.json "$BUILD_MANIFEST_NAME"
       - name: "Upload artifacts"
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
-          name: artifacts
+          name: artifacts-build-local-${{ join(matrix.targets, '_') }}
           path: |
            ${{ steps.cargo-dist.outputs.paths }}
            ${{ env.BUILD_MANIFEST_NAME }}

-  should-publish:
+  # Build and package all the platform-agnostic(ish) things
+  build-global-artifacts:
     needs:
       - plan
-      - upload-local-artifacts
-    if: ${{ needs.plan.outputs.publishing == 'true' }}
-    runs-on: ubuntu-latest
+      - build-local-artifacts
+    runs-on: "ubuntu-20.04"
+    env:
+      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
     steps:
-      - name: print tag
-        run: echo "ok we're publishing!"
+      - uses: actions/checkout@v4
+        with:
+          submodules: recursive
+          lfs: true
+      - name: Install cargo-dist
+        shell: bash
+        run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.13.3/cargo-dist-installer.sh | sh"
+      # Get all the local artifacts for the global tasks to use (for e.g. checksums)
+      - name: Fetch local artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: artifacts-*
+          path: target/distrib/
+          merge-multiple: true
+      - id: cargo-dist
+        shell: bash
+        run: |
+          cargo dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
+          echo "cargo dist ran successfully"

-  # Create a Github Release with all the results once everything is done
-  publish-release:
-    needs: [plan, should-publish]
-    runs-on: ubuntu-latest
+          # Parse out what we just built and upload it to scratch storage
+          echo "paths<<EOF" >> "$GITHUB_OUTPUT"
+          jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT"
+          echo "EOF" >> "$GITHUB_OUTPUT"
+
+          cp dist-manifest.json "$BUILD_MANIFEST_NAME"
+      - name: "Upload artifacts"
+        uses: actions/upload-artifact@v4
+        with:
+          name: artifacts-build-global
+          path: |
+            ${{ steps.cargo-dist.outputs.paths }}
+            ${{ env.BUILD_MANIFEST_NAME }}
+  # Determines if we should publish/announce
+  host:
+    needs:
+      - plan
+      - build-local-artifacts
+      - build-global-artifacts
+    # Only run if we're "publishing", and only if local and global didn't fail (skipped is fine)
+    if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.build-local-artifacts.result == 'skipped' || needs.build-local-artifacts.result == 'success') }}
     env:
       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    runs-on: "ubuntu-20.04"
+    outputs:
+      val: ${{ steps.host.outputs.manifest }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: recursive
+      - name: Install cargo-dist
+        run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.13.3/cargo-dist-installer.sh | sh"
+      # Fetch artifacts from scratch-storage
+      - name: Fetch artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: artifacts-*
+          path: target/distrib/
+          merge-multiple: true
+      # This is a harmless no-op for GitHub Releases, hosting for that happens in "announce"
+      - id: host
+        shell: bash
+        run: |
+          cargo dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
+          echo "artifacts uploaded and released successfully"
+          cat dist-manifest.json
+          echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
+      - name: "Upload dist-manifest.json"
+        uses: actions/upload-artifact@v4
+        with:
+          # Overwrite the previous copy
+          name: artifacts-dist-manifest
+          path: dist-manifest.json
+
+  # Create a GitHub Release while uploading all files to it
+  announce:
+    needs:
+      - plan
+      - host
+    # use "always() && ..." to allow us to wait for all publish jobs while
+    # still allowing individual publish jobs to skip themselves (for prereleases).
+    # "host" however must run to completion, no skipping allowed!
+    if: ${{ always() && needs.host.result == 'success' }}
+    runs-on: "ubuntu-20.04"
     env:
       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
       - uses: actions/checkout@v4
         with:
           submodules: recursive
-      - name: "Download artifacts"
-        uses: actions/download-artifact@v3
+      - name: "Download GitHub Artifacts"
+        uses: actions/download-artifact@v4
         with:
-          name: artifacts
+          pattern: artifacts-*
           path: artifacts
+          merge-multiple: true
       - name: Cleanup
         run: |
           # Remove the granular manifests
-          rm artifacts/*-dist-manifest.json
-      - name: Create Release
+          rm -f artifacts/*-dist-manifest.json
+      - name: Create GitHub Release
         uses: ncipollo/release-action@v1
         with:
           tag: ${{ needs.plan.outputs.tag }}
-          name: ${{ fromJson(needs.plan.outputs.val).announcement_title }}
-          body: ${{ fromJson(needs.plan.outputs.val).announcement_github_body }}
-          prerelease: ${{ fromJson(needs.plan.outputs.val).announcement_is_prerelease }}
+          name: ${{ fromJson(needs.host.outputs.val).announcement_title }}
+          body: ${{ fromJson(needs.host.outputs.val).announcement_github_body }}
+          prerelease: ${{ fromJson(needs.host.outputs.val).announcement_is_prerelease }}
           artifacts: "artifacts/*"
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 2fe0fe7f5f..09f9982ffc 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -10,10 +10,9 @@ jobs:
         with:
           lfs: true

-      - name: Install rust toolchain
-        uses: dtolnay/rust-toolchain@stable
-        with:
-          targets: wasm32-unknown-unknown
+      # The `rust-toolchain.toml` file dictates which version of rust to setup.
+      - name: check rust version
+        run: rustc --version

       - name: Install nextest
         uses: taiki-e/install-action@nextest
@@ -24,9 +23,8 @@ jobs:
       - name: Run cargo check, failing on warnings
         run: cargo check --release --all-targets
         env:
-          # The `-D warnings` option causes an error on warnings;
-          # we must duplicate the rustflags from `.cargo/config.toml`.
-          RUSTFLAGS: "-D warnings --cfg tokio_unstable"
+          # The `-D warnings` option causes an error on warnings.
+          RUSTFLAGS: "-D warnings"

       - name: Check wasm compatibility
         run: ./deployments/scripts/check-wasm-compat.sh
@@ -55,10 +53,6 @@ jobs:
     runs-on: buildjet-8vcpu-ubuntu-2204
     steps:
       - uses: actions/checkout@v4
-      - name: Install rust toolchain
-        uses: dtolnay/rust-toolchain@stable
-        with:
-          components: rustfmt
       - name: Load rust cache
         uses: astriaorg/buildjet-rust-cache@v2.5.1
       - run: cargo fmt --all -- --check
@@ -68,8 +62,6 @@ jobs:
     runs-on: buildjet-8vcpu-ubuntu-2204
     steps:
       - uses: actions/checkout@v4
-      - name: Install rust toolchain
-        uses: dtolnay/rust-toolchain@stable
       - name: Load rust cache
         uses: astriaorg/buildjet-rust-cache@v2.5.1
       - name: install cargo-hack
diff --git a/.github/workflows/smoke.yml b/.github/workflows/smoke.yml
index 2f9dc7035a..cceb802326 100644
--- a/.github/workflows/smoke.yml
+++ b/.github/workflows/smoke.yml
@@ -17,9 +17,6 @@ jobs:
         with:
           lfs: true

-      - name: Install rust toolchain
-        uses: dtolnay/rust-toolchain@stable
-
       - name: Load rust cache
         uses: astriaorg/buildjet-rust-cache@v2.5.1
diff --git a/.github/workflows/summoner_smoke.yml b/.github/workflows/summoner_smoke.yml
index 6c535c13b7..7ff64d76cb 100644
--- a/.github/workflows/summoner_smoke.yml
+++ b/.github/workflows/summoner_smoke.yml
@@ -20,9 +20,6 @@ jobs:
         with:
           lfs: true

-      - name: Install rust toolchain
-        uses: dtolnay/rust-toolchain@stable
-
       - name: Load rust cache
         uses: astriaorg/buildjet-rust-cache@v2.5.1
diff --git a/Cargo.lock b/Cargo.lock
index 30272d9635..6c2e5d1b26 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2943,6 +2943,12 @@ version = "1.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "02296996cb8796d7c6e3bc2d9211b7802812d36999a51bb754123ead7d37d026"

+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
 [[package]]
 name = "hyper"
 version = "0.14.28"
@@ -4380,12 +4386,14 @@ dependencies = [
  "futures",
  "hex",
  "http-body",
+ "humantime",
  "ibc-proto",
  "ibc-types",
  "indicatif",
  "jmt",
  "ndarray",
  "once_cell",
+ "pbjson-types",
  "penumbra-app",
  "penumbra-asset",
  "penumbra-auction",
@@ -4542,6 +4550,7 @@ dependencies = [
  "penumbra-sct",
  "penumbra-shielded-pool",
  "penumbra-stake",
+ "penumbra-tct",
  "penumbra-tendermint-proxy",
  "penumbra-tower-trace",
  "penumbra-transaction",
@@ -5772,6 +5781,7 @@ dependencies = [
  "metrics",
  "once_cell",
  "parking_lot",
+ "pbjson-types",
  "penumbra-app",
  "penumbra-asset",
  "penumbra-auction",
diff --git a/Cargo.toml b/Cargo.toml
index 5c842d8a46..244894c18f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -58,23 +58,26 @@ members = [
 # Config for 'cargo dist'
 [workspace.metadata.dist]
 # The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax)
-cargo-dist-version = "0.5.0"
-# The preferred Rust toolchain to use in CI (rustup toolchain syntax)
-rust-toolchain-version = "1.75"
+cargo-dist-version = "0.13.3"
 # CI backends to support
 ci = ["github"]
+# The archive format to use for non-windows builds (defaults .tar.xz)
+unix-archive = ".tar.gz"
 # Target platforms to build apps for (Rust target-triple syntax)
-targets = [
-    "x86_64-unknown-linux-gnu",
-    "aarch64-apple-darwin",
-    "x86_64-apple-darwin",
-]
+targets = ["aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu"]
 # The installers to generate for each app
-installers = []
+installers = ["shell"]
 # Publish jobs to run in CI
-pr-run-mode = "skip"
-# We override RUSTFLAGS, so we must permit changes from the default template.
+pr-run-mode = "plan"
+# Skip checking whether the specified configuration files are up to date
 allow-dirty = ["ci"]
+# Whether to install an updater program
+install-updater = false
+
+[workspace.metadata.dist.github-custom-runners]
+aarch64-apple-darwin = "macos-13-large"
+x86_64-apple-darwin = "macos-13-large"
+x86_64-unknown-linux-gnu = "buildjet-32vcpu-ubuntu-2204"

 # The profile that 'cargo dist' will build with
 [profile.dist]
@@ -144,6 +147,7 @@ futures = { version = "0.3.28" }
 hex = { version = "0.4.3" }
 http = { version = "0.2.9" }
 http-body = { version = "0.4.5" }
+humantime = { version = "2.1" }
 ibc-proto = { default-features = false, version = "0.41.0" }
 ibc-types = { default-features = false, version = "0.12.0" }
 ibig = { version = "0.3" }
diff --git a/crates/bench/benches/swap_claim.rs b/crates/bench/benches/swap_claim.rs
index 2ba03515bb..cb1e3503ad 100644
--- a/crates/bench/benches/swap_claim.rs
+++ b/crates/bench/benches/swap_claim.rs
@@ -70,7 +70,7 @@ fn swap_claim_proving_time(c: &mut Criterion) {
         unfilled_2: Amount::from(50u64),
         height: height.into(),
         trading_pair: swap_plaintext.trading_pair,
-        epoch_starting_height: (epoch_duration * position.epoch()).into(),
+        sct_position_prefix: position,
     };
     let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i));
diff --git a/crates/bin/pcli/Cargo.toml b/crates/bin/pcli/Cargo.toml
index 58c31b088d..c68aecff0d 100644
--- a/crates/bin/pcli/Cargo.toml
+++ b/crates/bin/pcli/Cargo.toml
@@ -32,6 +32,7 @@ parallel = [

 [dependencies]
 anyhow = {workspace = true}
+pbjson-types = { workspace = true }
 ark-ff = {workspace = true, default-features = false}
 async-stream = {workspace = true}
 base64 = {workspace = true}
@@ -52,6 +53,7 @@ ed25519-consensus = {workspace = true}
 futures = {workspace = true}
 hex = {workspace = true}
 http-body = {workspace = true}
+humantime = {workspace = true}
 ibc-proto = {workspace = true, default-features = true}
 ibc-types = {workspace = true, features = ["std", "with_serde"], default-features = true}
 indicatif = {workspace = true}
diff --git a/crates/bin/pcli/src/command.rs b/crates/bin/pcli/src/command.rs
index 5ea22e6888..7759cc3c79 100644
--- a/crates/bin/pcli/src/command.rs
+++ b/crates/bin/pcli/src/command.rs
@@ -8,7 +8,6 @@ pub use view::ViewCmd;

 use self::ceremony::CeremonyCmd;

-mod auction;
 mod ceremony;
 mod debug;
 mod init;
diff --git a/crates/bin/pcli/src/command/ceremony.rs b/crates/bin/pcli/src/command/ceremony.rs
index 0e396f374f..5cd73ca96c 100644
--- a/crates/bin/pcli/src/command/ceremony.rs
+++ b/crates/bin/pcli/src/command/ceremony.rs
@@ -139,7 +139,7 @@ impl CeremonyCmd {
                 bid,
                 address
             );
-            handle_bid(app, *coordinator_address, index, bid).await?;
+            handle_bid(app, coordinator_address.clone(), index, bid).await?;

             println!("connecting to coordinator...");
             // After we bid, we need to wait a couple of seconds just for the transaction to be
diff --git a/crates/bin/pcli/src/command/query.rs b/crates/bin/pcli/src/command/query.rs
index a096c9239a..778bb8e6f5 100644
--- a/crates/bin/pcli/src/command/query.rs
+++ b/crates/bin/pcli/src/command/query.rs
@@ -1,22 +1,25 @@
 use anyhow::{anyhow, Context, Result};

+mod auction;
+mod chain;
+mod community_pool;
+mod dex;
+mod governance;
+mod ibc_query;
 mod shielded_pool;
-use colored_json::ToColoredJson;
-use shielded_pool::ShieldedPool;
 mod tx;
-use tx::Tx;
-mod chain;
+mod validator;
+
+use auction::AuctionCmd;
 use chain::ChainCmd;
-mod dex;
+use colored_json::ToColoredJson;
+use community_pool::CommunityPoolCmd;
 use dex::DexCmd;
-mod governance;
 use governance::GovernanceCmd;
-mod community_pool;
-use community_pool::CommunityPoolCmd;
-mod validator;
-pub(super) use validator::ValidatorCmd;
-mod ibc_query;
 use ibc_query::IbcCmd;
+use shielded_pool::ShieldedPool;
+use tx::Tx;
+pub(super) use validator::ValidatorCmd;

 use crate::App;

@@ -75,6 +78,9 @@ pub enum QueryCmd {
         #[clap(long, default_value = "")]
         nv_key_regex: String,
     },
+    /// Queries information about a Dutch auction.
+    #[clap(subcommand)]
+    Auction(AuctionCmd),
 }

 impl QueryCmd {
@@ -116,6 +122,10 @@ impl QueryCmd {
             return ibc.exec(app).await;
         }

+        if let QueryCmd::Auction(auction) = self {
+            return auction.exec(app).await;
+        }
+
         // TODO: this is a hack; we should replace all raw state key uses with RPC methods.
         if let QueryCmd::ShieldedPool(ShieldedPool::CompactBlock { height }) = self {
             use penumbra_proto::core::component::compact_block::v1::{
@@ -143,6 +153,7 @@ impl QueryCmd {
             | QueryCmd::Governance(_)
             | QueryCmd::CommunityPool(_)
             | QueryCmd::Watch { .. }
+            | QueryCmd::Auction { .. }
             | QueryCmd::Ibc(_) => {
                 unreachable!("query handled in guard");
             }
@@ -181,6 +192,7 @@ impl QueryCmd {
             | QueryCmd::Governance { .. }
             | QueryCmd::Key { .. }
             | QueryCmd::Watch { .. }
+            | QueryCmd::Auction { .. }
             | QueryCmd::Ibc(_) => true,
         }
     }
@@ -198,6 +210,7 @@ impl QueryCmd {
             | QueryCmd::Governance { .. }
             | QueryCmd::CommunityPool { .. }
             | QueryCmd::Watch { .. }
+            | QueryCmd::Auction { .. }
             | QueryCmd::Ibc(_) => {
                 unreachable!("query is special cased")
             }
diff --git a/crates/bin/pcli/src/command/query/auction.rs b/crates/bin/pcli/src/command/query/auction.rs
new file mode 100644
index 0000000000..5661f93d8b
--- /dev/null
+++ b/crates/bin/pcli/src/command/query/auction.rs
@@ -0,0 +1,77 @@
+use crate::App;
+use clap::Subcommand;
+use comfy_table::{presets, Table};
+use penumbra_auction::auction::dutch::DutchAuction;
+use penumbra_auction::auction::AuctionId;
+use penumbra_proto::core::component::auction::v1alpha1 as pb_auction;
+use penumbra_proto::core::component::auction::v1alpha1::query_service_client::QueryServiceClient;
+use penumbra_proto::core::component::auction::v1alpha1::AuctionStateByIdRequest;
+use penumbra_proto::DomainType;
+use penumbra_proto::Name;
+
+#[derive(Debug, Subcommand)]
+pub enum AuctionCmd {
+    /// Commands related to Dutch auctions
+    Dutch {
+        #[clap(index = 1)]
+        auction_id: AuctionId,
+    },
+}
+
+impl AuctionCmd {
+    pub async fn exec(&self, app: &mut App) -> anyhow::Result<()> {
+        match self {
+            AuctionCmd::Dutch { auction_id } => {
+                let auction_id = auction_id.clone();
+                let mut client = QueryServiceClient::new(app.pd_channel().await?);
+                let rsp = client
+                    .auction_state_by_id(AuctionStateByIdRequest {
+                        id: Some(auction_id.into()),
+                    })
+                    .await?
+                    .into_inner();
+
+                let pb_auction_state = rsp
+                    .auction
+                    .ok_or_else(|| anyhow::anyhow!("auction state is missing!"))?;
+
+                if pb_auction_state.type_url == pb_auction::DutchAuction::type_url() {
+                    let dutch_auction = DutchAuction::decode(pb_auction_state.value)?;
+                    println!("dutch auction with id {auction_id:?}");
+
+                    let mut table = Table::new();
+                    table.load_preset(presets::NOTHING);
+                    table
+                        .set_header(vec![
+                            "Auction Id",
+                            "State",
+                            "Start height",
+                            "End height",
+                            "Step count",
+                        ]) // TODO: make this more useful
+                        .add_row(vec![
+                            &auction_id.to_string(),
+                            &render_state(dutch_auction.state.sequence),
+                            &dutch_auction.description.start_height.to_string(),
+                            &dutch_auction.description.end_height.to_string(),
+                            &dutch_auction.description.step_count.to_string(),
+                        ]);
+                    println!("{table}");
+                } else {
+                    unimplemented!("only supporting dutch auctions at the moment, come back later");
+                }
+            }
+        }
+        Ok(())
+    }
+}
+
+fn render_state(state: u64) -> String {
+    if state == 0 {
+        format!("Opened")
+    } else if state == 1 {
+        format!("Closed")
+    } else {
+        format!("Withdrawn (seq={state})")
+    }
+}
diff --git a/crates/bin/pcli/src/command/query/validator.rs b/crates/bin/pcli/src/command/query/validator.rs
index 311cc2b87a..c34826b6cc 100644
--- a/crates/bin/pcli/src/command/query/validator.rs
+++ b/crates/bin/pcli/src/command/query/validator.rs
@@ -1,16 +1,33 @@
-use std::{fs::File, io::Write};
+use std::{
+    fs::File,
+    io::Write,
+    ops::{Deref, RangeInclusive},
+    time::Duration,
+};

-use anyhow::{Context, Result};
+use anyhow::{anyhow, Context, Error, Result};
 use colored::Colorize;
 use comfy_table::{presets, Table};
 use futures::TryStreamExt;
-use penumbra_num::Amount;
-use penumbra_proto::core::component::stake::v1::{
-    query_service_client::QueryServiceClient as StakeQueryServiceClient, ValidatorInfoRequest,
+use penumbra_app::params::AppParameters;
+use penumbra_num::{fixpoint::U128x128, Amount};
+use penumbra_proto::{
+    core::{
+        app::v1::{
+            query_service_client::QueryServiceClient as AppQueryServiceClient, AppParametersRequest,
+        },
+        component::stake::v1::{
+            query_service_client::QueryServiceClient as StakeQueryServiceClient,
+            GetValidatorInfoRequest, GetValidatorInfoResponse, ValidatorInfoRequest,
+            ValidatorStatusRequest, ValidatorUptimeRequest,
+        },
+    },
+    DomainType,
 };
 use penumbra_stake::{
-    validator::{self, ValidatorToml},
-    IdentityKey,
+    rate::RateData,
+    validator::{self, Info, Status, Validator, ValidatorToml},
+    IdentityKey, Uptime, BPS_SQUARED_SCALING_FACTOR,
 };

 use crate::App;

@@ -35,6 +52,16 @@ pub enum ValidatorCmd {
         /// The identity key of the validator to fetch.
         identity_key: String,
     },
+    /// Get the uptime of the validator.
+    Uptime {
+        /// The identity key of the validator to fetch.
+        identity_key: String,
+    },
+    /// Fetch the current status for a particular validator.
+    Status {
+        /// The identity key of the validator to fetch.
+        identity_key: String,
+    },
 }

 impl ValidatorCmd {
@@ -155,62 +182,297 @@ impl ValidatorCmd {
                 println!("{table}");
             }
             ValidatorCmd::Definition { file, identity_key } => {
+                // Parse the identity key and construct the RPC request.
+                let request = tonic::Request::new(GetValidatorInfoRequest {
+                    identity_key: identity_key
+                        .parse::<IdentityKey>()
+                        .map(|ik| ik.to_proto())
+                        .map(Some)?,
+                });
+
+                // Instantiate an RPC client and send the request.
+                let GetValidatorInfoResponse { validator_info } = app
+                    .pd_channel()
+                    .await
+                    .map(StakeQueryServiceClient::new)?
+                    .get_validator_info(request)
+                    .await?
+                    .into_inner();
+
+                // Coerce the validator information into TOML, or return an error if it was not
+                // found within the client's response.
+                let serialize = |v| toml::to_string_pretty(&v).map_err(Error::from);
+                let toml = validator_info
+                    .ok_or_else(|| anyhow!("response did not include validator info"))?
+                    .try_into()
+                    .context("parsing validator info")
+                    .map(|Info { validator, .. }| validator)
+                    .map(ValidatorToml::from)
+                    .and_then(serialize)?;
+
+                // Write to a file if an output file was specified, otherwise print to stdout.
+                if let Some(file) = file {
+                    File::create(file)
+                        .with_context(|| format!("cannot create file {file:?}"))?
+                        .write_all(toml.as_bytes())
+                        .context("could not write file")?;
+                } else {
+                    println!("{}", toml);
+                }
+            }
+            ValidatorCmd::Uptime { identity_key } => {
                 let identity_key = identity_key.parse::<IdentityKey>()?;
-
-                /*
-                use penumbra_proto::client::specific::ValidatorStatusRequest;

                 let mut client = StakeQueryServiceClient::new(app.pd_channel().await?);
-                let mut client = opt.specific_client().await?;
-                let status: ValidatorStatus = client
-                    .validator_status(ValidatorStatusRequest {
-                        chain_id: "".to_string(), // TODO: fill in
+                // What's the uptime?
+                let uptime: Uptime = client
+                    .validator_uptime(ValidatorUptimeRequest {
                         identity_key: Some(identity_key.into()),
                     })
                     .await?
                     .into_inner()
+                    .uptime
+                    .ok_or_else(|| anyhow::anyhow!("uptime must be present in response"))?
                     .try_into()?;

-                // why isn't the validator definition part of the status?
-                // why do we have all these different validator messages?
-                // do we need them?
-                status.state.
-                */
-
-                // Intsead just download everything
-                let mut client = StakeQueryServiceClient::new(app.pd_channel().await?);
-
-                let validators = client
-                    .validator_info(ValidatorInfoRequest {
-                        show_inactive: true,
-                        ..Default::default()
+                // Is the validator active?
+                let status: validator::Status = client
+                    .validator_status(ValidatorStatusRequest {
+                        identity_key: Some(identity_key.into()),
                     })
                     .await?
                     .into_inner()
-                    .try_collect::<Vec<_>>()
+                    .status
+                    .ok_or_else(|| anyhow::anyhow!("status must be present in response"))?
+                    .try_into()?;
+                let state = status.state;
+                let active = matches!(state, validator::State::Active);
+
+                // Get the chain parameters
+                let mut client = AppQueryServiceClient::new(app.pd_channel().await?);
+                let params: AppParameters = client
+                    .app_parameters(tonic::Request::new(AppParametersRequest {}))
                     .await?
-                    .into_iter()
-                    .map(TryInto::try_into)
-                    .collect::<Result<Vec<_>, _>>()?;
+                    .into_inner()
+                    .app_parameters
+                    .ok_or_else(|| anyhow::anyhow!("empty AppParametersResponse message"))?
+                    .try_into()?;

-                let validator: ValidatorToml = validators
-                    .iter()
-                    .map(|info| &info.validator)
-                    .find(|v| v.identity_key == identity_key)
-                    .cloned()
-                    .ok_or_else(|| anyhow::anyhow!("Could not find validator {}", identity_key))?
-                    .into();
+                let as_of_height = uptime.as_of_height();
+                let missed_blocks = uptime.num_missed_blocks();
+                let window_len = uptime.missed_blocks_window();

-                if let Some(file) = file {
-                    File::create(file)
-                        .with_context(|| format!("cannot create file {file:?}"))?
-                        .write_all(toml::to_string_pretty(&validator)?.as_bytes())
-                        .context("could not write file")?;
-                } else {
-                    println!("{}", toml::to_string_pretty(&validator)?);
+                let mut downtime_ranges: Vec<RangeInclusive<u64>> = vec![];
+                for missed_block in uptime.missed_blocks() {
+                    if let Some(range) = downtime_ranges.last_mut() {
+                        if range.end() + 1 == missed_block {
+                            *range = *range.start()..=missed_block;
+                        } else {
+                            downtime_ranges.push(missed_block..=missed_block);
+                        }
+                    } else {
+                        downtime_ranges.push(missed_block..=missed_block);
+                    }
+                }
+
+                let percent_uptime =
+                    100.0 * (window_len as f64 - missed_blocks as f64) / window_len as f64;
+                let signed_blocks = window_len as u64 - missed_blocks as u64;
+                let min_uptime_blocks =
+                    window_len as u64 - params.stake_params.missed_blocks_maximum;
+                let percent_min_uptime = 100.0 * min_uptime_blocks as f64 / window_len as f64;
+                let percent_max_downtime =
+                    100.0 * params.stake_params.missed_blocks_maximum as f64 / window_len as f64;
+                let percent_downtime = 100.0 * missed_blocks as f64 / window_len as f64;
+                let percent_downtime_penalty =
+                    // Converting from basis points squared to percentage
+                    params.stake_params.slashing_penalty_downtime as f64 / 100.0 / 100.0;
+                let min_remaining_downtime_blocks = (window_len as u64)
+                    .saturating_sub(missed_blocks as u64)
+                    .saturating_sub(min_uptime_blocks);
+                let min_remaining_downtime = humantime::Duration::from(Duration::from_secs(
+                    (min_remaining_downtime_blocks * 5) as u64,
+                ));
+                let cumulative_downtime =
+                    humantime::Duration::from(Duration::from_secs((missed_blocks * 5) as u64));
+                let percent_grace = 100.0 * min_remaining_downtime_blocks as f64
+                    / (window_len - min_uptime_blocks as usize) as f64;
+                let window_len_len = window_len.to_string().len();
+
+                println!("{state} validator: as of block {as_of_height}");
+                println!("Unmissed signing: {percent_uptime:>6.2}% = {signed_blocks:width$}/{window_len} most-recent blocks", width = window_len_len);
+                if active {
+                    println!("Required signing: {percent_min_uptime:>6.2}% = {min_uptime_blocks:width$}/{window_len} most-recent blocks", width = window_len_len);
+                }
+                println!("Salient downtime: {percent_downtime:>6.2}% = {missed_blocks:width$}/{window_len} most-recent blocks ~ {cumulative_downtime} cumulative downtime", width = window_len_len);
+                if active {
+                    println!("Unexpended grace: {percent_grace:>6.2}% = {min_remaining_downtime_blocks:width$}/{window_len} forthcoming blocks ~ {min_remaining_downtime} at minimum before penalty", width = window_len_len);
+                    println!( "Downtime penalty: {percent_downtime_penalty:>6.2}% - if downtime exceeds {percent_max_downtime:.2}%, penalty will be applied to all delegations");
+                }
+                if !downtime_ranges.is_empty() {
+                    println!("Downtime details:");
+                    let mut max_blocks_width = 0;
+                    let mut max_start_width = 0;
+                    let mut max_end_width = 0;
+                    for range in downtime_ranges.iter() {
+                        let blocks = range.end() - range.start() + 1;
+                        max_blocks_width = max_blocks_width.max(blocks.to_string().len());
+                        max_start_width = max_start_width.max(range.start().to_string().len());
+                        if blocks != 1 {
+                            max_end_width = max_end_width.max(range.end().to_string().len());
+                        }
+                    }
+                    for range in downtime_ranges.iter() {
+                        let blocks = range.end() - range.start() + 1;
+                        let estimated_duration =
+                            humantime::Duration::from(Duration::from_secs((blocks * 5) as u64));
+                        if blocks == 1 {
+                            let height = range.start();
+                            println!(
+                                " • {blocks:width$} missed: block {height:>height_width$} {empty:>duration_width$}(~ {estimated_duration})",
+                                width = max_blocks_width,
+                                height_width = max_start_width,
+                                duration_width = max_end_width + 5,
+                                empty = "",
+                            );
+                        } else {
+                            let start = range.start();
+                            let end = range.end();
+                            println!(
+                                " • {blocks:width$} missed: blocks {start:>start_width$} ..= {end:>end_width$} (~ {estimated_duration})",
+                                width = max_blocks_width,
+                                start_width = max_start_width,
+                                end_width = max_end_width,
+                            );
+                        };
+                    }
+                }
+            }
+            ValidatorCmd::Status { identity_key } => {
+                // Parse the identity key and construct the RPC request.
+                let request = tonic::Request::new(GetValidatorInfoRequest {
+                    identity_key: identity_key
+                        .parse::<IdentityKey>()
+                        .map(|ik| ik.to_proto())
+                        .map(Some)?,
+                });
+
+                // Instantiate an RPC client and send the request.
+                let GetValidatorInfoResponse { validator_info } = app
+                    .pd_channel()
+                    .await
+                    .map(StakeQueryServiceClient::new)?
+                    .get_validator_info(request)
+                    .await?
+                    .into_inner();
+
+                // Parse the validator status, or return an error if it was not found within the
+                // client's response.
+                let info = validator_info
+                    .ok_or_else(|| anyhow!("response did not include validator info"))?
+                    .try_into()
+                    .context("parsing validator info")?;
+
+                // Initialize a table, add a header and insert this validator's information.
+                let mut table = Table::new();
+                table
+                    .load_preset(presets::NOTHING)
+                    .set_header(vec![
+                        "Voting Power",
+                        "Commission",
+                        "State",
+                        "Bonding State",
+                        "Exchange Rate",
+                        "Identity Key",
+                        "Name",
+                    ])
+                    .add_row(StatusRow::new(info));
+                println!("{table}");
+            }
         }
         Ok(())
     }
 }
+
+/// A row within the `status` command's table output.
+struct StatusRow {
+    power: f64,
+    commission: u16,
+    state: validator::State,
+    bonding_state: validator::BondingState,
+    exchange_rate: U128x128,
+    identity_key: IdentityKey,
+    name: String,
+}
+
+impl StatusRow {
+    /// Constructs a new [`StatusRow`].
+    fn new(
+        Info {
+            validator:
+                Validator {
+                    funding_streams,
+                    identity_key,
+                    name,
+                    ..
+                },
+            status:
+                Status {
+                    state,
+                    bonding_state,
+                    voting_power,
+                    ..
+                },
+            rate_data:
+                RateData {
+                    validator_exchange_rate,
+                    ..
+                },
+        }: Info,
+    ) -> Self {
+        // Calculate the scaled voting power, exchange rate, and commissions.
+        let power = (voting_power.value() as f64) * 1e-6;
+        let commission = funding_streams.iter().map(|fs| fs.rate_bps()).sum();
+        let exchange_rate = {
+            let rate_bps_sq = U128x128::from(validator_exchange_rate);
+            (rate_bps_sq / BPS_SQUARED_SCALING_FACTOR.deref()).expect("nonzero scaling factor")
+        };
+
+        Self {
+            power,
+            commission,
+            state,
+            bonding_state,
+            exchange_rate,
+            identity_key,
+            name,
+        }
+    }
+}
+
+impl Into<comfy_table::Row> for StatusRow {
+    fn into(self) -> comfy_table::Row {
+        let Self {
+            power,
+            commission,
+            state,
+            bonding_state,
+            exchange_rate,
+            identity_key,
+            name,
+        } = self;
+
+        [
+            format!("{power:.3}"),
+            format!("{commission}bps"),
+            state.to_string(),
+            bonding_state.to_string(),
+            exchange_rate.to_string(),
+            identity_key.to_string(),
+            name,
+        ]
+        .into()
+    }
+}
diff --git a/crates/bin/pcli/src/command/tx.rs b/crates/bin/pcli/src/command/tx.rs
index 68a72c0f30..3fd90f5b5b 100644
--- a/crates/bin/pcli/src/command/tx.rs
+++ b/crates/bin/pcli/src/command/tx.rs
@@ -25,12 +25,13 @@ use ibc_types::lightclients::tendermint::client_state::ClientState as TendermintClientState;
 use rand_core::OsRng;
 use regex::Regex;

+use auction::AuctionCmd;
 use liquidity_position::PositionCmd;
 use penumbra_asset::{asset, asset::Metadata, Value, STAKING_TOKEN_ASSET_ID};
 use penumbra_dex::{lp::position, swap_claim::SwapClaimPlan};
 use penumbra_fee::Fee;
 use penumbra_governance::{proposal::ProposalToml, proposal_state::State as ProposalState, Vote};
-use penumbra_keys::keys::AddressIndex;
+use penumbra_keys::{keys::AddressIndex, Address};
 use penumbra_num::Amount;
 use penumbra_proto::{
     core::component::{
@@ -63,8 +64,7 @@ use proposal::ProposalCmd;

 use crate::App;

-use super::auction::AuctionCmd;
-
+mod auction;
 mod liquidity_position;
 mod proposal;
 mod replicate;
@@ -346,7 +346,7 @@ impl TxCmd {
                     .map(|v| v.parse())
                    .collect::<Result<Vec<Value>, _>>()?;
                 let to = to
-                    .parse()
+                    .parse::<Address>()
                     .map_err(|_| anyhow::anyhow!("address is invalid"))?;

                 let return_address = app
@@ -364,7 +364,7 @@ impl TxCmd {
                     .set_gas_prices(gas_prices)
                     .set_fee_tier((*fee_tier).into());
                 for value in values.iter().cloned() {
-                    planner.output(value, to);
+                    planner.output(value, to.clone());
                 }
                 let plan = planner
                     .memo(memo_plaintext)?
diff --git a/crates/bin/pcli/src/command/auction.rs b/crates/bin/pcli/src/command/tx/auction.rs
similarity index 56%
rename from crates/bin/pcli/src/command/auction.rs
rename to crates/bin/pcli/src/command/tx/auction.rs
index db478b093c..f1df24291a 100644
--- a/crates/bin/pcli/src/command/auction.rs
+++ b/crates/bin/pcli/src/command/tx/auction.rs
@@ -1,13 +1,15 @@
-use super::tx::FeeTier;
+use super::FeeTier;
 use crate::App;
-use anyhow::Context;
+use anyhow::{anyhow, bail, Context};
 use clap::Subcommand;
-use penumbra_asset::{asset, Value};
-use penumbra_auction::auction::AuctionId;
+use penumbra_asset::Value;
+use penumbra_auction::auction::{dutch::DutchAuction, AuctionId};
+use penumbra_dex::lp::position::Position;
 use penumbra_keys::keys::AddressIndex;
-use penumbra_num::Amount;
-use penumbra_proto::view::v1::GasPricesRequest;
+use penumbra_proto::{view::v1::GasPricesRequest, DomainType, Name};
+use penumbra_view::SpendableNoteRecord;
 use penumbra_wallet::plan::Planner;
+use rand::RngCore;
 use rand_core::OsRng;

 #[derive(Debug, Subcommand)]
@@ -23,25 +25,22 @@ pub enum DutchCmd {
     /// Schedule a Dutch auction, a tool to help accomplish price discovery.
     #[clap(display_order = 100, name = "schedule")]
     DutchAuctionSchedule {
-        /// Source address initiating the auction.
-        #[clap(long, display_order = 100)]
+        /// Source account initiating the auction.
+        #[clap(long, display_order = 100, default_value = "0")]
         source: u32,
         /// The value the seller wishes to auction.
         #[clap(long, display_order = 200)]
         input: String,
-        /// The asset ID of the target asset the seller wishes to acquire.
-        #[clap(long, display_order = 300)]
-        output: String,
         /// The maximum output the seller can receive.
         ///
         /// This implicitly defines the starting price for the auction.
         #[clap(long, display_order = 400)]
-        max_output: u64,
+        max_output: String,
         /// The minimum output the seller is willing to receive.
         ///
         /// This implicitly defines the ending price for the auction.
         #[clap(long, display_order = 500)]
-        min_output: u64,
+        min_output: String,
         /// The block height at which the auction begins.
         ///
         /// This allows the seller to schedule an auction at a future time.
@@ -58,53 +57,49 @@ pub enum DutchCmd {
         /// `end_height - start_height` must be a multiple of `step_count`.
         #[clap(long, display_order = 800)]
         step_count: u64,
-        /// A random nonce used to allow identical auctions to have
-        /// distinct auction IDs.
-        #[clap(long, display_order = 900)]
-        nonce: u64,
         /// The selected fee tier to multiply the fee amount by.
         #[clap(short, long, value_enum, default_value_t, display_order = 1000)]
         fee_tier: FeeTier,
     },
-    /// Withdraws the reserves of the Dutch auction.
-    #[clap(display_order = 200, name = "withdraw")]
-    DutchAuctionWithdraw {
-        /// Source address withdrawing from the auction.
-        #[clap(long, display_order = 100)]
+    /// Terminate a Dutch auction.
+    #[clap(display_order = 300, name = "end")]
+    DutchAuctionEnd {
+        /// Source account terminating the auction.
+        #[clap(long, display_order = 100, default_value = "0")]
         source: u32,
-        /// The auction to withdraw funds from.
+        /// Identifier of the auction.
         #[clap(long, display_order = 200)]
         auction_id: String,
-        /// The sequence number of the withdrawal.
-        #[clap(long, display_order = 300)]
-        seq: u64,
-        /// The amount of the input asset directly owned by the auction.
-        ///
-        /// The auction may also own the input asset indirectly,
-        /// via the reserves of `current_position` if it exists.
-        #[clap(long, display_order = 400)]
-        reserves_input: String,
-        /// The amount of the output asset directly owned by the auction.
-        ///
-        /// The auction may also own the output asset indirectly,
-        /// via the reserves of `current_position` if it exists.
-        #[clap(long, display_order = 500)]
-        reserves_output: String,
         /// The selected fee tier to multiply the fee amount by.
-        #[clap(short, long, value_enum, default_value_t, display_order = 600)]
+        #[clap(short, long, value_enum, default_value_t, display_order = 300)]
         fee_tier: FeeTier,
     },
-    /// Ends a Dutch auction.
-    #[clap(display_order = 300, name = "end")]
-    DutchAuctionEnd {
-        /// Source address withdrawing from auction.
+    /// Withdraw a Dutch auction, and claim its reserves.
+    #[clap(display_order = 200, name = "withdraw")]
+    DutchAuctionWithdraw {
+        /// Source account withdrawing from the auction.
         #[clap(long, display_order = 100)]
         source: u32,
-        /// Identifier of the auction.
+        /// The auction to withdraw funds from.
         #[clap(long, display_order = 200)]
         auction_id: String,
+        // /// The sequence number of the withdrawal.
+        // #[clap(long, display_order = 300)]
+        // seq: u64,
+        // /// The amount of the input asset directly owned by the auction.
+        // ///
+        // /// The auction may also own the input asset indirectly,
+        // /// via the reserves of `current_position` if it exists.
+        // #[clap(long, display_order = 400)]
+        // reserves_input: String,
+        // /// The amount of the output asset directly owned by the auction.
+        // ///
+        // /// The auction may also own the output asset indirectly,
+        // /// via the reserves of `current_position` if it exists.
+        // #[clap(long, display_order = 500)]
+        // reserves_output: String,
         /// The selected fee tier to multiply the fee amount by.
-        #[clap(short, long, value_enum, default_value_t, display_order = 300)]
+        #[clap(short, long, value_enum, default_value_t, display_order = 600)]
         fee_tier: FeeTier,
     },
 }
@@ -127,37 +122,34 @@ impl DutchCmd {
             DutchCmd::DutchAuctionSchedule {
                 source,
                 input,
-                output,
                 max_output,
                 min_output,
                 start_height,
                 end_height,
                 step_count,
-                nonce: _,
                 fee_tier,
             } => {
+                let mut nonce = [0u8; 32];
+                OsRng.fill_bytes(&mut nonce);
+
                 let input = input.parse::<Value>()?;
-                let output = output.parse::<asset::Id>()?;
-                let max_output = Amount::from(*max_output);
-                let min_output = Amount::from(*min_output);
+                let max_output = max_output.parse::<Value>()?;
+                let min_output = min_output.parse::<Value>()?;
+                let output_id = max_output.asset_id;

-                let mut planner = Planner::new(OsRng);
-                planner
+                let plan = Planner::new(OsRng)
                     .set_gas_prices(gas_prices)
-                    .set_fee_tier((*fee_tier).into());
-
-                planner.dutch_auction_schedule(
-                    input,
-                    output,
-                    max_output,
-                    min_output,
-                    *start_height,
-                    *end_height,
-                    *step_count,
-                    [0; 32],
-                );
-
-                let plan = planner
+                    .set_fee_tier((*fee_tier).into())
+                    .dutch_auction_schedule(
+                        input,
+                        output_id,
+                        max_output.amount,
+                        min_output.amount,
+                        *start_height,
+                        *end_height,
+                        *step_count,
+                        nonce,
+                    )
                     .plan(
                         app.view
                             .as_mut()

                 app.build_and_submit_transaction(plan).await?;
                 Ok(())
             }
-            DutchCmd::DutchAuctionWithdraw {
-                source,
+            DutchCmd::DutchAuctionEnd {
                 auction_id,
-                seq,
-                reserves_input,
-                reserves_output,
+                source,
                 fee_tier,
             } => {
                 let auction_id = auction_id.parse::<AuctionId>()?;
-                let reserves_input = reserves_input.parse::<Value>()?;
-                let reserves_output = reserves_output.parse::<Value>()?;

-                let mut planner = Planner::new(OsRng);
-                planner
+                let plan = Planner::new(OsRng)
                     .set_gas_prices(gas_prices)
-                    .set_fee_tier((*fee_tier).into());
-
-                planner.dutch_auction_withdraw(auction_id, *seq, reserves_input, reserves_output);
-
-                let plan = planner
+                    .set_fee_tier((*fee_tier).into())
+                    .dutch_auction_end(auction_id)
                     .plan(
                         app.view
                             .as_mut()

                 app.build_and_submit_transaction(plan).await?;
                 Ok(())
             }
-            DutchCmd::DutchAuctionEnd {
-                auction_id,
+            DutchCmd::DutchAuctionWithdraw {
                 source,
+                auction_id,
+                // seq,
+                // reserves_input,
+                // reserves_output,
                 fee_tier,
             } => {
                 let auction_id = auction_id.parse::<AuctionId>()?;
-                let mut planner = Planner::new(OsRng);
-                planner
-                    .set_gas_prices(gas_prices)
-                    .set_fee_tier((*fee_tier).into());
+                use pbjson_types::Any;
+                use penumbra_view::ViewClient;
+                let view_client = app.view();
+                let (auction_id, _, auction_raw, _): (
+                    AuctionId,
+                    SpendableNoteRecord,
+                    Option<Any>,
+                    Vec<Position>,
+                ) = view_client
+                    .auctions(None, true, true)
+                    .await?
+                    .into_iter()
+                    .find(|(id, _, _, _)| &auction_id == id)
+                    .ok_or_else(|| anyhow!("the auction id is unknown from the view service!"))?;

+                let Some(raw_da_state) = auction_raw else {
+                    bail!("auction state is missing from view server response")
+                };
+
+                use penumbra_proto::core::component::auction::v1alpha1 as pb_auction;
+                // We're processing a Dutch auction:
+                assert_eq!(raw_da_state.type_url, pb_auction::DutchAuction::type_url());
+
+                let dutch_auction = DutchAuction::decode(raw_da_state.value)?;
+
+                let reserves_input = Value {
+                    amount: dutch_auction.state.input_reserves,
+                    asset_id: dutch_auction.description.input.asset_id,
+                };
+                let reserves_output = Value {
+                    amount: dutch_auction.state.output_reserves,
+                    asset_id: dutch_auction.description.output_id,
+                };
+                let seq = dutch_auction.state.sequence + 1;
+
+                let mut planner = Planner::new(OsRng);

-                planner.dutch_auction_end(auction_id);

                 let plan = planner
+                    .set_gas_prices(gas_prices)
+                    .set_fee_tier((*fee_tier).into())
+                    .dutch_auction_withdraw(auction_id, seq, reserves_input, reserves_output)
                     .plan(
                         app.view
                             .as_mut()
diff --git a/crates/bin/pcli/src/command/view.rs b/crates/bin/pcli/src/command/view.rs
index c2c3b50b94..3d57d1333c 100644
--- a/crates/bin/pcli/src/command/view.rs
+++ b/crates/bin/pcli/src/command/view.rs
@@ -9,7 +9,10 @@ use wallet_id::WalletIdCmd;

 use crate::App;

+use self::auction::AuctionCmd;
+
 mod address;
+mod auction;
 mod balance;
 mod staked;
 mod wallet_id;
@@ -19,6 +22,8 @@ mod tx;

 #[derive(Debug, clap::Subcommand)]
 pub enum ViewCmd {
+    /// View your auction information
+    Auction(AuctionCmd),
     /// View your wallet id
     WalletId(WalletIdCmd),
     /// View one of your addresses, either by numerical index, or a random ephemeral one.
@@ -44,6 +49,7 @@ impl ViewCmd {
     pub fn offline(&self) -> bool {
         match self {
+            ViewCmd::Auction(auction_cmd) => auction_cmd.offline(),
             ViewCmd::WalletId(wallet_id_cmd) => wallet_id_cmd.offline(),
             ViewCmd::Address(address_cmd) => address_cmd.offline(),
             ViewCmd::Balance(balance_cmd) => balance_cmd.offline(),
@@ -60,6 +66,9 @@ impl ViewCmd {
         let full_viewing_key = app.config.full_viewing_key.clone();

         match self {
+            ViewCmd::Auction(auction_cmd) => {
+                auction_cmd.exec(app.view(), &full_viewing_key).await?
+            }
             ViewCmd::WalletId(wallet_id_cmd) => {
                 wallet_id_cmd.exec(&full_viewing_key)?;
             }
diff --git a/crates/bin/pcli/src/command/view/auction.rs b/crates/bin/pcli/src/command/view/auction.rs
new file mode 100644
index 0000000000..6e82f089eb
--- /dev/null
+++ b/crates/bin/pcli/src/command/view/auction.rs
@@ -0,0 +1,31 @@
+use anyhow::Result;
+use penumbra_keys::FullViewingKey;
+use penumbra_view::ViewClient;
+
+#[derive(Debug, clap::Args)]
+pub struct AuctionCmd {
+    #[clap(long)]
+    /// If set, includes the inactive auctions as well.
+    pub include_inactive: bool,
+}
+
+impl AuctionCmd {
+    pub fn offline(&self) -> bool {
+        false
+    }
+
+    pub async fn exec(
+        &self,
+        view_client: &mut impl ViewClient,
+        _fvk: &FullViewingKey,
+    ) -> Result<()> {
+        let auctions = view_client
+            .auctions(None, self.include_inactive, false)
+            .await?;
+
+        auctions.iter().for_each(|(id, snr, _, _)| {
+            println!("{id:?} {}", snr.note.amount());
+        });
+        Ok(())
+    }
+}
diff --git a/crates/bin/pcli/tests/proof.rs b/crates/bin/pcli/tests/proof.rs
index adaa09277e..e84b1b2a0d 100644
--- a/crates/bin/pcli/tests/proof.rs
+++ b/crates/bin/pcli/tests/proof.rs
@@ -278,7 +278,7 @@ fn swap_claim_parameters_vs_current_swap_claim_circuit() {
         unfilled_2: Amount::from(50u64),
         height: height.into(),
         trading_pair: swap_plaintext.trading_pair,
-        epoch_starting_height: (epoch_duration * position.epoch()).into(),
+        sct_position_prefix: position,
     };
     let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i));
diff --git a/crates/bin/pclientd/tests/network_integration.rs b/crates/bin/pclientd/tests/network_integration.rs
index 7f02c207ce..6c9c282171 100644
--- a/crates/bin/pclientd/tests/network_integration.rs
+++ b/crates/bin/pclientd/tests/network_integration.rs
@@ -6,7 +6,7 @@
 //! where no tokens have been delegated, and the address with index 0
 //! was distributed 1 cube.

-use std::process::Command as StdCommand;
+use std::{ops::Deref, process::Command as StdCommand};

 use anyhow::Context;
 use assert_cmd::cargo::CommandCargoExt;
@@ -120,7 +120,7 @@ async fn transaction_send_flow() -> anyhow::Result<()> {
     let plan = view_client
         .transaction_planner(TransactionPlannerRequest {
             outputs: vec![tpr::Output {
-                address: Some((*test_keys::ADDRESS_1).into()),
+                address: Some(test_keys::ADDRESS_1.deref().clone().into()),
                 value: Some(
                     Value {
                         amount: 1_000_000u64.into(),
@@ -304,7 +304,7 @@ async fn swap_claim_flow() -> anyhow::Result<()> {
                     amount: Some(num::Amount { lo: 0, hi: 0 }),
                     asset_id: None,
                 }),
-                claim_address: Some((*test_keys::ADDRESS_1).into()),
+                claim_address: Some(test_keys::ADDRESS_1.deref().clone().into()),
             }],
             ..Default::default()
         })
diff --git a/crates/bin/pd/Cargo.toml b/crates/bin/pd/Cargo.toml
index 111b7036cd..9293e9b17d 100644
--- a/crates/bin/pd/Cargo.toml
+++ b/crates/bin/pd/Cargo.toml
@@ -31,94 +31,77 @@ download-proving-keys = ["penumbra-proof-params/download-proving-keys"]
 anyhow = "1"

 [dependencies]
-anyhow = { workspace = true }
-ark-ff = { workspace = true, default-features = true }
-async-stream = { workspace = true }
-async-trait = { workspace = true }
-axum = "0.6"
-axum-server = { workspace = true, features = ["tls-rustls"] }
-base64 = { workspace = true }
-bincode = { workspace = true }
-blake2b_simd = { workspace = true }
-bytes = { workspace = true }
-chrono = { workspace = true, default-features = false, features = ["serde"] }
-clap = { workspace = true, features = ["derive", "env"] }
-cnidarium = { workspace = true, features = [
-    "migration",
-    "rpc",
-], default-features = true }
-csv = "1.1"
-decaf377 = { workspace = true, features = [
-    "parallel",
-], default-features = true }
-decaf377-rdsa = { workspace = true }
-directories = { workspace = true }
-ed25519-consensus = { workspace = true }
-flate2 = "1.0.28"
-fs_extra = "1.3.0"
-futures = { workspace = true }
-hex = { workspace = true }
-http = { workspace = true }
-ibc-proto = { workspace = true, default-features = false, features = [
-    "server",
-] }
-ibc-types = { workspace = true, default-features = true }
-ics23 = { workspace = true }
-jmt = { workspace = true }
= true } -metrics = { workspace = true } -metrics-exporter-prometheus = { version = "0.13", features = ["http-listener"] } -metrics-tracing-context = { workspace = true } -metrics-util = "0.16.2" -mime_guess = "2" -once_cell = { workspace = true } -pbjson-types = { workspace = true } -penumbra-app = { workspace = true } -penumbra-asset = { workspace = true, default-features = true } -penumbra-auto-https = { path = "../../util/auto-https" } -penumbra-compact-block = { workspace = true, default-features = true } -penumbra-custody = { workspace = true } -penumbra-auction = { workspace = true, features = [ - "parallel", -], default-features = true } -penumbra-dex = { workspace = true, features = [ - "parallel", -], default-features = true } -penumbra-fee = { workspace = true, default-features = true } -penumbra-governance = { workspace = true, features = [ - "parallel", -], default-features = true } -penumbra-ibc = { workspace = true, features = ["rpc"], default-features = true } -penumbra-keys = { workspace = true, default-features = true } -penumbra-num = { workspace = true, default-features = true } -penumbra-proto = { workspace = true, default-features = true } -penumbra-sct = { workspace = true, default-features = true } -penumbra-shielded-pool = { workspace = true, features = [ - "parallel", -], default-features = true } -penumbra-stake = { workspace = true, features = [ - "parallel", -], default-features = true } -penumbra-tendermint-proxy = { path = "../../util/tendermint-proxy" } -penumbra-tower-trace = { path = "../../util/tower-trace" } -penumbra-transaction = { workspace = true, default-features = true } -pin-project = { workspace = true } -pin-project-lite = { workspace = true } -prost = { workspace = true } -prost-types = { workspace = true } -rand = { workspace = true } -rand_chacha = { workspace = true } -rand_core = { workspace = true, features = ["getrandom"] } -regex = { workspace = true } -reqwest = { version = "0.11", features = ["json", "stream"] } -rocksdb = { workspace = true } -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true } -serde_with = { workspace = true, features = ["hex"] } -sha2 = { workspace = true } -tar = "0.4.40" -tempfile = { workspace = true } -tendermint = { workspace = true } -tendermint-config = { workspace = true } +anyhow = { workspace = true } +ark-ff = { workspace = true, default-features = true } +async-stream = { workspace = true } +async-trait = { workspace = true } +axum = "0.6" +axum-server = { workspace = true, features = ["tls-rustls"] } +base64 = { workspace = true } +bincode = { workspace = true } +blake2b_simd = { workspace = true } +bytes = { workspace = true } +chrono = { workspace = true, default-features = false, features = ["serde"] } +clap = { workspace = true, features = ["derive", "env"] } +cnidarium = { workspace = true, features = ["migration", "rpc"], default-features = true } +csv = "1.1" +decaf377 = { workspace = true, features = ["parallel"], default-features = true } +decaf377-rdsa = { workspace = true } +directories = { workspace = true } +ed25519-consensus = { workspace = true } +flate2 = "1.0.28" +fs_extra = "1.3.0" +futures = { workspace = true } +hex = { workspace = true } +http = { workspace = true } +ibc-proto = { workspace = true, default-features = false, features = ["server"] } +ibc-types = { workspace = true, default-features = true } +ics23 = { workspace = true } +jmt = { workspace = true } +metrics = { workspace = true } +metrics-exporter-prometheus = { version = "0.13", 
features = ["http-listener"] } +metrics-tracing-context = { workspace = true } +metrics-util = "0.16.2" +mime_guess = "2" +once_cell = { workspace = true } +pbjson-types = { workspace = true } +penumbra-app = { workspace = true } +penumbra-asset = { workspace = true, default-features = true } +penumbra-auto-https = { path = "../../util/auto-https" } +penumbra-compact-block = { workspace = true, default-features = true } +penumbra-custody = { workspace = true } +penumbra-auction = { workspace = true, features = ["parallel"], default-features = true } +penumbra-dex = { workspace = true, features = ["parallel"], default-features = true } +penumbra-fee = { workspace = true, default-features = true } +penumbra-governance = { workspace = true, features = ["parallel"], default-features = true } +penumbra-ibc = { workspace = true, features = ["rpc"], default-features = true } +penumbra-keys = { workspace = true, default-features = true } +penumbra-proto = { workspace = true, default-features = true } +penumbra-sct = { workspace = true, default-features = true } +penumbra-shielded-pool = { workspace = true, features = ["parallel"], default-features = true } +penumbra-stake = { workspace = true, features = ["parallel"], default-features = true } +penumbra-tct = { workspace = true, default-features = true } +penumbra-tendermint-proxy = { path = "../../util/tendermint-proxy" } +penumbra-tower-trace = { path = "../../util/tower-trace" } +penumbra-transaction = { workspace = true, default-features = true } +pin-project = { workspace = true } +pin-project-lite = { workspace = true } +prost = { workspace = true } +prost-types = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +rand_core = { workspace = true, features = ["getrandom"] } +regex = { workspace = true } +reqwest = { version = "0.11", features = ["json", "stream"] } +rocksdb = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +serde_with = { workspace = true, features = ["hex"] } +sha2 = { workspace = true } +tar = "0.4.40" +tempfile = { workspace = true } +tendermint = { workspace = true } +tendermint-config = { workspace = true } tendermint-light-client-verifier = { workspace = true } tendermint-proto = { workspace = true } tendermint-rpc = { workspace = true, features = ["http-client"] } diff --git a/crates/bin/pd/src/migrate.rs b/crates/bin/pd/src/migrate.rs index 384f29caa6..eaa978c784 100644 --- a/crates/bin/pd/src/migrate.rs +++ b/crates/bin/pd/src/migrate.rs @@ -4,6 +4,8 @@ //! node operators must coordinate to perform a chain upgrade. //! This module declares how local `pd` state should be altered, if at all, //! in order to be compatible with the network post-chain-upgrade. +mod testnet72; + use anyhow::Context; use futures::StreamExt as _; use std::path::PathBuf; @@ -29,6 +31,9 @@ pub enum Migration { SimpleMigration, /// Testnet-70 migration: move swap executions from the jmt to nv-storage. Testnet70, + /// Testnet-72 migration: + /// - Migrate `BatchSwapOutputData` to new protobuf, replacing epoch height with index. + Testnet72, /// Testnet-74 migration: change liquidity positions to be ordered in descending order rather than ascending. 
Testnet74, } } @@ -40,7 +45,7 @@ impl Migration { genesis_start: Option<tendermint::time::Time>, ) -> anyhow::Result<()> { match self { - Migration::Noop => (), + Migration::Noop => Ok(()), Migration::SimpleMigration => { let rocksdb_dir = path_to_export.join("rocksdb"); let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?; @@ -104,6 +109,7 @@ impl Migration { crate::testnet::generate::TestnetValidator::initial_state(); std::fs::write(validator_state_path, fresh_validator_state) .expect("can write validator state"); + Ok(()) } Migration::Testnet70 => { // Our goal is to fetch all swap executions from the jmt and store them in nv-storage. @@ -192,7 +198,10 @@ impl Migration { duration = migration_duration.as_secs(), "successful migration!" ); + + Ok(()) } + Migration::Testnet72 => testnet72::migrate(path_to_export, genesis_start).await, Migration::Testnet74 => { // Lookups for liquidity positions based on starting asset were ordered backwards // and returning the positions with the least liquidity first. This migration @@ -303,7 +312,6 @@ impl Migration { ); } } - Ok(()) } } diff --git a/crates/bin/pd/src/migrate/testnet72.rs b/crates/bin/pd/src/migrate/testnet72.rs new file mode 100644 index 0000000000..e6c8f6bcd9 --- /dev/null +++ b/crates/bin/pd/src/migrate/testnet72.rs @@ -0,0 +1,206 @@ +//! Contains functions related to the migration script of Testnet72 + +use anyhow; +use cnidarium::{Snapshot, StateDelta, StateRead, StateWrite, Storage}; +use futures::StreamExt as _; +use jmt::RootHash; +use penumbra_app::app::StateReadExt as _; +use penumbra_app::SUBSTORE_PREFIXES; +use penumbra_proto::core::component::sct::v1::query_service_server::QueryService; +use penumbra_proto::penumbra::core::component as pb; +use penumbra_proto::StateWriteProto; +use penumbra_sct::component::clock::{EpochManager, EpochRead}; +use penumbra_sct::component::rpc::Server as SctServer; +use penumbra_tct::Position; +use prost::Message; +use std::path::PathBuf; +use std::sync::Arc; +use tonic::IntoRequest; + +use crate::testnet::generate::TestnetConfig; + +/// The context holding various query services we need to help perform the migration. +#[derive(Clone)] +struct Context { + sct_server: Arc<SctServer>, +} + +impl Context { + /// Create a new context from the state storage. + fn new(storage: Storage) -> Self { + Self { + sct_server: Arc::new(SctServer::new(storage)), + } + } + + /// Use storage to look up the index of an epoch based on its starting height + async fn epoch_height_to_index(&self, epoch_starting_height: u64) -> anyhow::Result<u64> { + Ok(self + .sct_server + .epoch_by_height( + pb::sct::v1::EpochByHeightRequest { + height: epoch_starting_height, + } + .into_request(), + ) + .await? + .into_inner() + .epoch + .expect(&format!( + "epoch at height {} should be present", + epoch_starting_height + )) + .index) + } + + /// Translate the protobuf for a BSOD by populating the correct data and emptying the + /// deprecated field.
+ #[allow(deprecated)] + async fn translate_bsod( + &self, + bsod: pb::dex::v1::BatchSwapOutputData, + ) -> anyhow::Result<pb::dex::v1::BatchSwapOutputData> { + let sct_position_prefix: u64 = { + let epoch = self + .epoch_height_to_index(bsod.epoch_starting_height) + .await?; + Position::from(( + u16::try_from(epoch).expect("epoch should fit in 16 bits"), + u16::try_from(bsod.height - bsod.epoch_starting_height) + .expect("block index should fit in 16 bits"), + 0, + )) + .into() + }; + Ok(pb::dex::v1::BatchSwapOutputData { + sct_position_prefix, + epoch_starting_height: Default::default(), + ..bsod + }) + } + + async fn translate_compact_block( + &self, + compact_block: pb::compact_block::v1::CompactBlock, + ) -> anyhow::Result<pb::compact_block::v1::CompactBlock> { + let mut swap_outputs = Vec::with_capacity(compact_block.swap_outputs.len()); + for bsod in compact_block.swap_outputs { + swap_outputs.push(self.translate_bsod(bsod).await?); + } + Ok(pb::compact_block::v1::CompactBlock { + swap_outputs, + ..compact_block + }) + } +} + +/// Translate all of the BSODs inside dex storage to the new format. +async fn translate_dex_storage( + ctx: Context, + delta: &mut StateDelta<Snapshot>, +) -> anyhow::Result<()> { + let mut stream = delta.prefix_raw("dex/output/"); + while let Some(r) = stream.next().await { + let (key, bsod_bytes) = r?; + let bsod = pb::dex::v1::BatchSwapOutputData::decode(bsod_bytes.as_slice())?; + let bsod = ctx.translate_bsod(bsod).await?; + delta.put_proto(key, bsod); + } + Ok(()) +} + +/// Translate all of the compact block storage to hold the new BSOD data inside the compact blocks. +async fn translate_compact_block_storage( + ctx: Context, + delta: &mut StateDelta<Snapshot>, +) -> anyhow::Result<()> { + let mut stream = delta.nonverifiable_prefix_raw("compactblock/".as_bytes()); + while let Some(r) = stream.next().await { + let (key, compactblock_bytes) = r?; + let block = pb::compact_block::v1::CompactBlock::decode(compactblock_bytes.as_slice())?; + let block = ctx.translate_compact_block(block).await?; + delta.nonverifiable_put_raw(key, block.encode_to_vec()); + } + Ok(()) +} + +/// Run the full migration, given an export path and a start time for genesis. +pub async fn migrate( + path_to_export: PathBuf, + genesis_start: Option<tendermint::time::Time>, +) -> anyhow::Result<()> { + let rocksdb_dir = path_to_export.join("rocksdb"); + let storage = Storage::load(rocksdb_dir.clone(), SUBSTORE_PREFIXES.to_vec()).await?; + let export_state = storage.latest_snapshot(); + let root_hash = export_state.root_hash().await.expect("can get root hash"); + let pre_upgrade_root_hash: RootHash = root_hash.into(); + let pre_upgrade_height = export_state + .get_block_height() + .await + .expect("can get block height"); + let post_upgrade_height = pre_upgrade_height.wrapping_add(1); + + let mut delta = StateDelta::new(export_state); + let (migration_duration, post_upgrade_root_hash) = { + let start_time = std::time::SystemTime::now(); + let ctx = Context::new(storage.clone()); + + // Translate inside dex storage. + translate_dex_storage(ctx.clone(), &mut delta).await?; + // Translate inside compact block storage.
+ translate_compact_block_storage(ctx.clone(), &mut delta).await?; + + delta.put_block_height(0u64); + let post_upgrade_root_hash = storage.commit_in_place(delta).await?; + tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash"); + + (start_time.elapsed().unwrap(), post_upgrade_root_hash) + }; + + storage.release().await; + let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?; + let migrated_state = storage.latest_snapshot(); + + // The migration is complete; now we need to generate a genesis file. To do this, we need + // to look up a validator view from the chain, and specify the post-upgrade app hash and + // initial height. + let chain_id = migrated_state.get_chain_id().await?; + let app_state = penumbra_app::genesis::Content { + chain_id, + ..Default::default() + }; + let mut genesis = TestnetConfig::make_genesis(app_state.clone()).expect("can make genesis"); + genesis.app_hash = post_upgrade_root_hash + .0 + .to_vec() + .try_into() + .expect("infallible conversion"); + genesis.initial_height = post_upgrade_height as i64; + genesis.genesis_time = genesis_start.unwrap_or_else(|| { + let now = tendermint::time::Time::now(); + tracing::info!(%now, "no genesis time provided, detecting a testing setup"); + now + }); + let checkpoint = post_upgrade_root_hash.0.to_vec(); + let genesis = TestnetConfig::make_checkpoint(genesis, Some(checkpoint)); + + let genesis_json = serde_json::to_string(&genesis).expect("can serialize genesis"); + tracing::info!("genesis: {}", genesis_json); + let genesis_path = path_to_export.join("genesis.json"); + std::fs::write(genesis_path, genesis_json).expect("can write genesis"); + + let validator_state_path = path_to_export.join("priv_validator_state.json"); + let fresh_validator_state = crate::testnet::generate::TestnetValidator::initial_state(); + std::fs::write(validator_state_path, fresh_validator_state).expect("can write validator state"); + + tracing::info!( + pre_upgrade_height, + post_upgrade_height, + ?pre_upgrade_root_hash, + ?post_upgrade_root_hash, + duration = migration_duration.as_secs(), + "successful migration!"
+ ); + + Ok(()) +} diff --git a/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs b/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs index 67d781308d..d50e1bd881 100644 Binary files a/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs and b/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs differ diff --git a/crates/core/app/src/action_handler/transaction.rs b/crates/core/app/src/action_handler/transaction.rs index 859ff816c8..9219a8350b 100644 --- a/crates/core/app/src/action_handler/transaction.rs +++ b/crates/core/app/src/action_handler/transaction.rs @@ -110,6 +110,8 @@ impl AppActionHandler for Transaction { #[cfg(test)] mod tests { + use std::ops::Deref; + use anyhow::Result; use penumbra_asset::{Value, STAKING_TOKEN_ASSET_ID}; use penumbra_fee::Fee; @@ -163,10 +165,14 @@ mod tests { actions: vec![ SpendPlan::new(&mut OsRng, note, auth_path.position()).into(), SpendPlan::new(&mut OsRng, note2, auth_path2.position()).into(), - OutputPlan::new(&mut OsRng, value, *test_keys::ADDRESS_1).into(), + OutputPlan::new(&mut OsRng, value, test_keys::ADDRESS_1.deref().clone()).into(), ], detection_data: Some(DetectionDataPlan { - clue_plans: vec![CluePlan::new(&mut OsRng, *test_keys::ADDRESS_1, 1)], + clue_plans: vec![CluePlan::new( + &mut OsRng, + test_keys::ADDRESS_1.deref().clone(), + 1, + )], }), memo: None, }; @@ -228,7 +234,7 @@ mod tests { }, actions: vec![ SpendPlan::new(&mut OsRng, note, auth_path.position()).into(), - OutputPlan::new(&mut OsRng, value, *test_keys::ADDRESS_1).into(), + OutputPlan::new(&mut OsRng, value, test_keys::ADDRESS_1.deref().clone()).into(), ], detection_data: None, memo: None, diff --git a/crates/core/app/src/app/mod.rs b/crates/core/app/src/app/mod.rs index f918e1c642..150047225b 100644 --- a/crates/core/app/src/app/mod.rs +++ b/crates/core/app/src/app/mod.rs @@ -634,7 +634,7 @@ impl App { /// /// Increment this manually after fixing the root cause for a chain halt: updated nodes will then be /// able to proceed past the block height of the halt. 
-const TOTAL_HALT_COUNT: u64 = 1; +const TOTAL_HALT_COUNT: u64 = 2; #[async_trait] pub trait StateReadExt: StateRead { diff --git a/crates/core/app/src/rpc.rs b/crates/core/app/src/rpc.rs index 0fed11c5a2..cc809a46e6 100644 --- a/crates/core/app/src/rpc.rs +++ b/crates/core/app/src/rpc.rs @@ -19,6 +19,7 @@ use { client::v1::query_server::QueryServer as ClientQueryServer, connection::v1::query_server::QueryServer as ConnectionQueryServer, }, + penumbra_auction::component::rpc::Server as AuctionServer, penumbra_compact_block::component::rpc::Server as CompactBlockServer, penumbra_dex::component::rpc::Server as DexServer, penumbra_fee::component::rpc::Server as FeeServer, @@ -27,6 +28,7 @@ use { core::{ app::v1::query_service_server::QueryServiceServer as AppQueryServiceServer, component::{ + auction::v1alpha1::query_service_server::QueryServiceServer as AuctionQueryServiceServer, compact_block::v1::query_service_server::QueryServiceServer as CompactBlockQueryServiceServer, dex::v1::{ query_service_server::QueryServiceServer as DexQueryServiceServer, @@ -79,6 +81,9 @@ pub fn router( .add_service(we(StorageQueryServiceServer::new(StorageServer::new( storage.clone(), )))) + .add_service(AuctionQueryServiceServer::new(AuctionServer::new( + storage.clone(), + ))) .add_service(we(AppQueryServiceServer::new(AppQueryServer::new( storage.clone(), )))) diff --git a/crates/core/app/tests/app_can_define_and_delegate_to_a_validator.rs b/crates/core/app/tests/app_can_define_and_delegate_to_a_validator.rs index a3b26cfa52..b783bf2db4 100644 --- a/crates/core/app/tests/app_can_define_and_delegate_to_a_validator.rs +++ b/crates/core/app/tests/app_can_define_and_delegate_to_a_validator.rs @@ -1,5 +1,5 @@ use { - self::common::{BuilderExt, TestNodeExt}, + self::common::{BuilderExt, TestNodeExt, ValidatorDataReadExt}, anyhow::anyhow, cnidarium::TempStorage, decaf377_rdsa::{SigningKey, SpendAuth, VerificationKey}, @@ -16,6 +16,7 @@ use { GovernanceKey, IdentityKey, }, rand_core::OsRng, + std::ops::Deref, tap::Tap, tracing::{error_span, info, Instrument}, }; @@ -251,13 +252,16 @@ async fn app_can_define_and_delegate_to_a_validator() -> anyhow::Result<()> { let output = OutputPlan::new( &mut rand_core::OsRng, delegate.delegation_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), delegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), @@ -410,14 +414,17 @@ async fn app_can_define_and_delegate_to_a_validator() -> anyhow::Result<()> { let output = OutputPlan::new( &mut rand_core::OsRng, undelegate.unbonded_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), undelegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll 
set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), diff --git a/crates/core/app/tests/app_can_disable_community_pool_spends.rs b/crates/core/app/tests/app_can_disable_community_pool_spends.rs index bac8e9a281..3648b06bba 100644 --- a/crates/core/app/tests/app_can_disable_community_pool_spends.rs +++ b/crates/core/app/tests/app_can_disable_community_pool_spends.rs @@ -1,4 +1,5 @@ use { + self::common::ValidatorDataReadExt, anyhow::anyhow, cnidarium::TempStorage, decaf377_rdsa::VerificationKey, @@ -26,13 +27,13 @@ use { DomainType, }, penumbra_shielded_pool::{genesis::Allocation, OutputPlan, SpendPlan}, - penumbra_stake::{component::validator_handler::ValidatorDataRead, DelegationToken}, + penumbra_stake::DelegationToken, penumbra_transaction::{ memo::MemoPlaintext, plan::MemoPlan, ActionPlan, TransactionParameters, TransactionPlan, }, rand::Rng, rand_core::OsRng, - std::collections::BTreeMap, + std::{collections::BTreeMap, ops::Deref}, tap::{Tap, TapFallible}, tracing::{error_span, info, Instrument}, }; @@ -203,7 +204,7 @@ async fn app_can_disable_community_pool_spends() -> anyhow::Result<()> { CommunityPoolSpend { value }.into(), CommunityPoolOutput { value, - address: *test_keys::ADDRESS_0, + address: test_keys::ADDRESS_0.deref().clone(), } .into(), ], @@ -232,12 +233,17 @@ async fn app_can_disable_community_pool_spends() -> anyhow::Result<()> { actions: vec![ proposal, // Next, create a new output of the exact same amount. - OutputPlan::new(&mut OsRng, proposal_nft_value, *test_keys::ADDRESS_0).into(), + OutputPlan::new( + &mut OsRng, + proposal_nft_value, + test_keys::ADDRESS_0.deref().clone(), + ) + .into(), ], // Now fill out the remaining parts of the transaction needed for verification: memo: Some(MemoPlan::new( &mut OsRng, - MemoPlaintext::blank_memo(*test_keys::ADDRESS_0), + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), )?), detection_data: None, transaction_parameters: TransactionParameters { diff --git a/crates/core/app/tests/app_can_propose_community_pool_spends.rs b/crates/core/app/tests/app_can_propose_community_pool_spends.rs index 80d43d450e..fe308b9761 100644 --- a/crates/core/app/tests/app_can_propose_community_pool_spends.rs +++ b/crates/core/app/tests/app_can_propose_community_pool_spends.rs @@ -1,4 +1,5 @@ use { + self::common::ValidatorDataReadExt, anyhow::anyhow, cnidarium::TempStorage, decaf377_rdsa::VerificationKey, @@ -26,13 +27,13 @@ use { DomainType, }, penumbra_shielded_pool::{genesis::Allocation, OutputPlan, SpendPlan}, - penumbra_stake::{component::validator_handler::ValidatorDataRead, DelegationToken}, + penumbra_stake::DelegationToken, penumbra_transaction::{ memo::MemoPlaintext, plan::MemoPlan, ActionPlan, TransactionParameters, TransactionPlan, }, rand::Rng, rand_core::OsRng, - std::collections::BTreeMap, + std::{collections::BTreeMap, ops::Deref}, tap::{Tap, TapFallible}, tracing::{error_span, info, Instrument}, }; @@ -197,7 +198,7 @@ async fn app_can_propose_community_pool_spends() -> anyhow::Result<()> { CommunityPoolSpend { value }.into(), CommunityPoolOutput { value, - address: *test_keys::ADDRESS_0, + address: test_keys::ADDRESS_0.deref().clone(), } .into(), ], @@ -226,12 +227,17 @@ async fn app_can_propose_community_pool_spends() -> anyhow::Result<()> { actions: vec![ proposal, // Next, create a new output of the exact same amount. 
- OutputPlan::new(&mut OsRng, proposal_nft_value, *test_keys::ADDRESS_0).into(), + OutputPlan::new( + &mut OsRng, + proposal_nft_value, + test_keys::ADDRESS_0.deref().clone(), + ) + .into(), ], // Now fill out the remaining parts of the transaction needed for verification: memo: Some(MemoPlan::new( &mut OsRng, - MemoPlaintext::blank_memo(*test_keys::ADDRESS_0), + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), )?), detection_data: None, transaction_parameters: TransactionParameters { diff --git a/crates/core/app/tests/app_can_spend_notes_and_detect_outputs.rs b/crates/core/app/tests/app_can_spend_notes_and_detect_outputs.rs index d4bf8577d1..547525cca6 100644 --- a/crates/core/app/tests/app_can_spend_notes_and_detect_outputs.rs +++ b/crates/core/app/tests/app_can_spend_notes_and_detect_outputs.rs @@ -13,6 +13,7 @@ use { memo::MemoPlaintext, plan::MemoPlan, TransactionParameters, TransactionPlan, }, rand_core::OsRng, + std::ops::Deref, tap::{Tap, TapFallible}, tracing::info, }; @@ -63,12 +64,17 @@ async fn app_can_spend_notes_and_detect_outputs() -> anyhow::Result<()> { ) .into(), // Next, create a new output of the exact same amount. - OutputPlan::new(&mut OsRng, input_note.value(), *test_keys::ADDRESS_1).into(), + OutputPlan::new( + &mut OsRng, + input_note.value(), + test_keys::ADDRESS_1.deref().clone(), + ) + .into(), ], // Now fill out the remaining parts of the transaction needed for verification: memo: Some(MemoPlan::new( &mut OsRng, - MemoPlaintext::blank_memo(*test_keys::ADDRESS_0), + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), )?), detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { diff --git a/crates/core/app/tests/app_can_undelegate_from_a_validator.rs b/crates/core/app/tests/app_can_undelegate_from_a_validator.rs index 1f7f2c7573..12365df179 100644 --- a/crates/core/app/tests/app_can_undelegate_from_a_validator.rs +++ b/crates/core/app/tests/app_can_undelegate_from_a_validator.rs @@ -1,5 +1,5 @@ use { - self::common::{BuilderExt, TestNodeExt}, + self::common::{BuilderExt, TestNodeExt, ValidatorDataReadExt}, anyhow::anyhow, ark_ff::UniformRand, cnidarium::TempStorage, @@ -18,6 +18,7 @@ use { memo::MemoPlaintext, plan::MemoPlan, TransactionParameters, TransactionPlan, }, rand_core::OsRng, + std::ops::Deref, tap::Tap, tracing::{error_span, info, Instrument}, }; @@ -132,13 +133,16 @@ async fn app_can_undelegate_from_a_validator() -> anyhow::Result<()> { let output = OutputPlan::new( &mut rand_core::OsRng, delegate.delegation_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), delegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), @@ -230,13 +234,16 @@ async fn app_can_undelegate_from_a_validator() -> anyhow::Result<()> { let output = OutputPlan::new( &mut rand_core::OsRng, undelegate.unbonded_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), undelegate.into()], // Now fill 
out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), @@ -317,8 +324,11 @@ async fn app_can_undelegate_from_a_validator() -> anyhow::Result<()> { let mut plan = TransactionPlan { actions: vec![claim.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), diff --git a/crates/core/app/tests/app_rejects_validator_definitions_with_invalid_auth_sigs.rs b/crates/core/app/tests/app_rejects_validator_definitions_with_invalid_auth_sigs.rs index 679a4048cb..a245d03488 100644 --- a/crates/core/app/tests/app_rejects_validator_definitions_with_invalid_auth_sigs.rs +++ b/crates/core/app/tests/app_rejects_validator_definitions_with_invalid_auth_sigs.rs @@ -1,5 +1,5 @@ use { - self::common::BuilderExt, + self::common::{BuilderExt, ValidatorDataReadExt}, cnidarium::TempStorage, decaf377_rdsa::{SigningKey, SpendAuth, VerificationKey}, penumbra_app::{genesis::AppState, server::consensus::Consensus}, @@ -7,10 +7,7 @@ use { penumbra_mock_client::MockClient, penumbra_mock_consensus::TestNode, penumbra_proto::DomainType, - penumbra_stake::{ - component::validator_handler::ValidatorDataRead as _, validator::Validator, FundingStreams, - GovernanceKey, IdentityKey, - }, + penumbra_stake::{validator::Validator, FundingStreams, GovernanceKey, IdentityKey}, rand_core::OsRng, tap::Tap, tracing::{error_span, info, Instrument}, diff --git a/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs index 126888c365..e9f93ba232 100644 --- a/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs +++ b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs @@ -1,7 +1,5 @@ -mod common; - use { - self::common::BuilderExt, + self::common::{BuilderExt, ValidatorDataReadExt}, anyhow::Context, cnidarium::TempStorage, penumbra_app::{genesis::AppState, server::consensus::Consensus}, @@ -11,6 +9,8 @@ use { tracing::{error_span, trace, Instrument}, }; +mod common; + #[tokio::test] async fn app_tracks_uptime_for_genesis_validator_missing_blocks() -> anyhow::Result<()> { // Install a test logger, acquire some temporary storage, and start the test node. 
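A note on the recurring churn in the test diffs above: every `*test_keys::ADDRESS_0` (or `ADDRESS_1`) becomes `test_keys::ADDRESS_0.deref().clone()`, alongside a new `use std::ops::Deref`. The following is a minimal, self-contained sketch of why the old spelling stops compiling; the `Address` struct and `ADDRESS_0` static here are hypothetical stand-ins for the real `penumbra_keys` types, and it assumes (as `pd`'s manifest above suggests) that `once_cell` is available and that the statics are `Lazy<Address>` values wrapping a type that is now `Clone` but no longer `Copy`.

```rust
// Sketch of the borrow-checker issue behind the repeated
// `*ADDRESS_0` -> `ADDRESS_0.deref().clone()` rewrites in these tests.
use std::ops::Deref;

use once_cell::sync::Lazy;

#[derive(Clone)] // `Copy` removed: an owned `Address` must now be cloned explicitly.
struct Address([u8; 80]);

// Hypothetical stand-in for `test_keys::ADDRESS_0`.
static ADDRESS_0: Lazy<Address> = Lazy::new(|| Address([0u8; 80]));

fn consume(addr: Address) -> usize {
    addr.0.len()
}

fn main() {
    // While `Address` was `Copy`, `*ADDRESS_0` silently copied the value out
    // of the `Lazy`. Without `Copy`, dereferencing only yields `&Address`,
    // and moving out of a shared reference is an error; an owned value has
    // to come from an explicit clone.
    let owned = ADDRESS_0.deref().clone();
    assert_eq!(consume(owned), 80);
}
```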
diff --git a/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_signing_blocks.rs b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_signing_blocks.rs index 8c9be9a2d2..4f20881b8e 100644 --- a/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_signing_blocks.rs +++ b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_signing_blocks.rs @@ -1,5 +1,5 @@ use { - self::common::BuilderExt, + self::common::{BuilderExt, ValidatorDataReadExt}, anyhow::Context, cnidarium::TempStorage, penumbra_app::{genesis::AppState, server::consensus::Consensus}, diff --git a/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs b/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs index 8a4553e786..2c3f228f0c 100644 --- a/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs +++ b/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs @@ -1,5 +1,5 @@ use { - self::common::{BuilderExt, TestNodeExt}, + self::common::{BuilderExt, TestNodeExt, ValidatorDataReadExt}, cnidarium::TempStorage, decaf377_rdsa::{SigningKey, SpendAuth, VerificationKey}, penumbra_app::{ @@ -16,6 +16,7 @@ use { FundingStreams, GovernanceKey, IdentityKey, Uptime, }, rand_core::OsRng, + std::ops::Deref, tap::Tap, tracing::{error_span, Instrument}, }; @@ -191,13 +192,16 @@ async fn app_tracks_uptime_for_validators_only_once_active() -> anyhow::Result<( let output = OutputPlan::new( &mut rand_core::OsRng, delegate.delegation_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), delegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), @@ -312,14 +316,17 @@ async fn app_tracks_uptime_for_validators_only_once_active() -> anyhow::Result<( let output = OutputPlan::new( &mut rand_core::OsRng, undelegate.unbonded_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), undelegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), diff --git a/crates/core/app/tests/common/mod.rs b/crates/core/app/tests/common/mod.rs index 1a6e788508..ad71cc246e 100644 --- a/crates/core/app/tests/common/mod.rs +++ b/crates/core/app/tests/common/mod.rs @@ -5,7 +5,7 @@ pub use { self::{ temp_storage_ext::TempStorageExt, test_node_builder_ext::BuilderExt, - test_node_ext::TestNodeExt, + test_node_ext::TestNodeExt, validator_read_ext::ValidatorDataReadExt, }, penumbra_test_subscriber::set_tracing_subscriber, }; @@ -22,3 +22,9 @@ mod temp_storage_ext; /// /// See [`TestNodeExt`]. 
mod test_node_ext; + +/// Helpful additions for reading validator information. +/// +/// See [`ValidatorDataRead`][penumbra_stake::component::validator_handler::ValidatorDataRead], +/// and [`ValidatorDataReadExt`]. +mod validator_read_ext; diff --git a/crates/core/app/tests/common/validator_read_ext.rs b/crates/core/app/tests/common/validator_read_ext.rs new file mode 100644 index 0000000000..5788bed9d7 --- /dev/null +++ b/crates/core/app/tests/common/validator_read_ext.rs @@ -0,0 +1,39 @@ +use { + async_trait::async_trait, + futures::TryStreamExt, + penumbra_proto::StateReadProto, + penumbra_stake::{ + component::validator_handler::ValidatorDataRead, state_key, validator::Validator, + IdentityKey, + }, +}; + +/// All [`ValidatorDataRead`]s implement [`ValidatorDataReadExt`]. +impl<T: ValidatorDataRead + ?Sized> ValidatorDataReadExt for T {} + +/// Additional extensions to [`ValidatorDataRead`] for use in test cases. +#[async_trait] +pub trait ValidatorDataReadExt: ValidatorDataRead { + /// Returns a list of **all** known validators' metadata. + /// + /// This is not included in [`ValidatorDataRead`] because it is liable to become expensive + /// over time as more validators are defined. This should only be used in test cases. + async fn validator_definitions(&self) -> anyhow::Result<Vec<Validator>> { + self.prefix(state_key::validators::definitions::prefix()) + .map_ok(|(_key, validator)| validator) + .try_collect() + .await + } + + /// Returns a list of **all** known validators' identity keys. + /// + /// This is not included in [`ValidatorDataRead`] because it is liable to become expensive + /// over time as more validators are defined. This should only be used in test cases. + async fn validator_identity_keys(&self) -> anyhow::Result<Vec<IdentityKey>> { + self.prefix(state_key::validators::definitions::prefix()) + .map_ok(|(_key, validator)| validator) + .map_ok(|validator: Validator| validator.identity_key) + .try_collect() + .await + } +} diff --git a/crates/core/app/tests/mock_consensus_can_define_a_genesis_validator.rs b/crates/core/app/tests/mock_consensus_can_define_a_genesis_validator.rs index e1954bc743..2d478ea52a 100644 --- a/crates/core/app/tests/mock_consensus_can_define_a_genesis_validator.rs +++ b/crates/core/app/tests/mock_consensus_can_define_a_genesis_validator.rs @@ -1,5 +1,5 @@ use { - self::common::BuilderExt, + self::common::{BuilderExt, ValidatorDataReadExt}, anyhow::anyhow, cnidarium::TempStorage, penumbra_app::{genesis::AppState, server::consensus::Consensus}, diff --git a/crates/core/app/tests/swap_and_swap_claim.rs b/crates/core/app/tests/swap_and_swap_claim.rs index 586e5027f3..aa329da576 100644 --- a/crates/core/app/tests/swap_and_swap_claim.rs +++ b/crates/core/app/tests/swap_and_swap_claim.rs @@ -58,7 +58,7 @@ async fn swap_and_swap_claim() -> anyhow::Result<()> { let delta_1 = Amount::from(100_000u64); let delta_2 = Amount::from(0u64); let fee = Fee::default(); - let claim_address: Address = *test_keys::ADDRESS_0; + let claim_address: Address = test_keys::ADDRESS_0.deref().clone(); let plaintext = SwapPlaintext::new(&mut rng, trading_pair, delta_1, delta_2, fee, claim_address); @@ -295,7 +295,7 @@ async fn swap_with_nonzero_fee() -> anyhow::Result<()> { let delta_1 = Amount::from(100_000u64); let delta_2 = Amount::from(0u64); let fee = Fee::from_staking_token_amount(Amount::from(1u64)); - let claim_address: Address = *test_keys::ADDRESS_0; + let claim_address: Address = test_keys::ADDRESS_0.deref().clone(); let plaintext = SwapPlaintext::new(&mut rng, trading_pair, delta_1, delta_2, fee, claim_address); diff --git
a/crates/core/asset/src/asset/denom_metadata.rs b/crates/core/asset/src/asset/denom_metadata.rs index 07006615c1..582a476a98 100644 --- a/crates/core/asset/src/asset/denom_metadata.rs +++ b/crates/core/asset/src/asset/denom_metadata.rs @@ -333,6 +333,10 @@ impl Metadata { REGISTRY.parse_denom(&denom.denom) } + pub fn is_auction_nft(&self) -> bool { + self.starts_with("auctionnft_") + } + pub fn is_opened_position_nft(&self) -> bool { let prefix = "lpnft_opened_".to_string(); diff --git a/crates/core/asset/src/asset/registry.rs b/crates/core/asset/src/asset/registry.rs index a178f1cc93..72c2eb02d0 100644 --- a/crates/core/asset/src/asset/registry.rs +++ b/crates/core/asset/src/asset/registry.rs @@ -418,7 +418,7 @@ pub static REGISTRY: Lazy<Registry> = Lazy::new(|| { ]) }) as for<'r> fn(&'r str) -> _) .add_asset( - "^auctionnft_(?P<seq_num>[0-9]+)_(?P<auction_id>paucid1[a-zA-HJ-NP-Z0-9]+)$", + "^auctionnft_(?P<data>[a-z_0-9]+_pauctid1[a-zA-HJ-NP-Z0-9]+)$", &[ /* no display units - nft, unit 1 */ ], (|data: &str| { assert!(!data.is_empty()); diff --git a/crates/core/component/auction/src/auction/nft.rs b/crates/core/component/auction/src/auction/nft.rs index ac1e7fa64b..0147dd585a 100644 --- a/crates/core/component/auction/src/auction/nft.rs +++ b/crates/core/component/auction/src/auction/nft.rs @@ -1,7 +1,8 @@ use crate::auction::id::AuctionId; use anyhow::{anyhow, Result}; -use penumbra_asset::asset::{self}; +use penumbra_asset::asset::{self, Metadata}; use penumbra_proto::{core::component::auction::v1alpha1 as pb, DomainType}; +use regex::Regex; /// A non-fungible token (NFT) tracking the state and ownership of an auction. #[derive(Debug, Clone)] @@ -54,3 +55,39 @@ impl TryFrom<pb::AuctionNft> for AuctionNft { Ok(AuctionNft::new(id, seq)) } } + +impl TryFrom<Metadata> for AuctionNft { + type Error = anyhow::Error; + + fn try_from(denom: Metadata) -> Result<Self, Self::Error> { + let regex = Regex::new( + "^auctionnft_(?P<seq_num>[0-9]+)_(?P<auction_id>pauctid1[a-zA-HJ-NP-Z0-9]+)$", + ) + .expect("regex is valid"); + + let denom_string = denom.to_string(); + + let captures = regex + .captures(&denom_string) + .ok_or_else(|| anyhow!("denom {} is not a valid auction nft", denom))?; + + let seq_num = captures + .name("seq_num") + .ok_or_else(|| anyhow!("sequence number not found"))? + .as_str(); + let auction_id = captures + .name("auction_id") + .ok_or_else(|| anyhow!("auction ID not found"))?
+ .as_str(); + + let seq_num: u64 = seq_num + .parse() + .map_err(|_| anyhow!("Failed to parse seq_num to u64"))?; + + let auction_id: AuctionId = auction_id + .parse() + .map_err(|_| anyhow!("Failed to parse auction_id to AuctionId"))?; + + Ok(AuctionNft::new(auction_id, seq_num)) + } +} diff --git a/crates/core/component/auction/src/component/auction_store.rs b/crates/core/component/auction/src/component/auction_store.rs index a0722d3d1d..6d6669057c 100644 --- a/crates/core/component/auction/src/component/auction_store.rs +++ b/crates/core/component/auction/src/component/auction_store.rs @@ -31,7 +31,7 @@ pub(crate) trait AuctionStoreRead: StateRead { return Ok(None); }; - let dutch_auction_type_str = pb::DutchAuction::full_name(); + let dutch_auction_type_str = pb::DutchAuction::type_url(); anyhow::ensure!( any_auction.type_url == dutch_auction_type_str, diff --git a/crates/core/component/auction/src/component/dutch_auction.rs b/crates/core/component/auction/src/component/dutch_auction.rs index 6e23627d9c..a356a92c59 100644 --- a/crates/core/component/auction/src/component/dutch_auction.rs +++ b/crates/core/component/auction/src/component/dutch_auction.rs @@ -438,14 +438,14 @@ trait Inner: StateWrite { let id = new_state.description.id(); let key = state_key::auction_store::by_id(id); let pb_state: pb::DutchAuction = new_state.into(); - let raw_auction = pb_state.encode_length_delimited_to_vec(); + let raw_auction = pb_state.encode_to_vec(); let any_auction = prost_types::Any { - type_url: pb::DutchAuction::full_name(), + type_url: pb::DutchAuction::type_url(), value: raw_auction, }; - let raw_any = any_auction.encode_length_delimited_to_vec(); + let raw_any = any_auction.encode_to_vec(); self.put_raw(key, raw_any); } diff --git a/crates/core/component/auction/src/component/rpc.rs b/crates/core/component/auction/src/component/rpc.rs index 57c8a233cc..7b7c4cb3ff 100755 --- a/crates/core/component/auction/src/component/rpc.rs +++ b/crates/core/component/auction/src/component/rpc.rs @@ -44,13 +44,13 @@ impl QueryService for Server { .try_into() .map_err(|_| Status::invalid_argument("invalid auction id"))?; - let auction_data = state + let raw_auction = state .get_raw_auction(id) .await .ok_or_else(|| tonic::Status::not_found("auction data not found for specified id"))?; Ok(tonic::Response::new(AuctionStateByIdResponse { - auction: Some(auction_data), + auction: Some(raw_auction), positions: Vec::new(), })) } diff --git a/crates/core/component/dex/src/batch_swap_output_data.rs b/crates/core/component/dex/src/batch_swap_output_data.rs index f40a446b46..33afb327f5 100644 --- a/crates/core/component/dex/src/batch_swap_output_data.rs +++ b/crates/core/component/dex/src/batch_swap_output_data.rs @@ -8,6 +8,7 @@ use ark_r1cs_std::{ use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; use decaf377::{r1cs::FqVar, Fq}; use penumbra_proto::{penumbra::core::component::dex::v1 as pb, DomainType}; +use penumbra_tct::Position; use serde::{Deserialize, Serialize}; use penumbra_num::fixpoint::{bit_constrain, U128x128, U128x128Var}; @@ -36,8 +37,8 @@ pub struct BatchSwapOutputData { pub height: u64, /// The trading pair associated with the batch swap. pub trading_pair: TradingPair, - /// The starting block height of the epoch for which the batch swap data is valid. - pub epoch_starting_height: u64, + /// The position prefix where this batch swap occurred. The commitment index must be 0. 
+ pub sct_position_prefix: Position, } impl BatchSwapOutputData { @@ -117,19 +118,19 @@ impl ToConstraintField<Fq> for BatchSwapOutputData { .expect("U128x128 types are Bls12-377 field members"), ); public_inputs.extend( - Fq::from(self.height) + self.trading_pair .to_field_elements() - .expect("Fq types are Bls12-377 field members"), + .expect("trading_pair is a Bls12-377 field member"), ); public_inputs.extend( - self.trading_pair + Fq::from(self.sct_position_prefix.epoch()) .to_field_elements() - .expect("trading_pair is a Bls12-377 field member"), + .expect("Position types are Bls12-377 field members"), ); public_inputs.extend( - Fq::from(self.epoch_starting_height) + Fq::from(self.sct_position_prefix.block()) .to_field_elements() - .expect("Fq types are Bls12-377 field members"), + .expect("Position types are Bls12-377 field members"), ); Some(public_inputs) } @@ -142,9 +143,9 @@ pub struct BatchSwapOutputDataVar { pub lambda_2: U128x128Var, pub unfilled_1: U128x128Var, pub unfilled_2: U128x128Var, - pub height: FqVar, pub trading_pair: TradingPairVar, - pub epoch_starting_height: FqVar, + pub epoch: FqVar, + pub block_within_epoch: FqVar, } impl AllocVar<BatchSwapOutputData, Fq> for BatchSwapOutputDataVar { @@ -168,18 +169,23 @@ let unfilled_1 = U128x128Var::new_variable(cs.clone(), || Ok(unfilled_1_fixpoint), mode)?; let unfilled_2_fixpoint: U128x128 = output_data.unfilled_2.into(); let unfilled_2 = U128x128Var::new_variable(cs.clone(), || Ok(unfilled_2_fixpoint), mode)?; - let height = FqVar::new_variable(cs.clone(), || Ok(Fq::from(output_data.height)), mode)?; - // Check the height is 64 bits - let _ = bit_constrain(height.clone(), 64); let trading_pair = TradingPairVar::new_variable_unchecked( cs.clone(), || Ok(output_data.trading_pair), mode, )?; - let epoch_starting_height = - FqVar::new_variable(cs, || Ok(Fq::from(output_data.epoch_starting_height)), mode)?; - // Check the epoch starting height is 64 bits - let _ = bit_constrain(epoch_starting_height.clone(), 64); + let epoch = FqVar::new_variable( + cs.clone(), + || Ok(Fq::from(output_data.sct_position_prefix.epoch())), + mode, + )?; + bit_constrain(epoch.clone(), 16)?; + let block_within_epoch = FqVar::new_variable( + cs.clone(), + || Ok(Fq::from(output_data.sct_position_prefix.block())), + mode, + )?; + bit_constrain(block_within_epoch.clone(), 16)?; Ok(Self { delta_1, @@ -189,8 +195,8 @@ unfilled_1, unfilled_2, trading_pair, - height, - epoch_starting_height, + epoch, + block_within_epoch, }) } } @@ -201,6 +207,7 @@ impl DomainType for BatchSwapOutputData { impl From<BatchSwapOutputData> for pb::BatchSwapOutputData { fn from(s: BatchSwapOutputData) -> Self { + #[allow(deprecated)] pb::BatchSwapOutputData { delta_1: Some(s.delta_1.into()), delta_2: Some(s.delta_2.into()), @@ -209,8 +216,12 @@ impl From<BatchSwapOutputData> for pb::BatchSwapOutputData { unfilled_1: Some(s.unfilled_1.into()), unfilled_2: Some(s.unfilled_2.into()), height: s.height, - epoch_starting_height: s.epoch_starting_height, trading_pair: Some(s.trading_pair.into()), + sct_position_prefix: s.sct_position_prefix.into(), + // Deprecated fields we explicitly fill with defaults. + // We could instead use a `..Default::default()` here, but that would silently + // work if we were to add fields to the domain type.
+ epoch_starting_height: Default::default(), } } } @@ -276,6 +287,14 @@ impl From<BatchSwapOutputData> for pb::BatchSwapOutputDataResponse { impl TryFrom<pb::BatchSwapOutputData> for BatchSwapOutputData { type Error = anyhow::Error; fn try_from(s: pb::BatchSwapOutputData) -> Result<Self, Self::Error> { + let sct_position_prefix = { + let prefix = Position::from(s.sct_position_prefix); + anyhow::ensure!( + prefix.commitment() == 0, + "sct_position_prefix.commitment() != 0" + ); + prefix + }; Ok(Self { delta_1: s .delta_1 @@ -306,7 +325,7 @@ impl TryFrom<pb::BatchSwapOutputData> for BatchSwapOutputData { .trading_pair .ok_or_else(|| anyhow!("Missing trading_pair"))? .try_into()?, - epoch_starting_height: s.epoch_starting_height, + sct_position_prefix, }) } } @@ -421,9 +440,9 @@ mod tests { lambda_2: Amount::from(1u32), unfilled_1: Amount::from(1u32), unfilled_2: Amount::from(1u32), - height: 1, + height: 0, trading_pair, - epoch_starting_height: 1, + sct_position_prefix: 0u64.into(), }, } } @@ -444,7 +463,7 @@ mod tests { unfilled_2: Amount::from(50u64), height: 0u64, trading_pair, - epoch_starting_height: 0u64, + sct_position_prefix: 0u64.into(), }; // Now suppose our user's contribution is: diff --git a/crates/core/component/dex/src/component/circuit_breaker/value.rs b/crates/core/component/dex/src/component/circuit_breaker/value.rs index bc5c06a8b1..f911aa8b12 100644 --- a/crates/core/component/dex/src/component/circuit_breaker/value.rs +++ b/crates/core/component/dex/src/component/circuit_breaker/value.rs @@ -161,7 +161,7 @@ mod tests { unfilled_2: 0u64.into(), height: 1, trading_pair: pair_1.into_directed_trading_pair().into(), - epoch_starting_height: 0, + sct_position_prefix: Default::default(), }, None, None, @@ -250,7 +250,7 @@ mod tests { let routing_params = state.routing_params().await.unwrap(); // This call should panic due to the outflow of gn not being covered by the circuit breaker. state - .handle_batch_swaps(trading_pair, swap_flow, 0, 0, routing_params) + .handle_batch_swaps(trading_pair, swap_flow, 0, routing_params) .await .expect("unable to process batch swaps"); } diff --git a/crates/core/component/dex/src/component/dex.rs b/crates/core/component/dex/src/component/dex.rs index bdb9ada0c5..0172e34c62 100644 --- a/crates/core/component/dex/src/component/dex.rs +++ b/crates/core/component/dex/src/component/dex.rs @@ -7,7 +7,6 @@ use cnidarium_component::Component; use penumbra_asset::{asset, Value, STAKING_TOKEN_ASSET_ID}; use penumbra_num::Amount; use penumbra_proto::{StateReadProto, StateWriteProto}; -use penumbra_sct::component::clock::EpochRead; use tendermint::v0_37::abci; use tracing::instrument; @@ -56,7 +55,6 @@ impl Component for Dex { // 2. For each batch swap during the block, calculate clearing prices and set in the JMT. - let current_epoch = state.get_current_epoch().await.expect("epoch is set"); let routing_params = state.routing_params().await.expect("dex params are set"); for (trading_pair, swap_flows) in state.swap_flows() { @@ -69,7 +67,6 @@ .height .try_into() .expect("height is part of the end block data"), - current_epoch.start_height, // Always include both ends of the target pair as fixed candidates.
routing_params .clone() diff --git a/crates/core/component/dex/src/component/router/route_and_fill.rs b/crates/core/component/dex/src/component/router/route_and_fill.rs index b18a786200..41445ae23c 100644 --- a/crates/core/component/dex/src/component/router/route_and_fill.rs +++ b/crates/core/component/dex/src/component/router/route_and_fill.rs @@ -5,6 +5,7 @@ use async_trait::async_trait; use cnidarium::StateWrite; use penumbra_asset::{asset, Value}; use penumbra_num::Amount; +use penumbra_sct::component::clock::EpochRead; use tracing::instrument; use crate::{ @@ -23,21 +24,13 @@ use super::fill_route::FillError; /// a block's batch swap flows. #[async_trait] pub trait HandleBatchSwaps: StateWrite + Sized { - #[instrument(skip( - self, - trading_pair, - batch_data, - block_height, - epoch_starting_height, - params - ))] + #[instrument(skip(self, trading_pair, batch_data, block_height, params))] async fn handle_batch_swaps( self: &mut Arc<Self>, trading_pair: TradingPair, batch_data: SwapFlow, - // TODO: why not read these 2 from the state? + // This will be read from the ABCI request block_height: u64, - epoch_starting_height: u64, params: RoutingParams, ) -> Result<()> where @@ -95,9 +88,9 @@ pub trait HandleBatchSwaps: StateWrite + Sized { ), None => (0u64.into(), delta_2), }; + let epoch = self.get_current_epoch().await.expect("epoch is set"); let output_data = BatchSwapOutputData { height: block_height, - epoch_starting_height, trading_pair, delta_1, delta_2, @@ -105,6 +98,15 @@ pub trait HandleBatchSwaps: StateWrite + Sized { lambda_2, unfilled_1, unfilled_2, + sct_position_prefix: ( + u16::try_from(epoch.index).expect("epoch index should be small enough"), + // The block index is determined by looking at how many blocks have elapsed since + // the start of the epoch. + u16::try_from(block_height - epoch.start_height) + .expect("block index should be small enough"), + 0, + ) + .into(), }; // Fetch the swap execution object that should have been modified during the routing and filling.
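The `sct_position_prefix` construction in `handle_batch_swaps` above is the heart of this change: instead of carrying `epoch_starting_height`, a batch swap output data record now carries the SCT position prefix `(epoch, block_within_epoch, 0)`. Below is a small sketch of the round trip, using only the `penumbra_tct::Position` operations that appear in this diff (tuple construction, `epoch()`, `block()`, `commitment()`); it assumes `penumbra-tct` is on the dependency list, as the `pd` manifest change above adds, and is an illustration rather than a test from the repo:

```rust
use penumbra_tct::Position;

fn main() {
    // Suppose the current epoch has index 42 and started at height 1_000,
    // and the batch swap executed at height 1_007.
    let epoch_index: u16 = 42;
    let block_within_epoch: u16 = 7; // block_height - epoch.start_height

    // Same construction as in `handle_batch_swaps`: the commitment index
    // of a batch swap's position prefix is always zero.
    let prefix = Position::from((epoch_index, block_within_epoch, 0));

    // The swap-claim circuit now compares exactly these two components
    // against the claimed swap commitment's position.
    assert_eq!(prefix.epoch(), 42);
    assert_eq!(prefix.block(), 7);

    // And `TryFrom<pb::BatchSwapOutputData>` rejects any prefix whose
    // commitment index is nonzero.
    assert_eq!(prefix.commitment(), 0);
}
```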
diff --git a/crates/core/component/dex/src/component/router/tests.rs b/crates/core/component/dex/src/component/router/tests.rs index ccb530b738..0c26b602f6 100644 --- a/crates/core/component/dex/src/component/router/tests.rs +++ b/crates/core/component/dex/src/component/router/tests.rs @@ -1024,7 +1024,7 @@ async fn best_position_route_and_fill() -> anyhow::Result<()> { .unwrap(); let routing_params = state.routing_params().await.unwrap(); state - .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), 0, routing_params) + .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), routing_params) .await .expect("unable to process batch swaps"); @@ -1165,7 +1165,7 @@ async fn multi_hop_route_and_fill() -> anyhow::Result<()> { .unwrap(); let routing_params = state.routing_params().await.unwrap(); state - .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), 0, routing_params) + .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), routing_params) .await .expect("unable to process batch swaps"); diff --git a/crates/core/component/dex/src/component/tests.rs b/crates/core/component/dex/src/component/tests.rs index 5b16b09418..409a9849e5 100644 --- a/crates/core/component/dex/src/component/tests.rs +++ b/crates/core/component/dex/src/component/tests.rs @@ -632,7 +632,7 @@ async fn swap_execution_tests() -> anyhow::Result<()> { .unwrap(); let routing_params = state.routing_params().await.unwrap(); state - .handle_batch_swaps(trading_pair, swap_flow, 0, 0, routing_params) + .handle_batch_swaps(trading_pair, swap_flow, 0, routing_params) .await .expect("unable to process batch swaps"); @@ -740,7 +740,7 @@ async fn swap_execution_tests() -> anyhow::Result<()> { .unwrap(); let routing_params = state.routing_params().await.unwrap(); state - .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), 0, routing_params) + .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), routing_params) .await .expect("unable to process batch swaps"); @@ -756,8 +756,8 @@ async fn swap_execution_tests() -> anyhow::Result<()> { unfilled_1: 0u32.into(), unfilled_2: 0u32.into(), height: 0, - epoch_starting_height: 0, trading_pair, + sct_position_prefix: Default::default(), } ); diff --git a/crates/core/component/dex/src/swap/plaintext.rs b/crates/core/component/dex/src/swap/plaintext.rs index 8bde2f5db6..45e5ce66b5 100644 --- a/crates/core/component/dex/src/swap/plaintext.rs +++ b/crates/core/component/dex/src/swap/plaintext.rs @@ -70,7 +70,7 @@ impl SwapPlaintext { batch_data.pro_rata_outputs((self.delta_1_i, self.delta_2_i)); let output_1_note = Note::from_parts( - self.claim_address, + self.claim_address.clone(), Value { amount: lambda_1_i, asset_id: self.trading_pair.asset_1(), @@ -80,7 +80,7 @@ impl SwapPlaintext { .expect("claim address is valid"); let output_2_note = Note::from_parts( - self.claim_address, + self.claim_address.clone(), Value { amount: lambda_2_i, asset_id: self.trading_pair.asset_2(), @@ -344,7 +344,7 @@ impl From<&SwapPlaintext> for [u8; SWAP_LEN_BYTES] { bytes[80..96].copy_from_slice(&swap.delta_2_i.to_le_bytes()); bytes[96..112].copy_from_slice(&swap.claim_fee.0.amount.to_le_bytes()); bytes[112..144].copy_from_slice(&swap.claim_fee.0.asset_id.to_bytes()); - let pb_address = pb_keys::Address::from(swap.claim_address); + let pb_address = pb_keys::Address::from(swap.claim_address.clone()); bytes[144..224].copy_from_slice(&pb_address.inner); bytes[224..256].copy_from_slice(&swap.rseed.to_bytes()); bytes diff --git a/crates/core/component/dex/src/swap_claim/proof.rs 
b/crates/core/component/dex/src/swap_claim/proof.rs index c8698d131c..7d86071e65 100644 --- a/crates/core/component/dex/src/swap_claim/proof.rs +++ b/crates/core/component/dex/src/swap_claim/proof.rs @@ -123,11 +123,16 @@ fn check_satisfaction( anyhow::bail!("claim fee did not match public input"); } - let block: u64 = private.state_commitment_proof.position().block().into(); - let note_commitment_block_height: u64 = public.output_data.epoch_starting_height + block; - if note_commitment_block_height != public.output_data.height { - anyhow::bail!("swap commitment height did not match public input"); - } + anyhow::ensure!( + private.state_commitment_proof.position().block() + == public.output_data.sct_position_prefix.block(), + "scm block did not match batch swap" + ); + anyhow::ensure!( + private.state_commitment_proof.position().epoch() + == public.output_data.sct_position_prefix.epoch(), + "scm epoch did not match batch swap" + ); if private.swap_plaintext.trading_pair != public.output_data.trading_pair { anyhow::bail!("trading pair did not match public input"); @@ -255,12 +260,12 @@ impl ConstraintSynthesizer<Fq> for SwapClaimCircuit { claimed_fee_var.enforce_equal(&swap_plaintext_var.claim_fee)?; // Validate the swap commitment's height matches the output data's height (i.e. the clearing price height). - let block = position_var.block()?; - let note_commitment_block_height_var = - output_data_var.epoch_starting_height.clone() + block; output_data_var - .height - .enforce_equal(&note_commitment_block_height_var)?; + .block_within_epoch + .enforce_equal(&position_var.block()?)?; + output_data_var + .epoch + .enforce_equal(&position_var.epoch()?)?; // Validate that the output data's trading pair matches the note commitment's trading pair. output_data_var @@ -359,7 +364,7 @@ impl DummyWitness for SwapClaimCircuit { unfilled_2: Amount::from(10u64), height: 0, trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: 0, + sct_position_prefix: Default::default(), }; let note_blinding_1 = Fq::from(1); let note_blinding_2 = Fq::from(1); @@ -642,7 +647,7 @@ mod tests { unfilled_2: test_bsod.unfilled_2, height: height.into(), trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: (epoch_duration * position.epoch()).into(), + sct_position_prefix: Default::default(), }; let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i)); @@ -774,7 +779,7 @@ mod tests { unfilled_2: test_bsod.unfilled_2, height: height.into(), trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: (epoch_duration * position.epoch()).into(), + sct_position_prefix: Default::default() }; let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i)); @@ -874,7 +879,7 @@ mod tests { unfilled_2: test_bsod.unfilled_2, height: height.into(), trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: (epoch_duration * dummy_position.epoch()).into(), + sct_position_prefix: Default::default() }; let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i)); diff --git a/crates/core/component/governance/src/delegator_vote/proof.rs b/crates/core/component/governance/src/delegator_vote/proof.rs index 201afd1ed2..f4a61fe267 100644 --- a/crates/core/component/governance/src/delegator_vote/proof.rs +++ b/crates/core/component/governance/src/delegator_vote/proof.rs @@ -453,7 +453,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness),
).expect("should be able to create note"); @@ -469,7 +469,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("can insert note commitment into SCT"); } @@ -480,7 +480,7 @@ mod tests { // All proposals should have a position commitment index of zero, so we need to end the epoch // and get the position that corresponds to the first commitment in the new epoch. sct.end_epoch().expect("should be able to end an epoch"); - let first_note_commitment = Note::from_parts(sender, value_to_send, Rseed([u8::MAX; 32])).expect("can create note").commit(); + let first_note_commitment = Note::from_parts(sender.clone(), value_to_send, Rseed([u8::MAX; 32])).expect("can create note").commit(); sct.insert(tct::Witness::Keep, first_note_commitment).expect("can insert note commitment into SCT"); let start_position = sct.witness(first_note_commitment).expect("can witness note commitment").position(); @@ -529,7 +529,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -545,7 +545,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("can insert note commitment into SCT"); } diff --git a/crates/core/component/shielded-pool/src/component/note_manager.rs b/crates/core/component/shielded-pool/src/component/note_manager.rs index dd221fac73..a4d13ef343 100644 --- a/crates/core/component/shielded-pool/src/component/note_manager.rs +++ b/crates/core/component/shielded-pool/src/component/note_manager.rs @@ -52,7 +52,7 @@ pub trait NoteManager: StateWrite { .as_bytes()[0..32] .try_into()?; - let note = Note::from_parts(*address, value, Rseed(rseed_bytes))?; + let note = Note::from_parts(address.clone(), value, Rseed(rseed_bytes))?; self.add_note_payload(note.payload(), source).await; Ok(()) diff --git a/crates/core/component/shielded-pool/src/note.rs b/crates/core/component/shielded-pool/src/note.rs index 930cb089de..c1575443b6 100644 --- a/crates/core/component/shielded-pool/src/note.rs +++ b/crates/core/component/shielded-pool/src/note.rs @@ -137,7 +137,7 @@ impl Note { Ok(Note { value, rseed, - address, + address: address.clone(), transmission_key_s: Fq::from_bytes(address.transmission_key().0) .map_err(|_| Error::InvalidTransmissionKey)?, }) @@ -155,12 +155,12 @@ impl Note { /// random blinding factor. 
pub fn generate(rng: &mut (impl Rng + CryptoRng), address: &Address, value: Value) -> Self { let rseed = Rseed::generate(rng); - Note::from_parts(*address, value, rseed) + Note::from_parts(address.clone(), value, rseed) .expect("transmission key in address is always valid") } pub fn address(&self) -> Address { - self.address + self.address.clone() } pub fn diversified_generator(&self) -> decaf377::Element { diff --git a/crates/core/component/shielded-pool/src/output/plan.rs b/crates/core/component/shielded-pool/src/output/plan.rs index ef7308294f..86e31817f3 100644 --- a/crates/core/component/shielded-pool/src/output/plan.rs +++ b/crates/core/component/shielded-pool/src/output/plan.rs @@ -68,7 +68,7 @@ impl OutputPlan { } pub fn output_note(&self) -> Note { - Note::from_parts(self.dest_address, self.value, self.rseed) + Note::from_parts(self.dest_address.clone(), self.value, self.rseed) .expect("transmission key in address is always valid") } diff --git a/crates/core/component/shielded-pool/src/spend/proof.rs b/crates/core/component/shielded-pool/src/spend/proof.rs index a818fc8267..8852ad07c6 100644 --- a/crates/core/component/shielded-pool/src/spend/proof.rs +++ b/crates/core/component/shielded-pool/src/spend/proof.rs @@ -429,7 +429,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -445,7 +445,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } @@ -498,7 +498,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -514,7 +514,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } let incorrect_anchor = sct.root(); @@ -641,7 +641,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -657,12 +657,12 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } // 
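`Note::address()` now clones on every call. If hot paths only need to read the address, the crate could also expose a borrowing accessor alongside it; `address_ref` is a hypothetical addition sketched here (inside the same module, since the field is private), not part of this diff:

```rust
impl Note {
    /// Hypothetical borrowing accessor: read the address without cloning it.
    pub fn address_ref(&self) -> &Address {
        &self.address
    }
}
```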
Insert one more note commitment and witness it. let rseed = Rseed([num_commitments as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); let incorrect_position = sct.witness(dummy_note_commitment).expect("can witness note commitment").position(); @@ -715,7 +715,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -731,7 +731,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } @@ -784,7 +784,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -799,7 +799,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } @@ -853,7 +853,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -1015,7 +1015,7 @@ mod tests { let mut sct = tct::Tree::new(); for _ in 0..5 { - let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Keep, note_commitment).unwrap(); let anchor = sct.root(); let state_commitment_proof = sct.witness(note_commitment).unwrap(); @@ -1043,12 +1043,12 @@ mod tests { sct.end_block().expect("can end block"); for _ in 0..100 { - let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Forget, note_commitment).unwrap(); } for _ in 0..5 { - let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Keep, note_commitment).unwrap(); let anchor = sct.root(); let state_commitment_proof = sct.witness(note_commitment).unwrap(); @@ -1076,12 +1076,12 @@ mod tests { sct.end_epoch().expect("can end epoch"); for _ in 0..100 { - let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Forget, note_commitment).unwrap(); } for _ in 0..5 { - 
let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Keep, note_commitment).unwrap(); let anchor = sct.root(); let state_commitment_proof = sct.witness(note_commitment).unwrap(); diff --git a/crates/core/component/stake/src/component/rpc.rs b/crates/core/component/stake/src/component/rpc.rs index 2a4432024b..ad7a27ad87 100644 --- a/crates/core/component/stake/src/component/rpc.rs +++ b/crates/core/component/stake/src/component/rpc.rs @@ -1,22 +1,23 @@ use std::pin::Pin; -use async_stream::try_stream; use cnidarium::Storage; -use futures::{StreamExt, TryStreamExt}; +use futures::StreamExt; use penumbra_proto::{ core::component::stake::v1::{ query_service_server::QueryService, CurrentValidatorRateRequest, - CurrentValidatorRateResponse, ValidatorInfoRequest, ValidatorInfoResponse, - ValidatorPenaltyRequest, ValidatorPenaltyResponse, ValidatorStatusRequest, - ValidatorStatusResponse, + CurrentValidatorRateResponse, GetValidatorInfoRequest, GetValidatorInfoResponse, + ValidatorInfoRequest, ValidatorInfoResponse, ValidatorPenaltyRequest, + ValidatorPenaltyResponse, ValidatorStatusRequest, ValidatorStatusResponse, + ValidatorUptimeRequest, ValidatorUptimeResponse, }, DomainType, }; +use tap::{TapFallible, TapOptional}; use tonic::Status; -use tracing::instrument; +use tracing::{error_span, instrument, Instrument, Span}; -use super::{validator_handler::ValidatorDataRead, SlashingData}; -use crate::validator; +use super::{validator_handler::ValidatorDataRead, ConsensusIndexRead, SlashingData}; +use crate::validator::{Info, State}; // TODO: Hide this and only expose a Router? pub struct Server { @@ -31,6 +32,38 @@ impl Server { #[tonic::async_trait] impl QueryService for Server { + #[instrument(skip(self, request))] + async fn get_validator_info( + &self, + request: tonic::Request, + ) -> Result, tonic::Status> { + let state = self.storage.latest_snapshot(); + let GetValidatorInfoRequest { identity_key } = request.into_inner(); + + // Take the identity key from the inbound request. + let identity_key = identity_key + .ok_or_else(|| Status::invalid_argument("an identity key must be provided"))? + .try_into() + .tap_err(|error| tracing::debug!(?error, "request contained an invalid identity key")) + .map_err(|_| Status::invalid_argument("invalid identity key"))?; + + // Look up the information for the validator with the given identity key. + let info = state + .get_validator_info(&identity_key) + .await + .tap_err(|error| tracing::error!(?error, %identity_key, "failed to get validator info")) + .map_err(|_| Status::invalid_argument("failed to get validator info"))? + .tap_none(|| tracing::debug!(%identity_key, "validator info was not found")) + .ok_or_else(|| Status::not_found("validator info was not found"))?; + + // Construct the outbound response. + let resp = GetValidatorInfoResponse { + validator_info: Some(info.to_proto()), + }; + + Ok(tonic::Response::new(resp)) + } + type ValidatorInfoStream = Pin> + Send>>; @@ -39,38 +72,67 @@ impl QueryService for Server { &self, request: tonic::Request, ) -> Result, Status> { - let state = self.storage.latest_snapshot(); + use futures::TryStreamExt; + + // Get the latest snapshot from the backing storage, and determine whether or not the + // response should include inactive validator definitions. 
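The `get_validator_info` handler above leans on the `tap` crate to log failures in passing before mapping them into `tonic::Status` codes. A minimal, self-contained sketch of that pattern; the lookup table here is a stand-in for the real state access:

```rust
use tap::TapOptional;

/// Look up an entry, logging a miss at debug level before surfacing NOT_FOUND.
fn lookup(id: &str, table: &std::collections::HashMap<String, u64>) -> Result<u64, tonic::Status> {
    table
        .get(id)
        .copied()
        .tap_none(|| tracing::debug!(%id, "entry was not found"))
        .ok_or_else(|| tonic::Status::not_found("entry was not found"))
}
```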
+ let snapshot = self.storage.latest_snapshot(); + let ValidatorInfoRequest { show_inactive } = request.into_inner(); + + // Returns `true` if we should include a validator in the outbound response. + let filter_inactive = move |info: &Info| { + let should = match info.status.state { + State::Active => true, + _ if show_inactive => true, // Include other validators if the request asked us to. + _ => false, // Otherwise, skip this entry. + }; + futures::future::ready(should) + }; - let validators = state - .validator_definitions() // TODO(erwan): think through a UX for defined validators. Then we can remove `validator_list` entirely. - .await - .map_err(|e| tonic::Status::unavailable(format!("error listing validators: {e}")))?; + // Converts information about a validator into a RPC response. + let to_resp = |info: Info| { + let validator_info = Some(info.to_proto()); + ValidatorInfoResponse { validator_info } + }; + + // Creates a span that follows from the current tracing context. + let make_span = |identity_key| -> Span { + let span = error_span!("fetching validator information", %identity_key); + let current = Span::current(); + span.follows_from(current); + span + }; - let show_inactive = request.get_ref().show_inactive; - let s = try_stream! { - for v in validators { - let info = state.get_validator_info(&v.identity_key) + // Get a stream of identity keys corresponding to validators in the consensus set. + let consensus_set = snapshot + .consensus_set_stream() + .map_err(|e| format!("error getting consensus set: {e}")) + .map_err(Status::unavailable)?; + + // Adapt the stream of identity keys into a stream of validator information. + // Define a span indicating that the spawned future follows from the current context. + let validators = async_stream::try_stream! { + for await identity_key in consensus_set { + let identity_key = identity_key?; + let span = make_span(identity_key); + yield snapshot + .get_validator_info(&identity_key) + .instrument(span) .await? .expect("known validator must be present"); - // Slashed and inactive validators are not shown by default. - if !show_inactive && info.status.state != validator::State::Active { - continue; - } - yield info.to_proto(); } }; - Ok(tonic::Response::new( - s.map_ok(|info| ValidatorInfoResponse { - validator_info: Some(info), - }) - .map_err(|e: anyhow::Error| { - tonic::Status::unavailable(format!("error getting validator info: {e}")) - }) - // TODO: how do we instrument a Stream - //.instrument(Span::current()) - .boxed(), - )) + // Construct the outbound response. + let stream = validators + .try_filter(filter_inactive) + .map_ok(to_resp) + .map_err(|e: anyhow::Error| format!("error getting validator info: {e}")) + .map_err(Status::unavailable) + .into_stream() + .boxed(); + + Ok(tonic::Response::new(stream)) } #[instrument(skip(self, request))] @@ -146,4 +208,30 @@ impl QueryService for Server { None => Err(Status::not_found("current validator rate not found")), } } + + #[instrument(skip(self, request))] + async fn validator_uptime( + &self, + request: tonic::Request, + ) -> Result, Status> { + let state = self.storage.latest_snapshot(); + let identity_key = request + .into_inner() + .identity_key + .ok_or_else(|| tonic::Status::invalid_argument("empty message"))? 
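For reference, here is how a client might consume the refactored streaming RPC, assuming the generated `QueryServiceClient` is built with tonic's `transport` feature; the endpoint URL is a placeholder:

```rust
use penumbra_proto::core::component::stake::v1::{
    query_service_client::QueryServiceClient, ValidatorInfoRequest,
};

async fn list_validators() -> anyhow::Result<()> {
    let mut client = QueryServiceClient::connect("http://localhost:8080").await?;
    let mut stream = client
        .validator_info(ValidatorInfoRequest { show_inactive: true })
        .await?
        .into_inner();
    while let Some(resp) = stream.message().await? {
        println!("{:?}", resp.validator_info);
    }
    Ok(())
}
```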
+ .try_into() + .map_err(|_| tonic::Status::invalid_argument("invalid identity key"))?; + + let uptime_data = state + .get_validator_uptime(&identity_key) + .await + .map_err(|e| tonic::Status::internal(e.to_string()))?; + + match uptime_data { + Some(u) => Ok(tonic::Response::new(ValidatorUptimeResponse { + uptime: Some(u.into()), + })), + None => Err(Status::not_found("validator uptime not found")), + } + } } diff --git a/crates/core/component/stake/src/component/validator_handler/validator_manager.rs b/crates/core/component/stake/src/component/validator_handler/validator_manager.rs index ed68ab8538..caf1b78c67 100644 --- a/crates/core/component/stake/src/component/validator_handler/validator_manager.rs +++ b/crates/core/component/stake/src/component/validator_handler/validator_manager.rs @@ -8,9 +8,15 @@ use { }, StateReadExt as _, StateWriteExt as _, }, + event, rate::{BaseRateData, RateData}, state_key, - validator::{self, BondingState::*, State, State::*, Validator}, + validator::{ + self, + BondingState::*, + State::{self, *}, + Validator, + }, DelegationToken, IdentityKey, Penalty, Uptime, }, anyhow::{ensure, Result}, @@ -19,8 +25,10 @@ use { penumbra_asset::asset, penumbra_num::Amount, penumbra_proto::StateWriteProto, - penumbra_sct::component::clock::{EpochManager, EpochRead}, - penumbra_sct::component::StateReadExt as _, + penumbra_sct::component::{ + clock::{EpochManager, EpochRead}, + StateReadExt as _, + }, penumbra_shielded_pool::component::AssetRegistry, std::collections::BTreeMap, tendermint::abci::types::Misbehavior, @@ -83,6 +91,8 @@ pub trait ValidatorManager: StateWrite { /// Execute a legal state transition, updating the validator records and /// implementing the necessary side effects. /// + /// Returns a `(old_state, new_state)` tuple, corresponding to the executed transition. + /// /// # Errors /// This method errors on illegal state transitions, but will otherwise try to do what /// you ask it to do. It is the caller's responsibility to ensure that the state transitions @@ -94,7 +104,7 @@ pub trait ValidatorManager: StateWrite { &mut self, identity_key: &IdentityKey, new_state: validator::State, - ) -> Result<()> { + ) -> Result<(State, State)> { let old_state = self .get_validator_state(identity_key) .await? 
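`set_validator_state` now returns the `(old_state, new_state)` pair so callers can key side effects on the exact edge taken, as the tombstoning path below does. A standalone sketch of the pattern (these are toy types, not the component's):

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum State { Inactive, Active, Jailed, Tombstoned }

struct Machine { state: State }

impl Machine {
    /// Set the state, returning the transition that was executed.
    fn set_state(&mut self, new: State) -> (State, State) {
        let old = std::mem::replace(&mut self.state, new);
        (old, new)
    }
}

fn main() {
    let mut m = Machine { state: State::Active };
    // React only to transitions out of a live state:
    if let (State::Inactive | State::Jailed | State::Active, State::Tombstoned) =
        m.set_state(State::Tombstoned)
    {
        println!("record tombstone event");
    }
}
```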
@@ -118,7 +128,7 @@ pub trait ValidatorManager: StateWrite { identity_key: &IdentityKey, old_state: validator::State, new_state: validator::State, - ) -> Result<()> { + ) -> Result<(State, State)> { let validator_state_path = state_key::validators::state::by_id(identity_key); let current_height = self.get_block_height().await?; @@ -294,7 +304,7 @@ pub trait ValidatorManager: StateWrite { Self::state_machine_metrics(old_state, new_state); - Ok(()) + Ok((old_state, new_state)) } #[instrument(skip(self))] @@ -649,8 +659,20 @@ pub trait ValidatorManager: StateWrite { ) })?; - self.set_validator_state(&validator.identity_key, validator::State::Tombstoned) - .await + let (old_state, new_state) = self + .set_validator_state(&validator.identity_key, validator::State::Tombstoned) + .await?; + + if let (Inactive | Jailed | Active, Tombstoned) = (old_state, new_state) { + let current_height = self.get_block_height().await?; + self.record_proto(event::tombstone_validator( + current_height, + validator.identity_key.clone(), + evidence, + )); + } + + Ok(()) } fn state_machine_metrics(old_state: validator::State, new_state: validator::State) { diff --git a/crates/core/component/stake/src/component/validator_handler/validator_store.rs b/crates/core/component/stake/src/component/validator_handler/validator_store.rs index 3f30895d12..151571d47b 100644 --- a/crates/core/component/stake/src/component/validator_handler/validator_store.rs +++ b/crates/core/component/stake/src/component/validator_handler/validator_store.rs @@ -8,7 +8,7 @@ use crate::{ use anyhow::Result; use async_trait::async_trait; use cnidarium::{StateRead, StateWrite}; -use futures::{Future, FutureExt, TryStreamExt}; +use futures::{Future, FutureExt}; use penumbra_num::Amount; use penumbra_proto::{state::future::DomainFuture, DomainType, StateReadProto, StateWriteProto}; use std::pin::Pin; @@ -227,23 +227,6 @@ pub trait ValidatorDataRead: StateRead { .map_ok(|opt: Option| opt.map(|v: Validator| v.consensus_key)) .boxed() } - - /// Returns a list of **all** known validators metadata. - async fn validator_definitions(&self) -> Result> { - self.prefix(state_key::validators::definitions::prefix()) - .map_ok(|(_key, validator)| validator) - .try_collect() - .await - } - - /// Returns a list of **all** known validators identity keys. 
- async fn validator_identity_keys(&self) -> Result> { - self.prefix(state_key::validators::definitions::prefix()) - .map_ok(|(_key, validator)| validator) - .map_ok(|validator: Validator| validator.identity_key) - .try_collect() - .await - } } impl ValidatorDataRead for T {} diff --git a/crates/core/component/stake/src/event.rs b/crates/core/component/stake/src/event.rs index 069c249b47..f6530f016c 100644 --- a/crates/core/component/stake/src/event.rs +++ b/crates/core/component/stake/src/event.rs @@ -1,5 +1,6 @@ -use crate::{Delegate, Undelegate}; -use tendermint::abci::{Event, EventAttributeIndexExt}; +use crate::{Delegate, IdentityKey, Undelegate}; +use penumbra_proto::core::component::stake::v1 as pb; +use tendermint::abci::{types::Misbehavior, Event, EventAttributeIndexExt}; pub fn delegate(delegate: &Delegate) -> Event { Event::new( @@ -20,3 +21,17 @@ pub fn undelegate(undelegate: &Undelegate) -> Event { ], ) } + +pub fn tombstone_validator( + current_height: u64, + identity_key: IdentityKey, + evidence: &Misbehavior, +) -> pb::EventTombstoneValidator { + pb::EventTombstoneValidator { + evidence_height: evidence.height.value(), + current_height, + identity_key: Some(identity_key.into()), + address: evidence.validator.address.to_vec(), + voting_power: evidence.validator.power.value(), + } +} diff --git a/crates/core/component/stake/src/funding_stream.rs b/crates/core/component/stake/src/funding_stream.rs index 4a114d0968..67a63c4dd8 100644 --- a/crates/core/component/stake/src/funding_stream.rs +++ b/crates/core/component/stake/src/funding_stream.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; /// A destination for a portion of a validator's commission of staking rewards. #[allow(clippy::large_enum_variant)] -#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)] #[serde(try_from = "pb::FundingStream", into = "pb::FundingStream")] pub enum FundingStream { ToAddress { @@ -25,7 +25,7 @@ pub enum FundingStream { } #[allow(clippy::large_enum_variant)] -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum Recipient { Address(Address), CommunityPool, @@ -41,7 +41,7 @@ impl FundingStream { pub fn recipient(&self) -> Recipient { match self { - FundingStream::ToAddress { address, .. } => Recipient::Address(*address), + FundingStream::ToAddress { address, .. } => Recipient::Address(address.clone()), FundingStream::ToCommunityPool { .. } => Recipient::CommunityPool, } } diff --git a/crates/core/component/stake/src/uptime.rs b/crates/core/component/stake/src/uptime.rs index e44ab52f62..c52eaeadba 100644 --- a/crates/core/component/stake/src/uptime.rs +++ b/crates/core/component/stake/src/uptime.rs @@ -78,10 +78,35 @@ impl Uptime { self.signatures.iter_zeros().len() } + /// Enumerates the missed blocks over the window in terms of absolute block height. 
+ pub fn missed_blocks(&self) -> impl Iterator + '_ { + // The height of the next block to be recorded (not yet recorded): + let current_height = self.as_of_block_height; + // The length of the window of blocks being recorded: + let window_len = self.signatures.len(); + // The earliest height of a block that has been recorded: + let earliest_height = current_height.saturating_sub(window_len as u64 - 1); + // The range of block heights that have been recorded: + let all_heights = earliest_height..=current_height; + // Filter out the heights that were signed: + all_heights.filter_map(move |height| { + // Index the bit vector as the ring buffer that it is, and invert the bit corresponding + // to this height to find out whether it was missed: + let index = (height as usize) % window_len; + let signed = self.signatures[index]; + Some(height).filter(|_| !signed) + }) + } + /// Returns the block height up to which this tracker has recorded. pub fn as_of_height(&self) -> u64 { self.as_of_block_height } + + /// Returns the size of the window of blocks being recorded. + pub fn missed_blocks_window(&self) -> usize { + self.signatures.len() + } } impl DomainType for Uptime { @@ -121,6 +146,9 @@ impl TryFrom for Uptime { mod tests { use super::*; + use proptest::prelude::*; + use std::collections::VecDeque; + #[test] fn counts_missed_blocks() { let window = 128; @@ -142,6 +170,64 @@ mod tests { assert!(uptime.mark_height_as_signed(0, true).is_err()); } + /// Basic check that if we miss block 1, we report that we missed block 1. + #[test] + fn enumerate_missed_first_block() { + let window = 128; + let mut uptime = Uptime::new(0, window); + + // Mark the first block as missed + uptime.mark_height_as_signed(1, false).unwrap(); + let missed_blocks: Vec<_> = uptime.missed_blocks().collect(); + + // Check that exactly the first block is missed + assert_eq!(missed_blocks, vec![1]); + } + + proptest! { + /// Ensure that the `Uptime` struct simulates a fixed size queue of (height, signed) tuples, + /// and that the `missed_blocks` iterator returns the correct missed blocks. + #[test] + fn enumerate_uptime_simulates_bounded_queue( + (window_len, signed_blocks) in + (1..=16usize).prop_flat_map(move |window_len| { + proptest::collection::vec(proptest::bool::ANY, 0..window_len * 2) + .prop_map(move |signed_blocks| (window_len, signed_blocks)) + }) + ) { + // We're going to simulate the `Uptime` struct with a VecDeque of (height, signed) + // tuples whose length we will keep bounded by the window length. + let mut uptime = Uptime::new(0, window_len); + let mut simulated_uptime = VecDeque::new(); + + // For each (height, signed) tuple in our generated sequence, mark the height as signed + // or not signed. 
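A worked example of the ring-buffer arithmetic in `missed_blocks`: with a window of 4 and `as_of_block_height = 9`, the recorded heights are 6..=9, and height `h` lives at bit index `h % 4`. The sketch below mirrors the iterator's logic over a plain `bool` slice (the real struct uses a bit vector):

```rust
fn missed(window: &[bool], as_of_block_height: u64) -> Vec<u64> {
    let window_len = window.len() as u64;
    let earliest = as_of_block_height.saturating_sub(window_len - 1);
    (earliest..=as_of_block_height)
        .filter(|h| !window[(h % window_len) as usize])
        .collect()
}

fn main() {
    // bit index: 0      1     2      3
    // height:    8      9     6      7
    let window = [false, true, false, true];
    // Heights 7 and 9 were signed, so 6 and 8 were missed:
    assert_eq!(missed(&window, 9), vec![6, 8]);
}
```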
+ for (height, signed) in signed_blocks.into_iter().enumerate() { + // Convert the height to a u64 and add 1 because the `Uptime` struct starts out with + // an internal height of 0: + let height = height as u64 + 1; + // Mark it using the real `Uptime` struct: + uptime.mark_height_as_signed(height, signed).unwrap(); + // Mark it using our simulated `VecDeque`, taking care to keep its length bounded by + // the window length: + simulated_uptime.push_back((height, signed)); + if simulated_uptime.len() > window_len { + simulated_uptime.pop_front(); + } + } + + // Compare the missed blocks from the real `Uptime` struct with the simulated `VecDeque`: + let missed_blocks: Vec<_> = uptime.missed_blocks().collect(); + + // Retain only the heights from the simulated `VecDeque` that were not signed: + simulated_uptime.retain(|(_, signed)| !signed); + let simulated_missed_blocks: Vec<_> = + simulated_uptime.into_iter().map(|(height, _)| height).collect(); + + prop_assert_eq!(missed_blocks, simulated_missed_blocks); + } + } + #[test] fn proto_round_trip() { // make a weird size window diff --git a/crates/core/keys/src/address.rs b/crates/core/keys/src/address.rs index 616ec7a482..e6dc61e918 100644 --- a/crates/core/keys/src/address.rs +++ b/crates/core/keys/src/address.rs @@ -1,4 +1,9 @@ -use std::io::{Cursor, Read, Write}; +//! [Payment address][Address] facilities. + +use std::{ + io::{Cursor, Read, Write}, + sync::OnceLock, +}; use anyhow::Context; use ark_serialize::CanonicalDeserialize; @@ -16,29 +21,70 @@ pub use view::AddressView; use crate::{fmd, ka, keys::Diversifier}; +/// The length of an [`Address`] in bytes. pub const ADDRESS_LEN_BYTES: usize = 80; + /// Number of bits in the address short form divided by the number of bits per Bech32m character pub const ADDRESS_NUM_CHARS_SHORT_FORM: usize = 24; /// A valid payment address. -#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Eq, Serialize, Deserialize)] #[serde(try_from = "pb::Address", into = "pb::Address")] pub struct Address { + /// The address diversifier. d: Diversifier, - /// cached copy of the diversified base - g_d: decaf377::Element, + /// A cached copy of the diversified base. + g_d: OnceLock, + /// The public key for this payment address. + /// /// extra invariant: the bytes in pk_d should be the canonical encoding of an /// s value (whether or not it is a valid decaf377 encoding) /// this ensures we can use a PaymentAddress to form a note commitment, /// which involves hashing s as a field element. pk_d: ka::Public, - /// transmission key s value + /// The transmission key s value. transmission_key_s: Fq, + /// The clue key for this payment address. ck_d: fmd::ClueKey, } +impl std::cmp::PartialEq for Address { + fn eq( + &self, + rhs @ Self { + d: rhs_d, + g_d: rhs_g_d, + pk_d: rhs_pk_d, + transmission_key_s: rhs_transmission_key_s, + ck_d: rhs_ck_d, + }: &Self, + ) -> bool { + let lhs @ Self { + d: lhs_d, + g_d: lhs_g_d, + pk_d: lhs_pk_d, + transmission_key_s: lhs_transmission_key_s, + ck_d: lhs_ck_d, + } = self; + + // When a `OnceLock` value is compared, it will only call `get()`, refraining from + // initializing the value. To make sure that an address that *hasn't* yet accessed its + // diversified base is considered equal to an address that *has*, compute the base points + // if they have not already been generated. + lhs.diversified_generator(); + rhs.diversified_generator(); + + // Compare all of the fields. 
+ lhs_d.eq(rhs_d) + && lhs_g_d.eq(rhs_g_d) + && lhs_pk_d.eq(rhs_pk_d) + && lhs_transmission_key_s.eq(rhs_transmission_key_s) + && lhs_ck_d.eq(rhs_ck_d) + } +} + impl std::cmp::PartialOrd for Address { fn partial_cmp(&self, other: &Self) -> Option { Some(self.to_vec().cmp(&other.to_vec())) @@ -69,7 +115,7 @@ impl Address { // don't need an error type here, caller will probably .expect anyways Some(Self { d, - g_d: d.diversified_generator(), + g_d: OnceLock::new(), pk_d, ck_d, transmission_key_s, @@ -79,26 +125,36 @@ impl Address { } } + /// Returns a reference to the address diversifier. pub fn diversifier(&self) -> &Diversifier { &self.d } + /// Returns a reference to the diversified base. + /// + /// This method computes the diversified base if it has not been computed yet. This value is + /// cached after it has been computed once. pub fn diversified_generator(&self) -> &decaf377::Element { - &self.g_d + self.g_d + .get_or_init(|| self.diversifier().diversified_generator()) } + /// Returns a reference to the transmission key. pub fn transmission_key(&self) -> &ka::Public { &self.pk_d } + /// Returns a reference to the clue key. pub fn clue_key(&self) -> &fmd::ClueKey { &self.ck_d } + /// Returns a reference to the transmission key `s` value. pub fn transmission_key_s(&self) -> &Fq { &self.transmission_key_s } + /// Converts this address to a vector of bytes. pub fn to_vec(&self) -> Vec { let mut bytes = std::io::Cursor::new(Vec::new()); bytes @@ -114,7 +170,7 @@ impl Address { f4jumble(bytes.get_ref()).expect("can jumble") } - /// A randomized dummy address. + /// Generates a randomized dummy address. pub fn dummy(rng: &mut R) -> Self { loop { let mut diversifier_bytes = [0u8; 16]; @@ -151,7 +207,7 @@ impl Address { /// Compat (bech32 non-m) address format pub fn compat_encoding(&self) -> String { - let proto_address = pb::Address::from(*self); + let proto_address = pb::Address::from(self); bech32str::encode( &proto_address.inner, bech32str::compat_address::BECH32_PREFIX, @@ -166,6 +222,12 @@ impl DomainType for Address { impl From
for pb::Address { fn from(a: Address) -> Self { + Self::from(&a) + } +} + +impl From<&Address> for pb::Address { + fn from(a: &Address) -> Self { pb::Address { inner: a.to_vec(), // Always produce encodings without the alt format. @@ -193,7 +255,7 @@ impl TryFrom for Address { impl std::fmt::Display for Address { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let proto_address = pb::Address::from(*self); + let proto_address = pb::Address::from(self); f.write_str(&bech32str::encode( &proto_address.inner, bech32str::address::BECH32_PREFIX, @@ -286,6 +348,18 @@ impl TryFrom<&[u8]> for Address { } } +/// Assert the addresses are both [`Send`] and [`Sync`]. +// NB: allow dead code, because this block only contains compile-time assertions. +#[allow(dead_code)] +mod assert_address_is_send_and_sync { + fn is_send() {} + fn is_sync() {} + fn f() { + is_send::(); + is_sync::(); + } +} + #[cfg(test)] mod tests { use std::str::FromStr; @@ -316,7 +390,7 @@ mod tests { alt_bech32m: bech32m_addr, } .encode_to_vec(); - let proto_addr_direct: pb::Address = dest.into(); + let proto_addr_direct: pb::Address = dest.clone().into(); let addr_from_proto: Address = proto_addr_direct .try_into() .expect("can convert from proto back to address"); diff --git a/crates/core/keys/src/address/r1cs.rs b/crates/core/keys/src/address/r1cs.rs index 635b72ade3..069f53abc1 100644 --- a/crates/core/keys/src/address/r1cs.rs +++ b/crates/core/keys/src/address/r1cs.rs @@ -36,7 +36,7 @@ impl AllocVar for AddressVar { ) -> Result { let ns = cs.into(); let cs = ns.cs(); - let address: Address = *f()?.borrow(); + let address: Address = f()?.borrow().to_owned(); let diversified_generator: ElementVar = AllocVar::::new_variable( cs.clone(), diff --git a/crates/core/keys/src/address/view.rs b/crates/core/keys/src/address/view.rs index 3dcf9c28c2..fd1cf6db0a 100644 --- a/crates/core/keys/src/address/view.rs +++ b/crates/core/keys/src/address/view.rs @@ -11,7 +11,7 @@ use super::Address; /// /// This type allows working with addresses and address indexes without knowing /// the corresponding FVK. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(try_from = "pb::AddressView", into = "pb::AddressView")] pub enum AddressView { Opaque { @@ -27,8 +27,8 @@ pub enum AddressView { impl AddressView { pub fn address(&self) -> Address { match self { - AddressView::Opaque { address } => *address, - AddressView::Decoded { address, .. } => *address, + AddressView::Opaque { address } => address.clone(), + AddressView::Decoded { address, .. 
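The `OnceLock` field above is what removes `Address`'s `Copy` impl (and motivates the clone changes throughout this diff): the diversified base is derived on first use and cached thereafter. The same pattern in miniature, with `expensive_derivation` standing in for the real diversified-base computation:

```rust
use std::sync::OnceLock;

struct Cached {
    input: u64,
    derived: OnceLock<u64>,
}

impl Cached {
    /// Compute the derived value on first access; return the cache afterwards.
    fn derived(&self) -> &u64 {
        self.derived.get_or_init(|| expensive_derivation(self.input))
    }
}

fn expensive_derivation(x: u64) -> u64 {
    x.wrapping_mul(0x9E37_79B9_7F4A_7C15)
}

fn main() {
    let c = Cached { input: 7, derived: OnceLock::new() };
    assert_eq!(c.derived(), c.derived()); // derived once, cached thereafter
}
```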
} => address.clone(), } } } @@ -121,49 +121,57 @@ mod tests { let addr2_1 = fvk2.payment_address(1.into()).0; assert_eq!( - fvk1.view_address(addr1_0), + fvk1.view_address(addr1_0.clone()), AddressView::Decoded { - address: addr1_0, + address: addr1_0.clone(), index: 0.into(), wallet_id: fvk1.wallet_id(), } ); assert_eq!( - fvk2.view_address(addr1_0), - AddressView::Opaque { address: addr1_0 } + fvk2.view_address(addr1_0.clone()), + AddressView::Opaque { + address: addr1_0.clone() + } ); assert_eq!( - fvk1.view_address(addr1_1), + fvk1.view_address(addr1_1.clone()), AddressView::Decoded { - address: addr1_1, + address: addr1_1.clone(), index: 1.into(), wallet_id: fvk1.wallet_id(), } ); assert_eq!( - fvk2.view_address(addr1_1), - AddressView::Opaque { address: addr1_1 } + fvk2.view_address(addr1_1.clone()), + AddressView::Opaque { + address: addr1_1.clone() + } ); assert_eq!( - fvk1.view_address(addr2_0), - AddressView::Opaque { address: addr2_0 } + fvk1.view_address(addr2_0.clone()), + AddressView::Opaque { + address: addr2_0.clone() + } ); assert_eq!( - fvk2.view_address(addr2_0), + fvk2.view_address(addr2_0.clone()), AddressView::Decoded { - address: addr2_0, + address: addr2_0.clone(), index: 0.into(), wallet_id: fvk2.wallet_id(), } ); assert_eq!( - fvk1.view_address(addr2_1), - AddressView::Opaque { address: addr2_1 } + fvk1.view_address(addr2_1.clone()), + AddressView::Opaque { + address: addr2_1.clone() + } ); assert_eq!( - fvk2.view_address(addr2_1), + fvk2.view_address(addr2_1.clone()), AddressView::Decoded { - address: addr2_1, + address: addr2_1.clone(), index: 1.into(), wallet_id: fvk2.wallet_id(), } diff --git a/crates/core/transaction/src/memo.rs b/crates/core/transaction/src/memo.rs index 1f76604784..484655247a 100644 --- a/crates/core/transaction/src/memo.rs +++ b/crates/core/transaction/src/memo.rs @@ -66,7 +66,7 @@ impl MemoPlaintext { } pub fn return_address(&self) -> Address { - self.return_address + self.return_address.clone() } pub fn text(&self) -> &str { @@ -284,7 +284,7 @@ mod tests { // On the sender side, we have to encrypt the memo to put into the transaction-level, // and also the memo key to put on the action-level (output). let memo = MemoPlaintext { - return_address: dest, + return_address: dest.clone(), text: String::from("Hi"), }; let memo_key = PayloadKey::random_key(&mut OsRng); @@ -331,7 +331,7 @@ mod tests { // On the sender side, we have to encrypt the memo to put into the transaction-level, // and also the memo key to put on the action-level (output). - let memo = MemoPlaintext::new(dest, "Hello, friend".into())?; + let memo = MemoPlaintext::new(dest.clone(), "Hello, friend".into())?; let memo_key = PayloadKey::random_key(&mut OsRng); let ciphertext = MemoCiphertext::encrypt(memo_key.clone(), &memo).expect("can encrypt memo"); diff --git a/crates/core/transaction/src/plan.rs b/crates/core/transaction/src/plan.rs index dbbb70931e..3f36c7f42c 100644 --- a/crates/core/transaction/src/plan.rs +++ b/crates/core/transaction/src/plan.rs @@ -318,7 +318,9 @@ impl TransactionPlan { /// Convenience method to get all the destination addresses for each `OutputPlan`s. pub fn dest_addresses(&self) -> Vec
{ - self.output_plans().map(|plan| plan.dest_address).collect() + self.output_plans() + .map(|plan| plan.dest_address.clone()) + .collect() } /// Convenience method to get the number of `OutputPlan`s in this transaction. @@ -492,7 +494,7 @@ mod tests { .unwrap() .id(), }), - addr, + addr.clone(), ); let mut rng = OsRng; diff --git a/crates/crypto/proof-params/src/gen/swapclaim_id.rs b/crates/crypto/proof-params/src/gen/swapclaim_id.rs index 0293098666..e90d28aa7c 100644 --- a/crates/crypto/proof-params/src/gen/swapclaim_id.rs +++ b/crates/crypto/proof-params/src/gen/swapclaim_id.rs @@ -1,3 +1,3 @@ -pub const PROVING_KEY_ID: &'static str = "groth16pk1vs60etmlvwfzmn2ve0ljz0vfkzjlrhjpue5svm5ry6l076qukjcsw566rp"; -pub const VERIFICATION_KEY_ID: &'static str = "groth16vk18qjn0kxmypk8gmfc6zhjukhyxk0agmunfnhpxmf3yxq266q6sgaqwe94rc"; +pub const PROVING_KEY_ID: &'static str = "groth16pk1pfpj2hullzpeqzzyfqw85q03zz8mthht07zd3vkc562lfe776xgsvu3mfy"; +pub const VERIFICATION_KEY_ID: &'static str = "groth16vk1qyhwaxh5kq6lk2tm6fnxctynqqf7vt5j64u92zm8d8pndy7yap4qsyw855"; diff --git a/crates/crypto/proof-params/src/gen/swapclaim_pk.bin b/crates/crypto/proof-params/src/gen/swapclaim_pk.bin index 96b1d164b6..a401b19bc9 100644 --- a/crates/crypto/proof-params/src/gen/swapclaim_pk.bin +++ b/crates/crypto/proof-params/src/gen/swapclaim_pk.bin @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8501f1dad9ac85d80c6421b17b838f8c6478431babfa836d3d33b5398fa6b6ad -size 26003952 +oid sha256:1190707f9815bf0135169547b888716d6731cdbe1bc4ea2fbd22655a03fe56cd +size 25957872 diff --git a/crates/crypto/proof-params/src/gen/swapclaim_vk.param b/crates/crypto/proof-params/src/gen/swapclaim_vk.param index 4bd2b584a5..72be8023cd 100644 Binary files a/crates/crypto/proof-params/src/gen/swapclaim_vk.param and b/crates/crypto/proof-params/src/gen/swapclaim_vk.param differ diff --git a/crates/proto/src/gen/penumbra.core.component.dex.v1.rs b/crates/proto/src/gen/penumbra.core.component.dex.v1.rs index 644a2c7922..503e87f82f 100644 --- a/crates/proto/src/gen/penumbra.core.component.dex.v1.rs +++ b/crates/proto/src/gen/penumbra.core.component.dex.v1.rs @@ -508,8 +508,12 @@ pub struct BatchSwapOutputData { #[prost(message, optional, tag = "8")] pub trading_pair: ::core::option::Option, /// The starting block height of the epoch for which the batch swap data is valid. + #[deprecated] #[prost(uint64, tag = "9")] pub epoch_starting_height: u64, + /// The prefix (epoch, block) of the position where this batch swap occurred. 
+ #[prost(uint64, tag = "10")] + pub sct_position_prefix: u64, } impl ::prost::Name for BatchSwapOutputData { const NAME: &'static str = "BatchSwapOutputData"; diff --git a/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs b/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs index b6efda7985..4dd79b0281 100644 --- a/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs +++ b/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs @@ -614,6 +614,9 @@ impl serde::Serialize for BatchSwapOutputData { if self.epoch_starting_height != 0 { len += 1; } + if self.sct_position_prefix != 0 { + len += 1; + } let mut struct_ser = serializer.serialize_struct("penumbra.core.component.dex.v1.BatchSwapOutputData", len)?; if let Some(v) = self.delta_1.as_ref() { struct_ser.serialize_field("delta1", v)?; @@ -644,6 +647,10 @@ impl serde::Serialize for BatchSwapOutputData { #[allow(clippy::needless_borrow)] struct_ser.serialize_field("epochStartingHeight", ToString::to_string(&self.epoch_starting_height).as_str())?; } + if self.sct_position_prefix != 0 { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("sctPositionPrefix", ToString::to_string(&self.sct_position_prefix).as_str())?; + } struct_ser.end() } } @@ -671,6 +678,8 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { "tradingPair", "epoch_starting_height", "epochStartingHeight", + "sct_position_prefix", + "sctPositionPrefix", ]; #[allow(clippy::enum_variant_names)] @@ -684,6 +693,7 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { Height, TradingPair, EpochStartingHeight, + SctPositionPrefix, __SkipField__, } impl<'de> serde::Deserialize<'de> for GeneratedField { @@ -715,6 +725,7 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { "height" => Ok(GeneratedField::Height), "tradingPair" | "trading_pair" => Ok(GeneratedField::TradingPair), "epochStartingHeight" | "epoch_starting_height" => Ok(GeneratedField::EpochStartingHeight), + "sctPositionPrefix" | "sct_position_prefix" => Ok(GeneratedField::SctPositionPrefix), _ => Ok(GeneratedField::__SkipField__), } } @@ -743,6 +754,7 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { let mut height__ = None; let mut trading_pair__ = None; let mut epoch_starting_height__ = None; + let mut sct_position_prefix__ = None; while let Some(k) = map_.next_key()? 
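Note that pbjson encodes 64-bit integers as decimal strings, so the new field surfaces as `"sctPositionPrefix": "<n>"` in JSON. A hedged sketch of the round trip, assuming `penumbra_proto` is built with its serde support and `serde_json` is available as a dependency:

```rust
use penumbra_proto::core::component::dex::v1::BatchSwapOutputData;

fn main() -> anyhow::Result<()> {
    let bsod = BatchSwapOutputData {
        sct_position_prefix: 42,
        ..Default::default()
    };
    // Zero/None fields are skipped, so only the prefix is emitted, as a string:
    assert_eq!(serde_json::to_string(&bsod)?, r#"{"sctPositionPrefix":"42"}"#);
    Ok(())
}
```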
{ match k { GeneratedField::Delta1 => { @@ -803,6 +815,14 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) ; } + GeneratedField::SctPositionPrefix => { + if sct_position_prefix__.is_some() { + return Err(serde::de::Error::duplicate_field("sctPositionPrefix")); + } + sct_position_prefix__ = + Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) + ; + } GeneratedField::__SkipField__ => { let _ = map_.next_value::()?; } @@ -818,6 +838,7 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { height: height__.unwrap_or_default(), trading_pair: trading_pair__, epoch_starting_height: epoch_starting_height__.unwrap_or_default(), + sct_position_prefix: sct_position_prefix__.unwrap_or_default(), }) } } diff --git a/crates/proto/src/gen/penumbra.core.component.stake.v1.rs b/crates/proto/src/gen/penumbra.core.component.stake.v1.rs index ea032a9fc1..80d1430b8f 100644 --- a/crates/proto/src/gen/penumbra.core.component.stake.v1.rs +++ b/crates/proto/src/gen/penumbra.core.component.stake.v1.rs @@ -592,6 +592,34 @@ impl ::prost::Name for Penalty { ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) } } +/// Requests information about a specific validator. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetValidatorInfoRequest { + /// The identity key of the validator. + #[prost(message, optional, tag = "2")] + pub identity_key: ::core::option::Option, +} +impl ::prost::Name for GetValidatorInfoRequest { + const NAME: &'static str = "GetValidatorInfoRequest"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetValidatorInfoResponse { + #[prost(message, optional, tag = "1")] + pub validator_info: ::core::option::Option, +} +impl ::prost::Name for GetValidatorInfoResponse { + const NAME: &'static str = "GetValidatorInfoResponse"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} /// Requests information on the chain's validators. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -703,6 +731,32 @@ impl ::prost::Name for CurrentValidatorRateResponse { ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorUptimeRequest { + #[prost(message, optional, tag = "2")] + pub identity_key: ::core::option::Option, +} +impl ::prost::Name for ValidatorUptimeRequest { + const NAME: &'static str = "ValidatorUptimeRequest"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorUptimeResponse { + #[prost(message, optional, tag = "1")] + pub uptime: ::core::option::Option, +} +impl ::prost::Name for ValidatorUptimeResponse { + const NAME: &'static str = "ValidatorUptimeResponse"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} /// Staking configuration data. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -763,6 +817,32 @@ impl ::prost::Name for GenesisContent { ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventTombstoneValidator { + /// The height at which the offense occurred. + #[prost(uint64, tag = "1")] + pub evidence_height: u64, + /// The height at which the evidence was processed. + #[prost(uint64, tag = "2")] + pub current_height: u64, + /// The validator identity key. + #[prost(message, optional, tag = "4")] + pub identity_key: ::core::option::Option, + /// The validator's Comet address. + #[prost(bytes = "vec", tag = "5")] + pub address: ::prost::alloc::vec::Vec, + /// The voting power for the validator. + #[prost(uint64, tag = "6")] + pub voting_power: u64, +} +impl ::prost::Name for EventTombstoneValidator { + const NAME: &'static str = "EventTombstoneValidator"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} /// Generated client implementations. #[cfg(feature = "rpc")] pub mod query_service_client { @@ -850,6 +930,37 @@ pub mod query_service_client { self.inner = self.inner.max_encoding_message_size(limit); self } + /// Queries for information about a specific validator. 
+ pub async fn get_validator_info( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/penumbra.core.component.stake.v1.QueryService/GetValidatorInfo", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "penumbra.core.component.stake.v1.QueryService", + "GetValidatorInfo", + ), + ); + self.inner.unary(req, path, codec).await + } /// Queries the current validator set, with filtering. pub async fn validator_info( &mut self, @@ -971,6 +1082,36 @@ pub mod query_service_client { ); self.inner.unary(req, path, codec).await } + pub async fn validator_uptime( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/penumbra.core.component.stake.v1.QueryService/ValidatorUptime", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "penumbra.core.component.stake.v1.QueryService", + "ValidatorUptime", + ), + ); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -981,6 +1122,14 @@ pub mod query_service_server { /// Generated trait containing gRPC methods that should be implemented for use with QueryServiceServer. #[async_trait] pub trait QueryService: Send + Sync + 'static { + /// Queries for information about a specific validator. + async fn get_validator_info( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the ValidatorInfo method. type ValidatorInfoStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, @@ -1016,6 +1165,13 @@ pub mod query_service_server { tonic::Response, tonic::Status, >; + async fn validator_uptime( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// Query operations for the staking component. 
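And the unary counterpart: fetching a single validator's info through the new `GetValidatorInfo` method. As in the streaming example earlier, the endpoint URL and tonic's `transport` feature are assumptions:

```rust
use penumbra_proto::core::component::stake::v1::{
    query_service_client::QueryServiceClient, GetValidatorInfoRequest,
};
use penumbra_proto::core::keys::v1::IdentityKey;

async fn fetch_one(identity_key: IdentityKey) -> anyhow::Result<()> {
    let mut client = QueryServiceClient::connect("http://localhost:8080").await?;
    let resp = client
        .get_validator_info(GetValidatorInfoRequest {
            identity_key: Some(identity_key),
        })
        .await?
        .into_inner();
    println!("{:?}", resp.validator_info);
    Ok(())
}
```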
#[derive(Debug)] @@ -1097,6 +1253,53 @@ pub mod query_service_server { fn call(&mut self, req: http::Request) -> Self::Future { let inner = self.inner.clone(); match req.uri().path() { + "/penumbra.core.component.stake.v1.QueryService/GetValidatorInfo" => { + #[allow(non_camel_case_types)] + struct GetValidatorInfoSvc(pub Arc); + impl< + T: QueryService, + > tonic::server::UnaryService + for GetValidatorInfoSvc { + type Response = super::GetValidatorInfoResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_validator_info(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetValidatorInfoSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/penumbra.core.component.stake.v1.QueryService/ValidatorInfo" => { #[allow(non_camel_case_types)] struct ValidatorInfoSvc(pub Arc); @@ -1284,6 +1487,52 @@ pub mod query_service_server { }; Box::pin(fut) } + "/penumbra.core.component.stake.v1.QueryService/ValidatorUptime" => { + #[allow(non_camel_case_types)] + struct ValidatorUptimeSvc(pub Arc); + impl< + T: QueryService, + > tonic::server::UnaryService + for ValidatorUptimeSvc { + type Response = super::ValidatorUptimeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::validator_uptime(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ValidatorUptimeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { Ok( diff --git a/crates/proto/src/gen/penumbra.core.component.stake.v1.serde.rs b/crates/proto/src/gen/penumbra.core.component.stake.v1.serde.rs index 639ecfbd62..b4b6f3d3bd 100644 --- a/crates/proto/src/gen/penumbra.core.component.stake.v1.serde.rs +++ b/crates/proto/src/gen/penumbra.core.component.stake.v1.serde.rs @@ -901,6 +901,185 @@ impl<'de> serde::Deserialize<'de> for DelegationChanges { deserializer.deserialize_struct("penumbra.core.component.stake.v1.DelegationChanges", 
FIELDS, GeneratedVisitor) } } +impl serde::Serialize for EventTombstoneValidator { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.evidence_height != 0 { + len += 1; + } + if self.current_height != 0 { + len += 1; + } + if self.identity_key.is_some() { + len += 1; + } + if !self.address.is_empty() { + len += 1; + } + if self.voting_power != 0 { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.EventTombstoneValidator", len)?; + if self.evidence_height != 0 { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("evidenceHeight", ToString::to_string(&self.evidence_height).as_str())?; + } + if self.current_height != 0 { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("currentHeight", ToString::to_string(&self.current_height).as_str())?; + } + if let Some(v) = self.identity_key.as_ref() { + struct_ser.serialize_field("identityKey", v)?; + } + if !self.address.is_empty() { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("address", pbjson::private::base64::encode(&self.address).as_str())?; + } + if self.voting_power != 0 { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("votingPower", ToString::to_string(&self.voting_power).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for EventTombstoneValidator { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "evidence_height", + "evidenceHeight", + "current_height", + "currentHeight", + "identity_key", + "identityKey", + "address", + "voting_power", + "votingPower", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + EvidenceHeight, + CurrentHeight, + IdentityKey, + Address, + VotingPower, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "evidenceHeight" | "evidence_height" => Ok(GeneratedField::EvidenceHeight), + "currentHeight" | "current_height" => Ok(GeneratedField::CurrentHeight), + "identityKey" | "identity_key" => Ok(GeneratedField::IdentityKey), + "address" => Ok(GeneratedField::Address), + "votingPower" | "voting_power" => Ok(GeneratedField::VotingPower), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = EventTombstoneValidator; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.stake.v1.EventTombstoneValidator") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut evidence_height__ = None; + let mut current_height__ = None; + let mut identity_key__ = None; + let mut address__ = None; + let mut voting_power__ = 
None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::EvidenceHeight => { + if evidence_height__.is_some() { + return Err(serde::de::Error::duplicate_field("evidenceHeight")); + } + evidence_height__ = + Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) + ; + } + GeneratedField::CurrentHeight => { + if current_height__.is_some() { + return Err(serde::de::Error::duplicate_field("currentHeight")); + } + current_height__ = + Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) + ; + } + GeneratedField::IdentityKey => { + if identity_key__.is_some() { + return Err(serde::de::Error::duplicate_field("identityKey")); + } + identity_key__ = map_.next_value()?; + } + GeneratedField::Address => { + if address__.is_some() { + return Err(serde::de::Error::duplicate_field("address")); + } + address__ = + Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0) + ; + } + GeneratedField::VotingPower => { + if voting_power__.is_some() { + return Err(serde::de::Error::duplicate_field("votingPower")); + } + voting_power__ = + Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) + ; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::()?; + } + } + } + Ok(EventTombstoneValidator { + evidence_height: evidence_height__.unwrap_or_default(), + current_height: current_height__.unwrap_or_default(), + identity_key: identity_key__, + address: address__.unwrap_or_default(), + voting_power: voting_power__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.stake.v1.EventTombstoneValidator", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for FundingStream { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result @@ -1342,6 +1521,198 @@ impl<'de> serde::Deserialize<'de> for GenesisContent { deserializer.deserialize_struct("penumbra.core.component.stake.v1.GenesisContent", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for GetValidatorInfoRequest { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.identity_key.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.GetValidatorInfoRequest", len)?; + if let Some(v) = self.identity_key.as_ref() { + struct_ser.serialize_field("identityKey", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for GetValidatorInfoRequest { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "identity_key", + "identityKey", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + IdentityKey, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "identityKey" | "identity_key" => Ok(GeneratedField::IdentityKey), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + 
impl serde::Serialize for FundingStream { #[allow(deprecated)] fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> @@ -1342,6 +1521,198 @@ impl<'de> serde::Deserialize<'de> for GenesisContent { deserializer.deserialize_struct("penumbra.core.component.stake.v1.GenesisContent", FIELDS, GeneratedVisitor) } }
+impl serde::Serialize for GetValidatorInfoRequest { + #[allow(deprecated)] + fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.identity_key.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.GetValidatorInfoRequest", len)?; + if let Some(v) = self.identity_key.as_ref() { + struct_ser.serialize_field("identityKey", v)?; + } + struct_ser.end() + } +}
+impl<'de> serde::Deserialize<'de> for GetValidatorInfoRequest { + #[allow(deprecated)] + fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "identity_key", + "identityKey", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + IdentityKey, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error> + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E> + where + E: serde::de::Error, + { + match value { + "identityKey" | "identity_key" => Ok(GeneratedField::IdentityKey), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GetValidatorInfoRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.stake.v1.GetValidatorInfoRequest") + } + + fn visit_map<V>(self, mut map_: V) -> std::result::Result<GetValidatorInfoRequest, V::Error> + where + V: serde::de::MapAccess<'de>, + { + let mut identity_key__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::IdentityKey => { + if identity_key__.is_some() { + return Err(serde::de::Error::duplicate_field("identityKey")); + } + identity_key__ = map_.next_value()?; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::<serde::de::IgnoredAny>()?; + } + } + } + Ok(GetValidatorInfoRequest { + identity_key: identity_key__, + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.stake.v1.GetValidatorInfoRequest", FIELDS, GeneratedVisitor) + } +}
+impl serde::Serialize for GetValidatorInfoResponse { + #[allow(deprecated)] + fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.validator_info.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.GetValidatorInfoResponse", len)?; + if let Some(v) = self.validator_info.as_ref() { + struct_ser.serialize_field("validatorInfo", v)?; + } + struct_ser.end() + } +}
+impl<'de> serde::Deserialize<'de> for GetValidatorInfoResponse { + #[allow(deprecated)] + fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "validator_info", + "validatorInfo", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + ValidatorInfo, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error> + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E> + where + E: serde::de::Error, + { + match value { + "validatorInfo" | "validator_info" => Ok(GeneratedField::ValidatorInfo), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GetValidatorInfoResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.stake.v1.GetValidatorInfoResponse") + } + + fn visit_map<V>(self, mut map_: V) -> std::result::Result<GetValidatorInfoResponse, V::Error> + where + V: serde::de::MapAccess<'de>, + { + let mut validator_info__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::ValidatorInfo => { + if validator_info__.is_some() { + return Err(serde::de::Error::duplicate_field("validatorInfo")); + } + validator_info__ = map_.next_value()?; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::<serde::de::IgnoredAny>()?; + } + } + } + Ok(GetValidatorInfoResponse { + validator_info: validator_info__, + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.stake.v1.GetValidatorInfoResponse", FIELDS, GeneratedVisitor) + } +}
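The two message types above back the new unary `GetValidatorInfo` RPC added to the stake query service (see the `stake.proto` change later in this diff). A hypothetical client call, assuming the tonic-generated `QueryServiceClient` is built into `penumbra-proto` and a pd gRPC endpoint is listening locally; the URL and the default identity key are placeholders:

```rust
use penumbra_proto::core::component::stake::v1::{
    query_service_client::QueryServiceClient, GetValidatorInfoRequest,
};

async fn get_validator_info() -> anyhow::Result<()> {
    // Hypothetical endpoint; point this at a real pd node in practice.
    let mut client = QueryServiceClient::connect("http://localhost:8080").await?;
    let resp = client
        .get_validator_info(GetValidatorInfoRequest {
            // A real validator IdentityKey would go here.
            identity_key: Some(Default::default()),
        })
        .await?
        .into_inner();
    println!("validator info: {:?}", resp.validator_info);
    Ok(())
}
```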
impl serde::Serialize for Penalty { #[allow(deprecated)] fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> @@ -4222,6 +4593,197 @@ impl<'de> serde::Deserialize<'de> for ValidatorStatusResponse { deserializer.deserialize_struct("penumbra.core.component.stake.v1.ValidatorStatusResponse", FIELDS, GeneratedVisitor) } }
+impl serde::Serialize for ValidatorUptimeRequest { + #[allow(deprecated)] + fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.identity_key.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.ValidatorUptimeRequest", len)?; + if let Some(v) = self.identity_key.as_ref() { + struct_ser.serialize_field("identityKey", v)?; + } + struct_ser.end() + } +}
+impl<'de> serde::Deserialize<'de> for ValidatorUptimeRequest { + #[allow(deprecated)] + fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "identity_key", + "identityKey", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + IdentityKey, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error> + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E> + where + E: serde::de::Error, + { + match value { + "identityKey" | "identity_key" => Ok(GeneratedField::IdentityKey), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = ValidatorUptimeRequest; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.stake.v1.ValidatorUptimeRequest") + } + + fn visit_map<V>(self, mut map_: V) -> std::result::Result<ValidatorUptimeRequest, V::Error> + where + V: serde::de::MapAccess<'de>, + { + let mut identity_key__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::IdentityKey => { + if identity_key__.is_some() { + return Err(serde::de::Error::duplicate_field("identityKey")); + } + identity_key__ = map_.next_value()?; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::<serde::de::IgnoredAny>()?; + } + } + } + Ok(ValidatorUptimeRequest { + identity_key: identity_key__, + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.stake.v1.ValidatorUptimeRequest", FIELDS, GeneratedVisitor) + } +}
+impl serde::Serialize for ValidatorUptimeResponse { + #[allow(deprecated)] + fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.uptime.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.ValidatorUptimeResponse", len)?; + if let Some(v) = self.uptime.as_ref() { + struct_ser.serialize_field("uptime", v)?; + } + struct_ser.end() + } +}
+impl<'de> serde::Deserialize<'de> for ValidatorUptimeResponse { + #[allow(deprecated)] + fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "uptime", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Uptime, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error> + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E> + where + E: serde::de::Error, + { + match value { + "uptime" => Ok(GeneratedField::Uptime), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = ValidatorUptimeResponse; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.stake.v1.ValidatorUptimeResponse") + } + + fn visit_map<V>(self, mut map_: V) -> std::result::Result<ValidatorUptimeResponse, V::Error> + where + V: serde::de::MapAccess<'de>, + { + let mut uptime__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::Uptime => { + if uptime__.is_some() { + return Err(serde::de::Error::duplicate_field("uptime")); + } + uptime__ = map_.next_value()?; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::<serde::de::IgnoredAny>()?; + } + } + } + Ok(ValidatorUptimeResponse { + uptime: uptime__, + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.stake.v1.ValidatorUptimeResponse", FIELDS, GeneratedVisitor) + } +}
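Note the `__SkipField__` arm in all of these generated deserializers: unknown JSON keys are consumed as `serde::de::IgnoredAny` instead of producing an error, so older clients tolerate payloads that carry newer fields. A small sketch of that behavior, assuming the generated `ValidatorUptimeRequest` and `serde_json`/`anyhow` are in scope:

```rust
use penumbra_proto::core::component::stake::v1::ValidatorUptimeRequest;

fn main() -> anyhow::Result<()> {
    // "someFutureField" is unknown; it is silently skipped rather than rejected.
    let req: ValidatorUptimeRequest =
        serde_json::from_str(r#"{"identityKey":null,"someFutureField":42}"#)?;
    assert!(req.identity_key.is_none());
    Ok(())
}
```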
impl serde::Serialize for ZkUndelegateClaimProof { #[allow(deprecated)] fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
diff --git a/crates/proto/src/gen/proto_descriptor.bin.no_lfs b/crates/proto/src/gen/proto_descriptor.bin.no_lfs index 9643941d17..030ecba81e 100644 Binary files a/crates/proto/src/gen/proto_descriptor.bin.no_lfs and b/crates/proto/src/gen/proto_descriptor.bin.no_lfs differ
diff --git a/crates/view/Cargo.toml b/crates/view/Cargo.toml index a61839cc98..b7b4800241 100644 --- a/crates/view/Cargo.toml +++ b/crates/view/Cargo.toml @@ -67,3 +67,4 @@ tonic = {workspace = true} tracing = {workspace = true} tracing-subscriber = {workspace = true} url = {workspace = true} +pbjson-types = { workspace = true }
diff --git a/crates/view/src/client.rs b/crates/view/src/client.rs index 4de810c8b6..92cc9b58a3 100644 --- a/crates/view/src/client.rs +++ b/crates/view/src/client.rs @@ -2,6 +2,8 @@ use std::{collections::BTreeMap, future::Future, pin::Pin}; use anyhow::Result; use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; +use pbjson_types::Any; +use penumbra_auction::auction::AuctionId; use tonic::{codegen::Bytes, Streaming}; use tracing::instrument; @@ -11,7 +13,7 @@ use penumbra_asset::{ ValueView, }; use penumbra_dex::{ - lp::position::{self}, + lp::position::{self, Position}, TradingPair, }; use penumbra_fee::GasPrices; @@ -48,6 +50,23 @@ pub(crate) type BroadcastStatusStream = Pin< /// enforce that it is a tower `Service`. #[allow(clippy::type_complexity)] pub trait ViewClient { + /// Query the auction state + fn auctions( + &mut self, + account_filter: Option<AddressIndex>, + include_inactive: bool, + query_latest_state: bool, + ) -> Pin< + Box< + dyn Future< + Output = Result< + Vec<(AuctionId, SpendableNoteRecord, Option<Any>, Vec<Position>)>, + >, + > + Send + + 'static, + >, + >; + /// Get the current status of chain sync. fn status( + &mut self, @@ -915,4 +934,67 @@ where } .boxed() } + + fn auctions( + &mut self, + account_filter: Option<AddressIndex>, + include_inactive: bool, + query_latest_state: bool, + ) -> Pin< + Box< + dyn Future< + Output = Result< + Vec<(AuctionId, SpendableNoteRecord, Option<Any>, Vec<Position>)>, + >, + > + Send + + 'static, + >, + > { + let mut client = self.clone(); + async move { + let request = tonic::Request::new(pb::AuctionsRequest { + account_filter: account_filter.map(Into::into), + include_inactive, + query_latest_state, + }); + + let auctions: Vec<pb::AuctionsResponse> = + ViewServiceClient::auctions(&mut client, request) + .await? + .into_inner() + .try_collect() + .await?; + + let resp: Vec<(AuctionId, SpendableNoteRecord, Option<Any>, Vec<Position>)> = + auctions + .into_iter() + .map(|auction_rsp| { + let pb_id = auction_rsp + .id + .ok_or_else(|| anyhow::anyhow!("missing auction id!!"))?; + let auction_id: AuctionId = pb_id.try_into()?; + let snr: SpendableNoteRecord = auction_rsp + .note_record + .ok_or_else(|| anyhow::anyhow!("missing SNR from auction response"))? + .try_into()?; + + let auction = auction_rsp.auction; + let lps: Vec<Position> = auction_rsp + .positions + .into_iter() + .map(TryInto::try_into) + .collect::<anyhow::Result<Vec<Position>>>()?; + + Ok::< + (AuctionId, SpendableNoteRecord, Option<Any>, Vec<Position>), + anyhow::Error, + >((auction_id, snr, auction, lps)) + }) + .filter_map(|res| res.ok()) // TODO: scrap this later. + .collect(); + + Ok(resp) + } + .boxed() + } }
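A sketch of driving the new `ViewClient::auctions` method, assuming `penumbra_view::ViewClient` is the exported trait and `view` is any implementation (for example a `ViewServiceClient` over a tonic channel):

```rust
use penumbra_view::ViewClient;

async fn list_auctions<V: ViewClient>(view: &mut V) -> anyhow::Result<()> {
    // No account filter, skip inactive auctions, and ask the fullnode for the
    // latest on-chain auction state plus any associated LP positions.
    let auctions = view.auctions(None, false, true).await?;
    for (id, note_record, state, positions) in auctions {
        println!(
            "auction {id:?}: note {:?}, state {:?}, {} positions",
            note_record.note_commitment,
            state,
            positions.len()
        );
    }
    Ok(())
}
```

Note that until the `filter_map(|res| res.ok())` TODO above is resolved, conversion failures are silently dropped rather than surfaced to callers.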
diff --git a/crates/view/src/planner.rs b/crates/view/src/planner.rs index bce94191d2..dcf87e7f52 100644 --- a/crates/view/src/planner.rs +++ b/crates/view/src/planner.rs @@ -714,7 +714,7 @@ impl Planner { // For any remaining provided balance, make a single change note for each for value in self.balance.provided().collect::<Vec<_>>() { - self.output(value, self_address); + self.output(value, self_address.clone()); } // All actions have now been added, so check to make sure that you don't build and submit an
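The `.clone()` here (and at the similar call sites in this diff) is needed because `Address` is no longer `Copy`: passing it by value inside a loop would move it on the first iteration. A minimal sketch with a stand-in `Address` type, not Penumbra's real one:

```rust
#[derive(Clone)] // Clone but not Copy, mirroring the change in this diff
struct Address([u8; 80]);

fn output(_value: u64, _dest: Address) {}

fn main() {
    let self_address = Address([0u8; 80]);
    for value in 0..3u64 {
        // Clone per iteration; passing `self_address` by value would
        // move it and fail to compile on the second pass.
        output(value, self_address.clone());
    }
}
```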
diff --git a/crates/view/src/service.rs b/crates/view/src/service.rs index 5f66711c15..327f90707b 100644 --- a/crates/view/src/service.rs +++ b/crates/view/src/service.rs @@ -9,7 +9,7 @@ use ark_std::UniformRand; use async_stream::try_stream; use camino::Utf8Path; use decaf377::Fq; -use futures::stream::{StreamExt, TryStreamExt}; +use futures::stream::{self, StreamExt, TryStreamExt}; use rand::Rng; use rand_core::OsRng; use tokio::sync::{watch, RwLock}; @@ -392,9 +392,70 @@ impl ViewService for ViewServer { async fn auctions( &self, - _request: tonic::Request<pb::AuctionsRequest>, + request: tonic::Request<pb::AuctionsRequest>, ) -> Result<tonic::Response<Self::AuctionsStream>, tonic::Status> { - unimplemented!("auctions") + use penumbra_proto::core::component::auction::v1alpha1 as pb_auction; + use penumbra_proto::core::component::auction::v1alpha1::query_service_client::QueryServiceClient as AuctionQueryServiceClient; + + let parameters = request.into_inner(); + let query_latest_state = parameters.query_latest_state; + let include_inactive = parameters.include_inactive; + + let account_filter = parameters + .account_filter + .to_owned() + .map(AddressIndex::try_from) + .map_or(Ok(None), |v| v.map(Some)) + .map_err(|_| tonic::Status::invalid_argument("invalid account filter"))?; + + let all_auctions = self + .storage + .fetch_auctions_by_account(account_filter, include_inactive) + .await + .map_err(|e| tonic::Status::internal(e.to_string()))?; + + let client = if query_latest_state { + Some( + AuctionQueryServiceClient::connect(self.node.to_string()) + .await + .map_err(|e| tonic::Status::internal(e.to_string()))?, + ) + } else { + None + }; + + let responses = + futures::future::join_all(all_auctions.into_iter().map(|(auction_id, note_record)| { + let maybe_client = client.clone(); + async move { + let (any_state, positions) = if let Some(mut client2) = maybe_client { + let extra_data = client2 + .auction_state_by_id(pb_auction::AuctionStateByIdRequest { + id: Some(auction_id.into()), + }) + .await + .map_err(|e| tonic::Status::internal(e.to_string()))? + .into_inner(); + (extra_data.auction, extra_data.positions) + } else { + (None, vec![]) + }; + + Result::<_, tonic::Status>::Ok(pb::AuctionsResponse { + id: Some(auction_id.into()), + note_record: Some(note_record.into()), + auction: any_state, + positions, + }) + } + })) + .await; + + let stream = stream::iter(responses) + .map_err(|e| tonic::Status::internal(format!("error getting auction: {e}"))) + .boxed(); + + Ok(Response::new(stream)) } async fn broadcast_transaction( @@ -865,12 +926,12 @@ impl ViewService for ViewServer { match action_view { ActionView::Spend(SpendView::Visible { note, ..
}) => { let address = note.address(); - address_views.insert(address, fvk.view_address(address)); + address_views.insert(address.clone(), fvk.view_address(address)); asset_ids.insert(note.asset_id()); } ActionView::Output(OutputView::Visible { note, .. }) => { let address = note.address(); - address_views.insert(address, fvk.view_address(address)); + address_views.insert(address.clone(), fvk.view_address(address.clone())); asset_ids.insert(note.asset_id()); // Also add an AddressView for the return address in the memo. @@ -880,8 +941,8 @@ impl ViewService for ViewServer { address_views.insert(memo.return_address(), fvk.view_address(address)); } ActionView::Swap(SwapView::Visible { swap_plaintext, .. }) => { - let address = swap_plaintext.claim_address; - address_views.insert(address, fvk.view_address(address)); + let address = swap_plaintext.claim_address.clone(); + address_views.insert(address.clone(), fvk.view_address(address)); asset_ids.insert(swap_plaintext.trading_pair.asset_1()); asset_ids.insert(swap_plaintext.trading_pair.asset_2()); } @@ -890,13 +951,13 @@ impl ViewService for ViewServer { }) => { // Both will be sent to the same address so this only needs to be added once let address = output_1.address(); - address_views.insert(address, fvk.view_address(address)); + address_views.insert(address.clone(), fvk.view_address(address)); asset_ids.insert(output_1.asset_id()); asset_ids.insert(output_2.asset_id()); } ActionView::DelegatorVote(DelegatorVoteView::Visible { note, .. }) => { let address = note.address(); - address_views.insert(address, fvk.view_address(address)); + address_views.insert(address.clone(), fvk.view_address(address)); asset_ids.insert(note.asset_id()); } _ => {} diff --git a/crates/view/src/storage.rs b/crates/view/src/storage.rs index 7b8b0819e7..da3b93ef07 100644 --- a/crates/view/src/storage.rs +++ b/crates/view/src/storage.rs @@ -1,11 +1,11 @@ -use std::str::FromStr; -use std::{collections::BTreeMap, num::NonZeroU64, sync::Arc, time::Duration}; +use std::{collections::BTreeMap, num::NonZeroU64, str::FromStr, sync::Arc, time::Duration}; use anyhow::{anyhow, Context}; use camino::Utf8Path; use decaf377::{FieldExt, Fq}; use once_cell::sync::Lazy; use parking_lot::Mutex; +use penumbra_auction::auction::AuctionId; use r2d2_sqlite::{ rusqlite::{OpenFlags, OptionalExtension}, SqliteConnectionManager, @@ -977,7 +977,6 @@ impl Storage { .query_and_then((), |row| row.try_into())? 
.collect::<anyhow::Result<Vec<_>>>()?; - // TODO: this could be internalized into the SQL query in principle, but it's easier to // do it this way; if it becomes slow, we can do it better let mut results = Vec::new(); @@ -1019,6 +1018,116 @@ impl Storage { Ok(()) } + pub async fn record_auction_with_state( + &self, + auction_id: AuctionId, + auction_state: u64, + ) -> anyhow::Result<()> { + let auction_id = auction_id.0.to_vec(); + let auction_state = auction_state; + + let pool = self.pool.clone(); + + spawn_blocking(move || { + let mut lock = pool.get()?; + let tx = lock.transaction()?; + tx.execute( + "INSERT OR IGNORE INTO auctions (auction_id, auction_state, note_commitment) VALUES (?1, ?2, NULL)", + (auction_id.clone(), auction_state), + )?; + tx.execute( + "UPDATE auctions SET auction_state = ?2 WHERE auction_id = ?1", + (auction_id, auction_state), + ) + .map_err(anyhow::Error::from)?; + + tx.commit()?; + Ok::<(), anyhow::Error>(()) + }) + .await??; + + Ok(()) + } + + pub async fn update_auction_with_note_commitment( + &self, + auction_id: AuctionId, + note_commitment: StateCommitment, + ) -> anyhow::Result<()> { + let auction_id = auction_id.0.to_vec(); + let blob_nc = note_commitment.0.to_bytes().to_vec(); + + let pool = self.pool.clone(); + + spawn_blocking(move || { + pool.get()? + .execute( + "UPDATE auctions SET (note_commitment) = ?1 WHERE auction_id = ?2", + (blob_nc, auction_id), + ) + .map_err(anyhow::Error::from) + }) + .await??; + + Ok(()) + } + + pub async fn fetch_auctions_by_account( + &self, + account_filter: Option<AddressIndex>, + include_inactive: bool, + ) -> anyhow::Result<Vec<(AuctionId, SpendableNoteRecord)>> { + let account_clause = account_filter + .map(|idx| { + format!( + "AND spendable_notes.address_index = x'{}'", + hex::encode(idx.to_bytes()) + ) + }) + .unwrap_or_else(|| "".to_string()); + + let active_clause = if !include_inactive { + "AND auctions.auction_state = 0" + } else { + "" + }; + + let query = format!( + "SELECT auctions.auction_id, spendable_notes.*, notes.* + FROM auctions + JOIN spendable_notes ON auctions.note_commitment = spendable_notes.note_commitment + JOIN notes ON auctions.note_commitment = notes.note_commitment + WHERE 1 = 1 + {account_clause} + {active_clause}", + account_clause = account_clause, + active_clause = active_clause, + ); + + let pool = self.pool.clone(); + + spawn_blocking(move || { + let mut conn = pool.get()?; + let tx = conn.transaction()?; + + let spendable_note_records: Vec<(AuctionId, SpendableNoteRecord)> = tx + .prepare(&query)? + .query_and_then((), |row| { + let raw_auction_id: Vec<u8> = row.get("auction_id")?; + let array_auction_id: [u8; 32] = raw_auction_id + .try_into() + .map_err(|_| anyhow!("auction id must be 32 bytes"))?; + let auction_id = AuctionId(array_auction_id); + let spendable_note_record: SpendableNoteRecord = row.try_into()?; + Ok((auction_id, spendable_note_record)) + })? + .collect::<anyhow::Result<Vec<_>>>()?; + + Ok(spendable_note_records) + }) + .await? + } + pub async fn record_unknown_asset(&self, id: asset::Id) -> anyhow::Result<()> { + let asset_id = id.to_bytes().to_vec(); + let denom = "Unknown".to_string();
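`fetch_auctions_by_account` assembles its SQL textually from two optional clauses. A sketch of the query it produces for a filtered, active-only request; the address index bytes here are illustrative, since the real clause hex-encodes `AddressIndex::to_bytes()`:

```rust
fn main() {
    // Illustrative 16-byte address index, hex-encoded into the clause.
    let account_clause = format!(
        "AND spendable_notes.address_index = x'{}'",
        "00".repeat(16)
    );
    let active_clause = "AND auctions.auction_state = 0";
    let query = format!(
        "SELECT auctions.auction_id, spendable_notes.*, notes.*
         FROM auctions
         JOIN spendable_notes ON auctions.note_commitment = spendable_notes.note_commitment
         JOIN notes ON auctions.note_commitment = notes.note_commitment
         WHERE 1 = 1 {account_clause} {active_clause}"
    );
    println!("{query}");
}
```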
diff --git a/crates/view/src/storage/schema.sql b/crates/view/src/storage/schema.sql index 838268431c..fc967e2e4e 100644 --- a/crates/view/src/storage/schema.sql +++ b/crates/view/src/storage/schema.sql @@ -128,3 +128,13 @@ CREATE TABLE positions ( position_state TEXT NOT NULL, trading_pair TEXT NOT NULL ); + +-- This table records the user's own auction state, using the +-- auction id as a primary key. An extra column is available +-- to cross-reference the note commitment associated with +-- the entry. +CREATE TABLE auctions ( + auction_id BLOB PRIMARY KEY NOT NULL, + auction_state BIGINT NOT NULL, + note_commitment BLOB +);
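The bare `auction_state` integer in this table is written by the worker below: 0 when an auction is scheduled (opened), 1 when it is ended (closed), and the auction NFT sequence number on withdrawal. A small decoder makes the mapping explicit; this is a sketch, and the enum and its names are ours, not part of this diff:

```rust
#[derive(Debug, PartialEq)]
enum AuctionState {
    Opened,
    Closed,
    Withdrawn { seq: u64 },
}

fn decode_state(raw: u64) -> AuctionState {
    match raw {
        0 => AuctionState::Opened,           // recorded on ActionDutchAuctionSchedule
        1 => AuctionState::Closed,           // recorded on ActionDutchAuctionEnd
        seq => AuctionState::Withdrawn { seq }, // recorded on ActionDutchAuctionWithdraw
    }
}

fn main() {
    assert_eq!(decode_state(0), AuctionState::Opened);
    assert_eq!(decode_state(2), AuctionState::Withdrawn { seq: 2 });
}
```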
diff --git a/crates/view/src/worker.rs b/crates/view/src/worker.rs index 0c472aaf98..395f91a150 100644 --- a/crates/view/src/worker.rs +++ b/crates/view/src/worker.rs @@ -5,6 +5,7 @@ use std::{ }; use anyhow::Context; +use penumbra_auction::auction::AuctionNft; use penumbra_compact_block::CompactBlock; use penumbra_dex::lp::{position, LpNft}; use penumbra_keys::FullViewingKey; @@ -257,7 +258,6 @@ impl Worker { let position_id = position_open.position.id(); // Record every possible permutation. - let lp_nft = LpNft::new(position_id, position::State::Opened); let _id = lp_nft.asset_id(); let denom = lp_nft.denom(); @@ -303,6 +303,46 @@ impl Worker { // Update the position record self.storage.update_position(position_id, state).await?; } + penumbra_transaction::Action::ActionDutchAuctionSchedule( + schedule_da, + ) => { + let auction_id = schedule_da.description.id(); + let auction_nft_opened = AuctionNft::new(auction_id, 0); + let nft_metadata_opened = auction_nft_opened.metadata.clone(); + + self.storage.record_asset(nft_metadata_opened).await?; + + self.storage + .record_auction_with_state( + schedule_da.description.id(), + 0u64, // Opened + ) + .await?; + } + penumbra_transaction::Action::ActionDutchAuctionEnd(end_da) => { + let auction_id = end_da.auction_id; + let auction_nft_closed = AuctionNft::new(auction_id, 1); + let nft_metadata_closed = auction_nft_closed.metadata.clone(); + + self.storage.record_asset(nft_metadata_closed).await?; + + self.storage + .record_auction_with_state(end_da.auction_id, 1) + .await?; + } + penumbra_transaction::Action::ActionDutchAuctionWithdraw( + withdraw_da, + ) => { + let auction_id = withdraw_da.auction_id; + let auction_nft_withdrawn = AuctionNft::new(auction_id, withdraw_da.seq); + let nft_metadata_withdrawn = auction_nft_withdrawn.metadata.clone(); + + self.storage.record_asset(nft_metadata_withdrawn).await?; + self.storage + .record_auction_with_state(auction_id, withdraw_da.seq) + .await?; + } _ => (), }; } @@ -310,14 +350,25 @@ impl Worker { // Record any new assets we detected. for note_record in filtered_block.new_notes.values() { - // If the asset is already known, skip it. - - if self + // If the asset is already known, skip it, unless there's useful information + // to cross-reference. + if let Some(note_denom) = self .storage .asset_by_id(&note_record.note.asset_id()) .await? - .is_some() { + // If the asset metadata is for an auction, we record the associated note commitment + // in the auction state table to cross-reference with SNRs. + if note_denom.is_auction_nft() { + let note_commitment = note_record.note_commitment; + let auction_nft: AuctionNft = note_denom.try_into()?; + self.storage + .update_auction_with_note_commitment( + auction_nft.id, + note_commitment, + ) + .await?; + } continue; } else { // If the asset is unknown, we may be able to query for its denom metadata and store that. @@ -347,7 +398,6 @@ impl Worker { } // Commit the block to the database. - self.storage .record_block( filtered_block.clone(),
diff --git a/crates/wallet/src/plan.rs b/crates/wallet/src/plan.rs index dd993ccb5b..2d438dced0 100644 --- a/crates/wallet/src/plan.rs +++ b/crates/wallet/src/plan.rs @@ -106,7 +106,7 @@ where let mut planner = Planner::new(rng); planner.fee(fee); for value in values.iter().cloned() { - planner.output(value, dest_address); + planner.output(value, dest_address.clone()); } let source_address = view.address_by_index(source_address_index).await?; planner
diff --git a/deployments/containerfiles/Dockerfile b/deployments/containerfiles/Dockerfile index 2bc06f2222..951be11f1e 100644 --- a/deployments/containerfiles/Dockerfile +++ b/deployments/containerfiles/Dockerfile @@ -1,6 +1,6 @@ -# N.B. the RUST_VERSION should match MSRV in crates/bin/pd/Cargo.toml -ARG RUST_VERSION=1.75.0 -FROM docker.io/rust:${RUST_VERSION}-slim-bookworm AS build-env +# We use the latest stable version of the official Rust container, +# delegating to the `rust-toolchain.toml` file to pick a specific Rust toolchain. +FROM docker.io/rust:1-slim-bookworm AS build-env # Install build dependencies. These packages should match what's recommended on # https://guide.penumbra.zone/main/pcli/install.html @@ -13,7 +13,8 @@ RUN apt-get update && apt-get install -y \ WORKDIR /usr/src/penumbra # Add rust dependency lockfiles first, to cache downloads. -COPY Cargo.lock Cargo.toml . +COPY Cargo.lock Cargo.toml rust-toolchain.toml . + # If any rust code changed, the cache will break on copying `crates/`. # Ideally we'd copy in all Cargo.toml files first, fetch, then copy crates. COPY crates ./crates @@ -21,9 +22,10 @@ COPY crates ./crates COPY assets ./assets # Copy in summonerd contribution orchestrator. COPY tools ./tools +# Download all workspace dependencies specified in Cargo.toml RUN cargo fetch -COPY . . # Build Penumbra binaries +COPY . . RUN cargo build --release # Runtime image.
diff --git a/docs/guide/src/dev/build.md b/docs/guide/src/dev/build.md index 8b47aa7fa9..87d24a53bc 100644 --- a/docs/guide/src/dev/build.md +++ b/docs/guide/src/dev/build.md @@ -13,6 +13,8 @@ of the Rust compiler, installation instructions for which you can find `cargo` is available in your `$PATH`! You can verify the rust compiler version by running `rustc --version` which should indicate version 1.75 or later. +The project uses a `rust-toolchain.toml` file, which ensures that your Rust toolchain +stays current enough to build the project from source.
### Installing build prerequisites diff --git a/flake.lock b/flake.lock index b552a9252b..e6c493870d 100644 --- a/flake.lock +++ b/flake.lock @@ -7,11 +7,11 @@ ] }, "locked": { - "lastModified": 1711681752, - "narHash": "sha256-LEg6/dmEFxx6Ygti5DO9MOhGNpyB7zdxdWtzv/FCTXk=", + "lastModified": 1713979152, + "narHash": "sha256-apdecPuh8SOQnkEET/kW/UcfjCRb8JbV5BKjoH+DcP4=", "owner": "ipetkov", "repo": "crane", - "rev": "ada0fb4dcce4561acb1eb17c59b7306d9d4a95f3", + "rev": "a5eca68a2cf11adb32787fc141cddd29ac8eb79c", "type": "github" }, "original": { @@ -25,11 +25,11 @@ "systems": "systems" }, "locked": { - "lastModified": 1705309234, - "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -40,11 +40,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1707268954, - "narHash": "sha256-2en1kvde3cJVc3ZnTy8QeD2oKcseLFjYPLKhIGDanQ0=", + "lastModified": 1714076141, + "narHash": "sha256-Drmja/f5MRHZCskS6mvzFqxEaZMeciScCTFxWVLqWEY=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f8e2ebd66d097614d51a56a755450d4ae1632df1", + "rev": "7bb2ccd8cdc44c91edba16c48d2c8f331fb3d856", "type": "github" }, "original": { @@ -72,11 +72,11 @@ ] }, "locked": { - "lastModified": 1712024007, - "narHash": "sha256-52cf+mHZJbSaDFdsBj6vN1hH52AXsMgEpS/ajzc9yQE=", + "lastModified": 1714097613, + "narHash": "sha256-044xbpBszupqN3nl/CGOCJtTQ4O6Aca81mJpX45i8/I=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "d45d957dc3c48792af7ce58eec5d84407655e8fa", + "rev": "2a42c742ab04b61d9b2f1edf392842cf9f27ebfd", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 101682614e..0ee500380b 100644 --- a/flake.nix +++ b/flake.nix @@ -23,10 +23,6 @@ let # Define versions of Penumbra and CometBFT penumbraRelease = null; # Use the local working copy - # penumbraRelease = { # Use a specific release - # version = "0.71.0"; - # sha256 = "sha256-2mpyBEt44UlXm6hahJG9sHGxj6nzh7z9lnj/vLtAAzs="; - # }; cometBftRelease = { version = "0.37.5"; sha256 = "sha256-wNVHsifieAtZgedavCEJLgG0kRDqUhG4Lk5ciTPoNzI="; @@ -107,7 +103,7 @@ devShells.default = craneLib.devShell { inherit LIBCLANG_PATH; inputsFrom = [ penumbra ]; - packages = [ cargo-watch ]; + packages = [ cargo-watch cargo-nextest protobuf ]; shellHook = '' export LIBCLANG_PATH=${LIBCLANG_PATH} export RUST_SRC_PATH=${pkgs.rustPlatform.rustLibSrc} # Required for rust-analyzer diff --git a/proto/penumbra/penumbra/core/component/dex/v1/dex.proto b/proto/penumbra/penumbra/core/component/dex/v1/dex.proto index 53fd9d9167..c72588e285 100644 --- a/proto/penumbra/penumbra/core/component/dex/v1/dex.proto +++ b/proto/penumbra/penumbra/core/component/dex/v1/dex.proto @@ -248,7 +248,9 @@ message BatchSwapOutputData { // The trading pair associated with the batch swap. TradingPair trading_pair = 8; // The starting block height of the epoch for which the batch swap data is valid. - uint64 epoch_starting_height = 9; + uint64 epoch_starting_height = 9 [deprecated = true]; + // The prefix (epoch, block) of the position where this batch swap occurred. + uint64 sct_position_prefix = 10; } // The trading function for a specific pair. 
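The new `sct_position_prefix` supersedes the deprecated `epoch_starting_height` by recording the (epoch, block) prefix of the state commitment tree position where the batch swap occurred. A sketch of unpacking it, assuming the TCT packs its `u64` position as 16 epoch bits, 16 block bits, and 32 commitment bits; that layout is an assumption here, so check the `penumbra-tct` crate for the authoritative encoding:

```rust
// Assumed layout: epoch in bits 48..64, block in bits 32..48,
// per-block commitment index in the low 32 bits (zeroed in a prefix).
fn epoch_block(sct_position_prefix: u64) -> (u16, u16) {
    let epoch = (sct_position_prefix >> 48) as u16;
    let block = (sct_position_prefix >> 32) as u16; // truncates to the low 16 bits
    (epoch, block)
}

fn main() {
    let prefix = (5u64 << 48) | (17u64 << 32); // epoch 5, block 17
    assert_eq!(epoch_block(prefix), (5, 17));
}
```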
diff --git a/proto/penumbra/penumbra/core/component/stake/v1/stake.proto b/proto/penumbra/penumbra/core/component/stake/v1/stake.proto index 7260c58cb7..deaeac0537 100644 --- a/proto/penumbra/penumbra/core/component/stake/v1/stake.proto +++ b/proto/penumbra/penumbra/core/component/stake/v1/stake.proto @@ -238,11 +238,24 @@ message Penalty { // Query operations for the staking component. service QueryService { + // Queries for information about a specific validator. + rpc GetValidatorInfo(GetValidatorInfoRequest) returns (GetValidatorInfoResponse); // Queries the current validator set, with filtering. rpc ValidatorInfo(ValidatorInfoRequest) returns (stream ValidatorInfoResponse); rpc ValidatorStatus(ValidatorStatusRequest) returns (ValidatorStatusResponse); rpc ValidatorPenalty(ValidatorPenaltyRequest) returns (ValidatorPenaltyResponse); rpc CurrentValidatorRate(CurrentValidatorRateRequest) returns (CurrentValidatorRateResponse); + rpc ValidatorUptime(ValidatorUptimeRequest) returns (ValidatorUptimeResponse); +} + +// Requests information about a specific validator. +message GetValidatorInfoRequest { + // The identity key of the validator. + core.keys.v1.IdentityKey identity_key = 2; +} + +message GetValidatorInfoResponse { + core.component.stake.v1.ValidatorInfo validator_info = 1; } // Requests information on the chain's validators. @@ -282,6 +295,14 @@ message CurrentValidatorRateResponse { core.component.stake.v1.RateData data = 1; } +message ValidatorUptimeRequest { + core.keys.v1.IdentityKey identity_key = 2; +} + +message ValidatorUptimeResponse { + Uptime uptime = 1; +} + // Staking configuration data. message StakeParameters { // The number of epochs an unbonding note is held for before being released. @@ -311,3 +332,16 @@ message GenesisContent { // The list of validators present at genesis. repeated stake.v1.Validator validators = 2; } + +message EventTombstoneValidator { + // The height at which the offense occurred. + uint64 evidence_height = 1; + // The height at which the evidence was processed. + uint64 current_height = 2; + // The validator identity key. + keys.v1.IdentityKey identity_key = 4; + // The validator's Comet address. + bytes address = 5; + // The voting power for the validator. + uint64 voting_power = 6; +}
diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 292fe499e3..3ff2a27f7a 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,8 @@ [toolchain] -channel = "stable" +# We pin a specific version of Rust so that CI workflows use the same +# version that development environments do. +channel = "1.75" +components = [ "rustfmt" ] +# Include the wasm target, for CI tests to check wasm32 build targets still work, +# to avoid downstream breakage in the `penumbra-wasm` crate, in the web repo.
+targets = [ "wasm32-unknown-unknown" ] diff --git a/tools/summonerd/src/participant.rs b/tools/summonerd/src/participant.rs index 81bc38e1df..f0b0035505 100644 --- a/tools/summonerd/src/participant.rs +++ b/tools/summonerd/src/participant.rs @@ -43,7 +43,7 @@ impl Participant { } pub fn address(&self) -> Address { - self.address + self.address.clone() } pub fn is_live(&self) -> bool { diff --git a/tools/summonerd/src/queue.rs b/tools/summonerd/src/queue.rs index 21f8bc03ab..10aaa313f1 100644 --- a/tools/summonerd/src/queue.rs +++ b/tools/summonerd/src/queue.rs @@ -151,7 +151,7 @@ impl ParticipantQueue { for (i, (participant, bid)) in participants.iter().enumerate() { let address = participant.address(); match filter { - Some(f) if f != address => continue, + Some(ref f) if *f != address => continue, _ => {} } // Ignore failures (besides logging), let pruning happen later. diff --git a/tools/summonerd/src/server.rs b/tools/summonerd/src/server.rs index 83db48222f..f1b892cda9 100644 --- a/tools/summonerd/src/server.rs +++ b/tools/summonerd/src/server.rs @@ -97,7 +97,7 @@ impl server::CeremonyCoordinatorService for CoordinatorService { } }; tracing::info!(?amount, ?address, "bid"); - let (participant, response_rx) = Participant::new(address, streaming); + let (participant, response_rx) = Participant::new(address.clone(), streaming); self.queue.push(participant, amount).await; self.queue .inform_one(address)
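The `Some(ref f)` change in `queue.rs` above is the borrow-checker counterpart of the `.clone()` edits elsewhere in this diff: once `Address` is not `Copy`, matching `Some(f)` by value would move the filter out of the `Option` on the first loop iteration. A minimal reproduction with a stand-in type, not the real `Address`:

```rust
#[derive(Clone, PartialEq)]
struct Address(String);

fn main() {
    let filter = Some(Address("penumbra1...".into()));
    let participants = vec![Address("a".into()), Address("b".into())];
    for address in &participants {
        match filter {
            // `ref f` borrows the filter instead of moving it, so the
            // match compiles on every iteration of the loop.
            Some(ref f) if *f != *address => continue,
            _ => {}
        }
        // ... contribution logic would run here ...
    }
}
```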