diff --git a/.cargo/config b/.cargo/config deleted file mode 100644 index c5d8c8e32e..0000000000 --- a/.cargo/config +++ /dev/null @@ -1,5 +0,0 @@ -[build] -# Enable Tokio's `tracing` support for `tokio-console` -# rustflags = ["--cfg", "tokio_unstable"] -# Note(erwan): We decided to disable it for the time being, -# I'm keeping this around to be able to reactivate it on a whim. diff --git a/.dockerignore b/.dockerignore index f6b007af47..1a64da5fd6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,6 +6,7 @@ !Cargo.toml !Cargo.lock !.cargo/ +!rust-toolchain.toml # testnets for 'pd testnet generate' defaults !testnets/ diff --git a/.github/workflows/buf-pull-request.yml b/.github/workflows/buf-pull-request.yml index 627c668fe7..37ca86722e 100644 --- a/.github/workflows/buf-pull-request.yml +++ b/.github/workflows/buf-pull-request.yml @@ -54,9 +54,6 @@ jobs: with: lfs: true - - name: Install rust toolchain - uses: dtolnay/rust-toolchain@stable - - uses: bufbuild/buf-setup-action@v1 with: buf_api_token: ${{ secrets.BUF_TOKEN }} diff --git a/.github/workflows/docs-lint.yml b/.github/workflows/docs-lint.yml index 1cae9f27de..90a98b5c81 100644 --- a/.github/workflows/docs-lint.yml +++ b/.github/workflows/docs-lint.yml @@ -13,9 +13,9 @@ jobs: with: lfs: false - - name: Install rust toolchain + - name: Install nightly rust toolchain # The script for rustdoc build requires nightly toolchain. - uses: dtolnay/rust-toolchain@nightly + run: rustup toolchain install nightly # Loading cache takes ~15s, but saves us minutes of build. - name: Load rust cache @@ -36,9 +36,6 @@ jobs: with: lfs: false - - name: Install rust toolchain - uses: dtolnay/rust-toolchain@stable - - name: Load rust cache uses: astriaorg/buildjet-rust-cache@v2.5.1 diff --git a/.github/workflows/notes.yml b/.github/workflows/notes.yml index 1a9eff4789..d6f3de83e3 100644 --- a/.github/workflows/notes.yml +++ b/.github/workflows/notes.yml @@ -17,8 +17,10 @@ jobs: uses: actions/checkout@v4 with: lfs: true + - name: Install rust toolchain - uses: dtolnay/rust-toolchain@nightly + run: rustup toolchain install nightly + - name: Load Rust caching uses: astriaorg/buildjet-rust-cache@v2.5.1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 381227be8f..85d07c04ba 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,4 @@ -# Copyright 2022-2023, axodotdev +# Copyright 2022-2024, axodotdev # SPDX-License-Identifier: MIT or Apache-2.0 # # CI that: @@ -6,10 +6,11 @@ # * checks for a Git Tag that looks like a release # * builds artifacts with cargo-dist (archives, installers, hashes) # * uploads those artifacts to temporary workflow zip -# * on success, uploads the artifacts to a Github Release™ +# * on success, uploads the artifacts to a GitHub Release # -# Note that the Github Release™ will be created with a generated +# Note that the GitHub Release will be created with a generated # title/body based on your changelogs. + name: Release permissions: @@ -21,28 +22,29 @@ permissions: # PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION # must be a Cargo-style SemVer Version (must have at least major.minor.patch). # -# If PACKAGE_NAME is specified, then the release will be for that +# If PACKAGE_NAME is specified, then the announcement will be for that # package (erroring out if it doesn't have the given version or isn't cargo-dist-able). 
# -# If PACKAGE_NAME isn't specified, then the release will be for all +# If PACKAGE_NAME isn't specified, then the announcement will be for all # (cargo-dist-able) packages in the workspace with that version (this mode is # intended for workspaces with only one dist-able package, or with all dist-able # packages versioned/released in lockstep). # # If you push multiple tags at once, separate instances of this workflow will -# spin up, creating an independent Github Release™ for each one. However Github +# spin up, creating an independent announcement for each one. However, GitHub # will hard limit this to 3 tags per commit, as it will assume more tags is a # mistake. # -# If there's a prerelease-style suffix to the version, then the Github Release™ +# If there's a prerelease-style suffix to the version, then the release(s) # will be marked as a prerelease. on: push: tags: - '**[0-9]+.[0-9]+.[0-9]+*' + pull_request: jobs: - # Run 'cargo dist plan' to determine what tasks we need to do + # Run 'cargo dist plan' (or host) to determine what tasks we need to do plan: runs-on: ubuntu-latest outputs: @@ -56,65 +58,72 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - name: Install Rust - run: rustup update "1.75" --no-self-update && rustup default "1.75" - name: Install cargo-dist - run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.5.0/cargo-dist-installer.sh | sh" + # we specify bash to get pipefail; it guards against the `curl` command + # failing. otherwise `sh` won't catch that `curl` returned non-0 + shell: bash + run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.13.3/cargo-dist-installer.sh | sh" + # sure would be cool if github gave us proper conditionals... + # so here's a doubly-nested ternary-via-truthiness to try to provide the best possible + # functionality based on whether this is a pull_request, and whether it's from a fork. + # (PRs run on the *source* but secrets are usually on the *target* -- that's *good* + # but also really annoying to build CI around when it needs secrets to work right.) - id: plan run: | - cargo dist plan ${{ !github.event.pull_request && format('--tag={0}', github.ref_name) || '' }} --output-format=json > dist-manifest.json - echo "cargo dist plan ran successfully" - cat dist-manifest.json - echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT" + cargo dist ${{ (!github.event.pull_request && format('host --steps=create --tag={0}', github.ref_name)) || 'plan' }} --output-format=json > plan-dist-manifest.json + echo "cargo dist ran successfully" + cat plan-dist-manifest.json + echo "manifest=$(jq -c "." 
plan-dist-manifest.json)" >> "$GITHUB_OUTPUT" - name: "Upload dist-manifest.json" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: artifacts - path: dist-manifest.json + name: artifacts-plan-dist-manifest + path: plan-dist-manifest.json # Build and packages all the platform-specific things - upload-local-artifacts: + build-local-artifacts: + name: build-local-artifacts (${{ join(matrix.targets, ', ') }}) # Let the initial task tell us to not run (currently very blunt) - needs: plan - if: ${{ fromJson(needs.plan.outputs.val).releases != null && (needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload') }} + needs: + - plan + if: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix.include != null && (needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload') }} strategy: fail-fast: false - # We override the generated `matrix` so we can specify custom runners, - # for faster build times. This works for Linux & macOS. To generate the base template, run: - # `cargo dist plan --output-format json`. That JSON content has been adapted to YAML below. - # matrix: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix }} - matrix: - include: - - runner: buildjet-16vcpu-ubuntu-2204 - dist_args: --artifacts=local --target=x86_64-unknown-linux-gnu - install_dist: curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.5.0/cargo-dist-installer.sh | sh - targets: - - x86_64-unknown-linux-gnu - - runner: macos-12-xl - dist_args: --artifacts=local --target=aarch64-apple-darwin - install_dist: curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.5.0/cargo-dist-installer.sh | sh - targets: - - aarch64-apple-darwin - - runner: macos-12-xl - dist_args: --artifacts=local --target=x86_64-apple-darwin - install_dist: curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.5.0/cargo-dist-installer.sh | sh - targets: - - x86_64-apple-darwin - + # Target platforms/runners are computed by cargo-dist in create-release. 
+ # Each member of the matrix has the following arguments: + # + # - runner: the github runner + # - dist-args: cli flags to pass to cargo dist + # - install-dist: expression to run to install cargo-dist on the runner + # + # Typically there will be: + # - 1 "global" task that builds universal installers + # - N "local" tasks that build each platform's binaries and platform-specific installers + matrix: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix }} runs-on: ${{ matrix.runner }} env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} BUILD_MANIFEST_NAME: target/distrib/${{ join(matrix.targets, '-') }}-dist-manifest.json - RUSTFLAGS: "--cfg tokio_unstable" steps: + - name: enable windows longpaths + run: | + git config --global core.longpaths true - uses: actions/checkout@v4 with: + submodules: recursive lfs: true - - name: Install Rust - run: rustup update "1.75" --no-self-update && rustup default "1.75" - uses: swatinem/rust-cache@v2 + with: + key: ${{ join(matrix.targets, '-') }} - name: Install cargo-dist run: ${{ matrix.install_dist }} + # Get the dist-manifest + - name: Fetch local artifacts + uses: actions/download-artifact@v4 + with: + pattern: artifacts-* + path: target/distrib/ + merge-multiple: true - name: Install dependencies run: | ${{ matrix.packages_install }} @@ -130,54 +139,135 @@ # inconsistent syntax between shell and powershell. shell: bash run: | - # Parse out what we just built and upload it to the Github Release™ + # Parse out what we just built and upload it to scratch storage echo "paths<<EOF" >> "$GITHUB_OUTPUT" - jq --raw-output ".artifacts[]?.path | select( . != null )" dist-manifest.json >> "$GITHUB_OUTPUT" + jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT" echo "EOF" >> "$GITHUB_OUTPUT" cp dist-manifest.json "$BUILD_MANIFEST_NAME" - name: "Upload artifacts" - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: artifacts + name: artifacts-build-local-${{ join(matrix.targets, '_') }} path: | ${{ steps.cargo-dist.outputs.paths }} ${{ env.BUILD_MANIFEST_NAME }} - should-publish: + # Build and package all the platform-agnostic(ish) things + build-global-artifacts: needs: - plan - - upload-local-artifacts - if: ${{ needs.plan.outputs.publishing == 'true' }} - runs-on: ubuntu-latest + - build-local-artifacts runs-on: "ubuntu-20.04" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json steps: - - name: print tag - run: echo "ok we're publishing!" + - uses: actions/checkout@v4 + with: + submodules: recursive + lfs: true + - name: Install cargo-dist + shell: bash + run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.13.3/cargo-dist-installer.sh | sh" + # Get all the local artifacts for the global tasks to use (for e.g. 
checksums) + - name: Fetch local artifacts + uses: actions/download-artifact@v4 + with: + pattern: artifacts-* + path: target/distrib/ + merge-multiple: true + - id: cargo-dist + shell: bash + run: | + cargo dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json + echo "cargo dist ran successfully" - # Create a Github Release with all the results once everything is done - publish-release: - needs: [plan, should-publish] - runs-on: ubuntu-latest + # Parse out what we just built and upload it to scratch storage + echo "paths<<EOF" >> "$GITHUB_OUTPUT" + jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT" + echo "EOF" >> "$GITHUB_OUTPUT" + + cp dist-manifest.json "$BUILD_MANIFEST_NAME" + - name: "Upload artifacts" + uses: actions/upload-artifact@v4 + with: + name: artifacts-build-global + path: | + ${{ steps.cargo-dist.outputs.paths }} + ${{ env.BUILD_MANIFEST_NAME }} + # Determines if we should publish/announce + host: + needs: + - plan + - build-local-artifacts + - build-global-artifacts + # Only run if we're "publishing", and only if local and global didn't fail (skipped is fine) + if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.build-local-artifacts.result == 'skipped' || needs.build-local-artifacts.result == 'success') }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + runs-on: "ubuntu-20.04" + outputs: + val: ${{ steps.host.outputs.manifest }} + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - name: Install cargo-dist + run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.13.3/cargo-dist-installer.sh | sh" + # Fetch artifacts from scratch-storage + - name: Fetch artifacts + uses: actions/download-artifact@v4 + with: + pattern: artifacts-* + path: target/distrib/ + merge-multiple: true + # This is a harmless no-op for GitHub Releases, hosting for that happens in "announce" + - id: host + shell: bash + run: | + cargo dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json + echo "artifacts uploaded and released successfully" + cat dist-manifest.json + echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT" + - name: "Upload dist-manifest.json" + uses: actions/upload-artifact@v4 + with: + # Overwrite the previous copy + name: artifacts-dist-manifest + path: dist-manifest.json + + # Create a GitHub Release while uploading all files to it + announce: + needs: + - plan + - host + # use "always() && ..." to allow us to wait for all publish jobs while + # still allowing individual publish jobs to skip themselves (for prereleases). + # "host" however must run to completion, no skipping allowed! 
+ if: ${{ always() && needs.host.result == 'success' }} + runs-on: "ubuntu-20.04" env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v4 with: submodules: recursive - - name: "Download artifacts" - uses: actions/download-artifact@v3 + - name: "Download GitHub Artifacts" + uses: actions/download-artifact@v4 with: - name: artifacts + pattern: artifacts-* path: artifacts + merge-multiple: true - name: Cleanup run: | # Remove the granular manifests - rm artifacts/*-dist-manifest.json + rm -f artifacts/*-dist-manifest.json - - name: Create Release + - name: Create GitHub Release uses: ncipollo/release-action@v1 with: tag: ${{ needs.plan.outputs.tag }} - name: ${{ fromJson(needs.plan.outputs.val).announcement_title }} - body: ${{ fromJson(needs.plan.outputs.val).announcement_github_body }} - prerelease: ${{ fromJson(needs.plan.outputs.val).announcement_is_prerelease }} + name: ${{ fromJson(needs.host.outputs.val).announcement_title }} + body: ${{ fromJson(needs.host.outputs.val).announcement_github_body }} + prerelease: ${{ fromJson(needs.host.outputs.val).announcement_is_prerelease }} artifacts: "artifacts/*" diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 2fe0fe7f5f..09f9982ffc 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -10,10 +10,9 @@ jobs: with: lfs: true - - name: Install rust toolchain - uses: dtolnay/rust-toolchain@stable - with: - targets: wasm32-unknown-unknown + # The `rust-toolchain.toml` file dictates which version of rust to set up. + - name: check rust version + run: rustc --version - name: Install nextest uses: taiki-e/install-action@nextest @@ -24,9 +23,8 @@ - name: Run cargo check, failing on warnings run: cargo check --release --all-targets env: - # The `-D warnings` option causes an error on warnings; - # we must duplicate the rustflags from `.cargo/config.toml`. - RUSTFLAGS: "-D warnings --cfg tokio_unstable" + # The `-D warnings` option causes an error on warnings. 
+ RUSTFLAGS: "-D warnings" - name: Check wasm compatibility run: ./deployments/scripts/check-wasm-compat.sh @@ -55,10 +53,6 @@ jobs: runs-on: buildjet-8vcpu-ubuntu-2204 steps: - uses: actions/checkout@v4 - - name: Install rust toolchain - uses: dtolnay/rust-toolchain@stable - with: - components: rustfmt - name: Load rust cache uses: astriaorg/buildjet-rust-cache@v2.5.1 - run: cargo fmt --all -- --check @@ -68,8 +62,6 @@ jobs: runs-on: buildjet-8vcpu-ubuntu-2204 steps: - uses: actions/checkout@v4 - - name: Install rust toolchain - uses: dtolnay/rust-toolchain@stable - name: Load rust cache uses: astriaorg/buildjet-rust-cache@v2.5.1 - name: install cargo-hack diff --git a/.github/workflows/smoke.yml b/.github/workflows/smoke.yml index 2f9dc7035a..cceb802326 100644 --- a/.github/workflows/smoke.yml +++ b/.github/workflows/smoke.yml @@ -17,9 +17,6 @@ jobs: with: lfs: true - - name: Install rust toolchain - uses: dtolnay/rust-toolchain@stable - - name: Load rust cache uses: astriaorg/buildjet-rust-cache@v2.5.1 diff --git a/.github/workflows/summoner_smoke.yml b/.github/workflows/summoner_smoke.yml index 6c535c13b7..7ff64d76cb 100644 --- a/.github/workflows/summoner_smoke.yml +++ b/.github/workflows/summoner_smoke.yml @@ -20,9 +20,6 @@ jobs: with: lfs: true - - name: Install rust toolchain - uses: dtolnay/rust-toolchain@stable - - name: Load rust cache uses: astriaorg/buildjet-rust-cache@v2.5.1 diff --git a/Cargo.lock b/Cargo.lock index 898f77078e..f78dbb6bee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1378,7 +1378,7 @@ dependencies = [ [[package]] name = "cnidarium" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "async-trait", @@ -1413,7 +1413,7 @@ dependencies = [ [[package]] name = "cnidarium-component" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "async-trait", @@ -1844,7 +1844,7 @@ dependencies = [ [[package]] name = "decaf377-fmd" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "ark-ff", "ark-serialize", @@ -1859,7 +1859,7 @@ dependencies = [ [[package]] name = "decaf377-frost" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -1874,7 +1874,7 @@ dependencies = [ [[package]] name = "decaf377-ka" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "ark-ff", "decaf377 0.5.0", @@ -2943,6 +2943,12 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02296996cb8796d7c6e3bc2d9211b7802812d36999a51bb754123ead7d37d026" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" version = "0.14.28" @@ -4357,7 +4363,7 @@ dependencies = [ [[package]] name = "pcli" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -4380,6 +4386,7 @@ dependencies = [ "futures", "hex", "http-body", + "humantime", "ibc-proto", "ibc-types", "indicatif", @@ -4438,7 +4445,7 @@ dependencies = [ [[package]] name = "pclientd" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "assert_cmd", @@ -4490,7 +4497,7 @@ dependencies = [ [[package]] name = "pd" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -4537,11 +4544,13 @@ dependencies = [ "penumbra-governance", "penumbra-ibc", "penumbra-keys", + "penumbra-num", "penumbra-proof-params", "penumbra-proto", "penumbra-sct", "penumbra-shielded-pool", 
"penumbra-stake", + "penumbra-tct", "penumbra-tendermint-proxy", "penumbra-tower-trace", "penumbra-transaction", @@ -4628,7 +4637,7 @@ dependencies = [ [[package]] name = "penumbra-app" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -4709,7 +4718,7 @@ dependencies = [ [[package]] name = "penumbra-asset" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -4748,7 +4757,7 @@ dependencies = [ [[package]] name = "penumbra-auction" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -4804,7 +4813,7 @@ dependencies = [ [[package]] name = "penumbra-auto-https" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "axum-server", @@ -4816,7 +4825,7 @@ dependencies = [ [[package]] name = "penumbra-bench" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ec", @@ -4860,7 +4869,7 @@ dependencies = [ [[package]] name = "penumbra-community-pool" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -4892,7 +4901,7 @@ dependencies = [ [[package]] name = "penumbra-compact-block" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -4928,7 +4937,7 @@ dependencies = [ [[package]] name = "penumbra-custody" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -4963,7 +4972,7 @@ dependencies = [ [[package]] name = "penumbra-dex" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5023,7 +5032,7 @@ dependencies = [ [[package]] name = "penumbra-distributions" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "async-trait", @@ -5040,7 +5049,7 @@ dependencies = [ [[package]] name = "penumbra-eddy" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5058,7 +5067,7 @@ dependencies = [ [[package]] name = "penumbra-fee" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5083,7 +5092,7 @@ dependencies = [ [[package]] name = "penumbra-funding" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "async-trait", @@ -5105,7 +5114,7 @@ dependencies = [ [[package]] name = "penumbra-governance" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5163,7 +5172,7 @@ dependencies = [ [[package]] name = "penumbra-ibc" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5198,7 +5207,7 @@ dependencies = [ [[package]] name = "penumbra-keys" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "aes 0.8.4", "anyhow", @@ -5244,7 +5253,7 @@ dependencies = [ [[package]] name = "penumbra-measure" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "bytesize", @@ -5263,7 +5272,7 @@ dependencies = [ [[package]] name = "penumbra-mock-client" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "cnidarium", @@ -5280,7 +5289,7 @@ dependencies = [ [[package]] name = "penumbra-mock-consensus" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "bytes", @@ -5295,7 +5304,7 @@ dependencies = [ [[package]] name = "penumbra-num" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5332,7 +5341,7 @@ dependencies = [ [[package]] name = "penumbra-proof-params" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ec", @@ 
-5361,7 +5370,7 @@ dependencies = [ [[package]] name = "penumbra-proof-setup" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ec", @@ -5389,7 +5398,7 @@ dependencies = [ [[package]] name = "penumbra-proto" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "async-trait", @@ -5419,7 +5428,7 @@ dependencies = [ [[package]] name = "penumbra-sct" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5452,7 +5461,7 @@ dependencies = [ [[package]] name = "penumbra-shielded-pool" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5503,7 +5512,7 @@ dependencies = [ [[package]] name = "penumbra-stake" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5556,7 +5565,7 @@ dependencies = [ [[package]] name = "penumbra-tct" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "ark-ed-on-bls12-377", "ark-ff", @@ -5587,7 +5596,7 @@ dependencies = [ [[package]] name = "penumbra-tct-property-test" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "futures", @@ -5599,7 +5608,7 @@ dependencies = [ [[package]] name = "penumbra-tct-visualize" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "axum", @@ -5629,7 +5638,7 @@ dependencies = [ [[package]] name = "penumbra-tendermint-proxy" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "chrono", @@ -5660,7 +5669,7 @@ dependencies = [ [[package]] name = "penumbra-test-subscriber" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "tracing", "tracing-subscriber 0.3.18", @@ -5668,7 +5677,7 @@ dependencies = [ [[package]] name = "penumbra-tower-trace" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "futures", "hex", @@ -5689,7 +5698,7 @@ dependencies = [ [[package]] name = "penumbra-transaction" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-ff", @@ -5742,7 +5751,7 @@ dependencies = [ [[package]] name = "penumbra-txhash" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "blake2b_simd 1.0.2", @@ -5754,7 +5763,7 @@ dependencies = [ [[package]] name = "penumbra-view" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-std", @@ -5811,7 +5820,7 @@ dependencies = [ [[package]] name = "penumbra-wallet" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-std", @@ -7629,7 +7638,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "summonerd" -version = "0.71.0" +version = "0.74.0-alpha.1" dependencies = [ "anyhow", "ark-groth16", diff --git a/Cargo.toml b/Cargo.toml index 5c842d8a46..179cd3d960 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,23 +58,26 @@ members = [ # Config for 'cargo dist' [workspace.metadata.dist] # The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax) -cargo-dist-version = "0.5.0" -# The preferred Rust toolchain to use in CI (rustup toolchain syntax) -rust-toolchain-version = "1.75" +cargo-dist-version = "0.13.3" # CI backends to support ci = ["github"] +# The archive format to use for non-windows builds (defaults .tar.xz) +unix-archive = ".tar.gz" # Target platforms to build apps for (Rust target-triple syntax) -targets = [ - "x86_64-unknown-linux-gnu", - "aarch64-apple-darwin", - "x86_64-apple-darwin", -] +targets = ["aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu"] # The 
installers to generate for each app -installers = [] +installers = ["shell"] # Publish jobs to run in CI -pr-run-mode = "skip" -# We override RUSTFLAGS, so we must permit changes from the default template. +pr-run-mode = "plan" +# Skip checking whether the specified configuration files are up to date allow-dirty = ["ci"] +# Whether to install an updater program +install-updater = false + +[workspace.metadata.dist.github-custom-runners] +aarch64-apple-darwin = "macos-13-large" +x86_64-apple-darwin = "macos-13-large" +x86_64-unknown-linux-gnu = "buildjet-32vcpu-ubuntu-2204" # The profile that 'cargo dist' will build with [profile.dist] @@ -99,7 +102,7 @@ push = false [workspace.package] authors = ["Penumbra Labs "] edition = "2021" -version = "0.71.0" +version = "0.74.0-alpha.1" repository = "https://github.com/penumbra-zone/penumbra" homepage = "https://penumbra.zone" license = "MIT OR Apache-2.0" @@ -144,6 +147,7 @@ futures = { version = "0.3.28" } hex = { version = "0.4.3" } http = { version = "0.2.9" } http-body = { version = "0.4.5" } +humantime = { version = "2.1" } ibc-proto = { default-features = false, version = "0.41.0" } ibc-types = { default-features = false, version = "0.12.0" } ibig = { version = "0.3" } diff --git a/crates/bench/benches/swap_claim.rs b/crates/bench/benches/swap_claim.rs index 2ba03515bb..cb1e3503ad 100644 --- a/crates/bench/benches/swap_claim.rs +++ b/crates/bench/benches/swap_claim.rs @@ -70,7 +70,7 @@ fn swap_claim_proving_time(c: &mut Criterion) { unfilled_2: Amount::from(50u64), height: height.into(), trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: (epoch_duration * position.epoch()).into(), + sct_position_prefix: position, }; let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i)); diff --git a/crates/bin/pcli/Cargo.toml b/crates/bin/pcli/Cargo.toml index 67321278ec..c68aecff0d 100644 --- a/crates/bin/pcli/Cargo.toml +++ b/crates/bin/pcli/Cargo.toml @@ -53,6 +53,7 @@ ed25519-consensus = {workspace = true} futures = {workspace = true} hex = {workspace = true} http-body = {workspace = true} +humantime = {workspace = true} ibc-proto = {workspace = true, default-features = true} ibc-types = {workspace = true, features = ["std", "with_serde"], default-features = true} indicatif = {workspace = true} diff --git a/crates/bin/pcli/src/command/ceremony.rs b/crates/bin/pcli/src/command/ceremony.rs index 0e396f374f..5cd73ca96c 100644 --- a/crates/bin/pcli/src/command/ceremony.rs +++ b/crates/bin/pcli/src/command/ceremony.rs @@ -139,7 +139,7 @@ impl CeremonyCmd { bid, address ); - handle_bid(app, *coordinator_address, index, bid).await?; + handle_bid(app, coordinator_address.clone(), index, bid).await?; println!("connecting to coordinator..."); // After we bid, we need to wait a couple of seconds just for the transaction to be diff --git a/crates/bin/pcli/src/command/query/validator.rs b/crates/bin/pcli/src/command/query/validator.rs index 311cc2b87a..c34826b6cc 100644 --- a/crates/bin/pcli/src/command/query/validator.rs +++ b/crates/bin/pcli/src/command/query/validator.rs @@ -1,16 +1,33 @@ -use std::{fs::File, io::Write}; +use std::{ + fs::File, + io::Write, + ops::{Deref, RangeInclusive}, + time::Duration, +}; -use anyhow::{Context, Result}; +use anyhow::{anyhow, Context, Error, Result}; use colored::Colorize; use comfy_table::{presets, Table}; use futures::TryStreamExt; -use penumbra_num::Amount; -use penumbra_proto::core::component::stake::v1::{ - query_service_client::QueryServiceClient as StakeQueryServiceClient, 
ValidatorInfoRequest, +use penumbra_app::params::AppParameters; +use penumbra_num::{fixpoint::U128x128, Amount}; +use penumbra_proto::{ + core::{ + app::v1::{ + query_service_client::QueryServiceClient as AppQueryServiceClient, AppParametersRequest, + }, + component::stake::v1::{ + query_service_client::QueryServiceClient as StakeQueryServiceClient, + GetValidatorInfoRequest, GetValidatorInfoResponse, ValidatorInfoRequest, + ValidatorStatusRequest, ValidatorUptimeRequest, + }, + }, + DomainType, }; use penumbra_stake::{ - validator::{self, ValidatorToml}, - IdentityKey, + rate::RateData, + validator::{self, Info, Status, Validator, ValidatorToml}, + IdentityKey, Uptime, BPS_SQUARED_SCALING_FACTOR, }; use crate::App; @@ -35,6 +52,16 @@ pub enum ValidatorCmd { /// The identity key of the validator to fetch. identity_key: String, }, + /// Get the uptime of the validator. + Uptime { + /// The identity key of the validator to fetch. + identity_key: String, + }, + /// Fetch the current status for a particular validator. + Status { + /// The identity key of the validator to fetch. + identity_key: String, + }, } impl ValidatorCmd { @@ -155,62 +182,297 @@ println!("{table}"); } ValidatorCmd::Definition { file, identity_key } => { + // Parse the identity key and construct the RPC request. + let request = tonic::Request::new(GetValidatorInfoRequest { + identity_key: identity_key + .parse::<IdentityKey>() + .map(|ik| ik.to_proto()) + .map(Some)?, + }); + + // Instantiate an RPC client and send the request. + let GetValidatorInfoResponse { validator_info } = app + .pd_channel() + .await + .map(StakeQueryServiceClient::new)? + .get_validator_info(request) + .await? + .into_inner(); + + // Coerce the validator information into TOML, or return an error if it was not + // found within the client's response. + let serialize = |v| toml::to_string_pretty(&v).map_err(Error::from); + let toml = validator_info + .ok_or_else(|| anyhow!("response did not include validator info"))? + .try_into() + .context("parsing validator info") + .map(|Info { validator, .. }| validator) + .map(ValidatorToml::from) + .and_then(serialize)?; + + // Write to a file if an output file was specified, otherwise print to stdout. + if let Some(file) = file { + File::create(file) + .with_context(|| format!("cannot create file {file:?}"))? + .write_all(toml.as_bytes()) + .context("could not write file")?; + } else { + println!("{}", toml); + } + } + ValidatorCmd::Uptime { identity_key } => { let identity_key = identity_key.parse::<IdentityKey>()?; - /* - use penumbra_proto::client::specific::ValidatorStatusRequest; + let mut client = StakeQueryServiceClient::new(app.pd_channel().await?); - let mut client = opt.specific_client().await?; - let status: ValidatorStatus = client - .validator_status(ValidatorStatusRequest { - chain_id: "".to_string(), // TODO: fill in + // What's the uptime? + let uptime: Uptime = client + .validator_uptime(ValidatorUptimeRequest { identity_key: Some(identity_key.into()), }) .await? .into_inner() + .uptime + .ok_or_else(|| anyhow::anyhow!("uptime must be present in response"))? .try_into()?; - // why isn't the validator definition part of the status? - // why do we have all these different validator messages? - // do we need them? - status.state. - */ - - // Intsead just download everything - let mut client = StakeQueryServiceClient::new(app.pd_channel().await?); - - let validators = client - .validator_info(ValidatorInfoRequest { - show_inactive: true, - ..Default::default() + // Is the validator active? 
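+ // The Active state gates the required-signing and unexpended-grace lines + // printed below: they are only computed against the slashing parameters + // for validators that are currently Active.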
+ let status: validator::Status = client + .validator_status(ValidatorStatusRequest { + identity_key: Some(identity_key.into()), }) .await? .into_inner() - .try_collect::<Vec<_>>() + .status + .ok_or_else(|| anyhow::anyhow!("status must be present in response"))? + .try_into()?; + let state = status.state; + let active = matches!(state, validator::State::Active); + + // Get the chain parameters + let mut client = AppQueryServiceClient::new(app.pd_channel().await?); + let params: AppParameters = client + .app_parameters(tonic::Request::new(AppParametersRequest {})) .await? - .into_iter() - .map(TryInto::try_into) - .collect::<Result<Vec<validator::Info>, _>>()?; + .into_inner() + .app_parameters + .ok_or_else(|| anyhow::anyhow!("empty AppParametersResponse message"))? + .try_into()?; - let validator: ValidatorToml = validators - .iter() - .map(|info| &info.validator) - .find(|v| v.identity_key == identity_key) - .cloned() - .ok_or_else(|| anyhow::anyhow!("Could not find validator {}", identity_key))? - .into(); + let as_of_height = uptime.as_of_height(); + let missed_blocks = uptime.num_missed_blocks(); + let window_len = uptime.missed_blocks_window(); - if let Some(file) = file { - File::create(file) - .with_context(|| format!("cannot create file {file:?}"))? - .write_all(toml::to_string_pretty(&validator)?.as_bytes()) - .context("could not write file")?; - } else { - println!("{}", toml::to_string_pretty(&validator)?); + let mut downtime_ranges: Vec<RangeInclusive<u64>> = vec![]; + for missed_block in uptime.missed_blocks() { + if let Some(range) = downtime_ranges.last_mut() { + if range.end() + 1 == missed_block { + *range = *range.start()..=missed_block; + } else { + downtime_ranges.push(missed_block..=missed_block); + } + } else { + downtime_ranges.push(missed_block..=missed_block); + } + } + + let percent_uptime = + 100.0 * (window_len as f64 - missed_blocks as f64) / window_len as f64; + let signed_blocks = window_len as u64 - missed_blocks as u64; + let min_uptime_blocks = + window_len as u64 - params.stake_params.missed_blocks_maximum; + let percent_min_uptime = 100.0 * min_uptime_blocks as f64 / window_len as f64; + let percent_max_downtime = + 100.0 * params.stake_params.missed_blocks_maximum as f64 / window_len as f64; + let percent_downtime = 100.0 * missed_blocks as f64 / window_len as f64; + let percent_downtime_penalty = + // Converting from basis points squared to percentage + params.stake_params.slashing_penalty_downtime as f64 / 100.0 / 100.0; + let min_remaining_downtime_blocks = (window_len as u64) + .saturating_sub(missed_blocks as u64) + .saturating_sub(min_uptime_blocks); + let min_remaining_downtime = humantime::Duration::from(Duration::from_secs( + (min_remaining_downtime_blocks * 5) as u64, + )); + let cumulative_downtime = + humantime::Duration::from(Duration::from_secs((missed_blocks * 5) as u64)); + let percent_grace = 100.0 * min_remaining_downtime_blocks as f64 + / (window_len - min_uptime_blocks as usize) as f64; + let window_len_len = window_len.to_string().len(); + + println!("{state} validator: as of block {as_of_height}"); + println!("Unmissed signing: {percent_uptime:>6.2}% = {signed_blocks:width$}/{window_len} most-recent blocks", width = window_len_len); + if active { + println!("Required signing: {percent_min_uptime:>6.2}% = {min_uptime_blocks:width$}/{window_len} most-recent blocks", width = window_len_len); + } + println!("Salient downtime: {percent_downtime:>6.2}% = {missed_blocks:width$}/{window_len} most-recent blocks ~ {cumulative_downtime} cumulative downtime", width = window_len_len); + if active 
{ + println!("Unexpended grace: {percent_grace:>6.2}% = {min_remaining_downtime_blocks:width$}/{window_len} forthcoming blocks ~ {min_remaining_downtime} at minimum before penalty", width = window_len_len); + println!( "Downtime penalty: {percent_downtime_penalty:>6.2}% - if downtime exceeds {percent_max_downtime:.2}%, penalty will be applied to all delegations"); + } + if !downtime_ranges.is_empty() { + println!("Downtime details:"); + let mut max_blocks_width = 0; + let mut max_start_width = 0; + let mut max_end_width = 0; + for range in downtime_ranges.iter() { + let blocks = range.end() - range.start() + 1; + max_blocks_width = max_blocks_width.max(blocks.to_string().len()); + max_start_width = max_start_width.max(range.start().to_string().len()); + if blocks != 1 { + max_end_width = max_end_width.max(range.end().to_string().len()); + } + } + for range in downtime_ranges.iter() { + let blocks = range.end() - range.start() + 1; + let estimated_duration = + humantime::Duration::from(Duration::from_secs((blocks * 5) as u64)); + if blocks == 1 { + let height = range.start(); + println!( + " • {blocks:width$} missed: block {height:>height_width$} {empty:>duration_width$}(~ {estimated_duration})", + width = max_blocks_width, + height_width = max_start_width, + duration_width = max_end_width + 5, + empty = "", + ); + } else { + let start = range.start(); + let end = range.end(); + println!( + " • {blocks:width$} missed: blocks {start:>start_width$} ..= {end:>end_width$} (~ {estimated_duration})", + width = max_blocks_width, + start_width = max_start_width, + end_width = max_end_width, + ); + }; + } } } + ValidatorCmd::Status { identity_key } => { + // Parse the identity key and construct the RPC request. + let request = tonic::Request::new(GetValidatorInfoRequest { + identity_key: identity_key + .parse::<IdentityKey>() + .map(|ik| ik.to_proto()) + .map(Some)?, + }); + + // Instantiate an RPC client and send the request. + let GetValidatorInfoResponse { validator_info } = app + .pd_channel() + .await + .map(StakeQueryServiceClient::new)? + .get_validator_info(request) + .await? + .into_inner(); + + // Parse the validator status, or return an error if it was not found within the + // client's response. + let info = validator_info + .ok_or_else(|| anyhow!("response did not include validator info"))? + .try_into() + .context("parsing validator info")?; + + // Initialize a table, add a header and insert this validator's information. + let mut table = Table::new(); + table + .load_preset(presets::NOTHING) + .set_header(vec![ + "Voting Power", + "Commission", + "State", + "Bonding State", + "Exchange Rate", + "Identity Key", + "Name", + ]) + .add_row(StatusRow::new(info)); + println!("{table}"); + } } Ok(()) } } + +/// A row within the `status` command's table output. +struct StatusRow { + power: f64, + commission: u16, + state: validator::State, + bonding_state: validator::BondingState, + exchange_rate: U128x128, + identity_key: IdentityKey, + name: String, +} + +impl StatusRow { + /// Constructs a new [`StatusRow`]. + fn new( + Info { + validator: + Validator { + funding_streams, + identity_key, + name, + .. + }, + status: + Status { + state, + bonding_state, + voting_power, + .. + }, + rate_data: + RateData { + validator_exchange_rate, + .. + }, + }: Info, + ) -> Self { + // Calculate the scaled voting power, exchange rate, and commissions. 
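+ // Illustrative values: `power` divides the raw voting power by 1e6, so + // 2_500_000 base units display as 2.500; `commission` sums the basis-point + // rate across all funding streams; and the exchange rate is recorded in + // basis points squared, so a raw rate of 1_0000_0000 divided by the + // scaling factor displays as 1.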
+ let power = (voting_power.value() as f64) * 1e-6; + let commission = funding_streams.iter().map(|fs| fs.rate_bps()).sum(); + let exchange_rate = { + let rate_bps_sq = U128x128::from(validator_exchange_rate); + (rate_bps_sq / BPS_SQUARED_SCALING_FACTOR.deref()).expect("nonzero scaling factor") + }; + + Self { + power, + commission, + state, + bonding_state, + exchange_rate, + identity_key, + name, + } + } +} + +impl Into<comfy_table::Row> for StatusRow { + fn into(self) -> comfy_table::Row { + let Self { + power, + commission, + state, + bonding_state, + exchange_rate, + identity_key, + name, + } = self; + + [ + format!("{power:.3}"), + format!("{commission}bps"), + state.to_string(), + bonding_state.to_string(), + exchange_rate.to_string(), + identity_key.to_string(), + name, + ] + .into() + } +} diff --git a/crates/bin/pcli/src/command/tx.rs b/crates/bin/pcli/src/command/tx.rs index ae11ece118..3fd90f5b5b 100644 --- a/crates/bin/pcli/src/command/tx.rs +++ b/crates/bin/pcli/src/command/tx.rs @@ -31,7 +31,7 @@ use penumbra_asset::{asset, asset::Metadata, Value, STAKING_TOKEN_ASSET_ID}; use penumbra_dex::{lp::position, swap_claim::SwapClaimPlan}; use penumbra_fee::Fee; use penumbra_governance::{proposal::ProposalToml, proposal_state::State as ProposalState, Vote}; -use penumbra_keys::keys::AddressIndex; +use penumbra_keys::{keys::AddressIndex, Address}; use penumbra_num::Amount; use penumbra_proto::{ core::component::{ @@ -346,7 +346,7 @@ impl TxCmd { .map(|v| v.parse()) .collect::<Result<Vec<Value>, _>>()?; let to = to - .parse() + .parse::
<Address>() .map_err(|_| anyhow::anyhow!("address is invalid"))?; let return_address = app @@ -364,7 +364,7 @@ .set_gas_prices(gas_prices) .set_fee_tier((*fee_tier).into()); for value in values.iter().cloned() { - planner.output(value, to); + planner.output(value, to.clone()); } let plan = planner .memo(memo_plaintext)? diff --git a/crates/bin/pcli/tests/proof.rs b/crates/bin/pcli/tests/proof.rs index adaa09277e..e84b1b2a0d 100644 --- a/crates/bin/pcli/tests/proof.rs +++ b/crates/bin/pcli/tests/proof.rs @@ -278,7 +278,7 @@ fn swap_claim_parameters_vs_current_swap_claim_circuit() { unfilled_2: Amount::from(50u64), height: height.into(), trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: (epoch_duration * position.epoch()).into(), + sct_position_prefix: position, }; let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i)); diff --git a/crates/bin/pclientd/tests/network_integration.rs b/crates/bin/pclientd/tests/network_integration.rs index 7f02c207ce..6c9c282171 100644 --- a/crates/bin/pclientd/tests/network_integration.rs +++ b/crates/bin/pclientd/tests/network_integration.rs @@ -6,7 +6,7 @@ //! where no tokens have been delegated, and the address with index 0 //! was distributedp 1cube. -use std::process::Command as StdCommand; +use std::{ops::Deref, process::Command as StdCommand}; use anyhow::Context; use assert_cmd::cargo::CommandCargoExt; @@ -120,7 +120,7 @@ async fn transaction_send_flow() -> anyhow::Result<()> { let plan = view_client .transaction_planner(TransactionPlannerRequest { outputs: vec![tpr::Output { - address: Some((*test_keys::ADDRESS_1).into()), + address: Some(test_keys::ADDRESS_1.deref().clone().into()), value: Some( Value { amount: 1_000_000u64.into(), @@ -304,7 +304,7 @@ async fn swap_claim_flow() -> anyhow::Result<()> { amount: Some(num::Amount { lo: 0, hi: 0 }), asset_id: None, }), - claim_address: Some((*test_keys::ADDRESS_1).into()), + claim_address: Some(test_keys::ADDRESS_1.deref().clone().into()), }], ..Default::default() }) diff --git a/crates/bin/pd/Cargo.toml b/crates/bin/pd/Cargo.toml index 4447152a5c..e3a6e1c080 100644 --- a/crates/bin/pd/Cargo.toml +++ b/crates/bin/pd/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "pd" -version = {workspace = true} -authors = {workspace = true} -edition = {workspace = true} +version = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } description = "The node software for the Penumbra Zone" -repository = {workspace = true} -homepage = {workspace = true} -license = {workspace = true} +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } publish = false rust-version = "1.75" @@ -76,10 +76,12 @@ penumbra-fee = { workspace = true, default-features = true } penumbra-governance = { workspace = true, features = ["parallel"], default-features = true } penumbra-ibc = { workspace = true, features = ["rpc"], default-features = true } penumbra-keys = { workspace = true, default-features = true } +penumbra-num = { workspace = true, default-features = true } penumbra-proto = { workspace = true, default-features = true } penumbra-sct = { workspace = true, default-features = true } penumbra-shielded-pool = { workspace = true, features = ["parallel"], default-features = true } penumbra-stake = { workspace = true, features = ["parallel"], default-features = true } +penumbra-tct = { workspace = true, default-features = true } penumbra-tendermint-proxy = { path = "../../util/tendermint-proxy" } penumbra-tower-trace = { 
path = "../../util/tower-trace" } penumbra-transaction = { workspace = true, default-features = true } @@ -102,27 +104,27 @@ tempfile = { workspace = true } tendermint = { workspace = true } tendermint-config = { workspace = true } tendermint-light-client-verifier = { workspace = true } -tendermint-proto = { workspace = true } -tendermint-rpc = { workspace = true, features = ["http-client"] } -tokio = { workspace = true, features = ["full"] } -tokio-stream = { workspace = true } -tokio-util = { workspace = true, features = ["compat"] } -toml = { workspace = true } -tonic = { workspace = true } -tonic-reflection = { workspace = true } -tonic-web = { workspace = true } -tower = { workspace = true, features = ["full"] } -tower-abci = "0.11" -tower-actor = "0.1.0" -tower-http = { workspace = true } -tower-service = { workspace = true } -tracing = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter", "ansi"] } -url = { workspace = true } -zip = "0.6" +tendermint-proto = { workspace = true } +tendermint-rpc = { workspace = true, features = ["http-client"] } +tokio = { workspace = true, features = ["full"] } +tokio-stream = { workspace = true } +tokio-util = { workspace = true, features = ["compat"] } +toml = { workspace = true } +tonic = { workspace = true } +tonic-reflection = { workspace = true } +tonic-web = { workspace = true } +tower = { workspace = true, features = ["full"] } +tower-abci = "0.11" +tower-actor = "0.1.0" +tower-http = { workspace = true } +tower-service = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter", "ansi"] } +url = { workspace = true } +zip = "0.6" [dev-dependencies] -penumbra-proof-params = {workspace = true, features = [ +penumbra-proof-params = { workspace = true, features = [ "bundled-proving-keys", "download-proving-keys", -], default-features = true} +], default-features = true } diff --git a/crates/bin/pd/src/main.rs b/crates/bin/pd/src/main.rs index 368d4066a3..f94cd092e3 100644 --- a/crates/bin/pd/src/main.rs +++ b/crates/bin/pd/src/main.rs @@ -12,7 +12,7 @@ use cnidarium::{StateDelta, Storage}; use metrics_exporter_prometheus::PrometheusBuilder; use pd::{ cli::{Opt, RootCommand, TestnetCommand}, - migrate::Migration::Testnet70, + migrate::Migration::Testnet74, testnet::{ config::{get_testnet_dir, parse_tm_address, url_has_necessary_parts}, generate::TestnetConfig, @@ -432,7 +432,7 @@ async fn main() -> anyhow::Result<()> { migrate_archive, } => { tracing::info!("migrating state in {}", target_directory.display()); - Testnet70 + Testnet74 .migrate(target_directory.clone(), genesis_start) .await .context("failed to upgrade state")?; diff --git a/crates/bin/pd/src/migrate.rs b/crates/bin/pd/src/migrate.rs index 922d0bc001..64e08b2b6c 100644 --- a/crates/bin/pd/src/migrate.rs +++ b/crates/bin/pd/src/migrate.rs @@ -4,6 +4,9 @@ //! node operators must coordinate to perform a chain upgrade. //! This module declares how local `pd` state should be altered, if at all, //! in order to be compatible with the network post-chain-upgrade. +mod testnet72; +mod testnet74; + use anyhow::Context; use futures::StreamExt as _; use std::path::PathBuf; @@ -28,6 +31,11 @@ pub enum Migration { SimpleMigration, /// Testnet-70 migration: move swap executions from the jmt to nv-storage. Testnet70, + /// Testnet-72 migration: + /// - Migrate `BatchSwapOutputData` to new protobuf, replacing epoch height with index. 
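+ /// (The epoch and block indices are carried as an SCT position prefix; see + /// `translate_bsod` in the new `testnet72` module below, which packs them + /// into the two high 16-bit components of a `penumbra_tct::Position`.)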
+ Testnet72, + /// Testnet-74 migration: change liquidity positions to be ordered in descending order rather than ascending. + Testnet74, } impl Migration { @@ -37,7 +45,7 @@ genesis_start: Option<tendermint::time::Time>, ) -> anyhow::Result<()> { match self { - Migration::Noop => (), + Migration::Noop => Ok(()), Migration::SimpleMigration => { let rocksdb_dir = path_to_export.join("rocksdb"); let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?; @@ -101,6 +109,7 @@ crate::testnet::generate::TestnetValidator::initial_state(); std::fs::write(validator_state_path, fresh_validator_state) .expect("can write validator state"); + Ok(()) } Migration::Testnet70 => { // Our goal is to fetch all swap executions from the jmt and store them in nv-storage. @@ -189,9 +198,12 @@ duration = migration_duration.as_secs(), "successful migration!" ); + + Ok(()) } + Migration::Testnet72 => testnet72::migrate(path_to_export, genesis_start).await, + Migration::Testnet74 => testnet74::migrate(path_to_export, genesis_start).await, } - Ok(()) } } diff --git a/crates/bin/pd/src/migrate/testnet72.rs b/crates/bin/pd/src/migrate/testnet72.rs new file mode 100644 index 0000000000..e6c8f6bcd9 --- /dev/null +++ b/crates/bin/pd/src/migrate/testnet72.rs @@ -0,0 +1,206 @@ +//! Contains functions related to the migration script of Testnet72 + +use anyhow; +use cnidarium::{Snapshot, StateDelta, StateRead, StateWrite, Storage}; +use futures::StreamExt as _; +use jmt::RootHash; +use penumbra_app::app::StateReadExt as _; +use penumbra_app::SUBSTORE_PREFIXES; +use penumbra_proto::core::component::sct::v1::query_service_server::QueryService; +use penumbra_proto::penumbra::core::component as pb; +use penumbra_proto::StateWriteProto; +use penumbra_sct::component::clock::{EpochManager, EpochRead}; +use penumbra_sct::component::rpc::Server as SctServer; +use penumbra_tct::Position; +use prost::Message; +use std::path::PathBuf; +use std::sync::Arc; +use tonic::IntoRequest; + +use crate::testnet::generate::TestnetConfig; + +/// The context holding various query services we need to help perform the migration. +#[derive(Clone)] +struct Context { + sct_server: Arc<SctServer>, +} + +impl Context { + /// Create a new context from the state storage. + fn new(storage: Storage) -> Self { + Self { + sct_server: Arc::new(SctServer::new(storage)), + } + } + + /// Use storage to look up the index of an epoch based on its starting height + async fn epoch_height_to_index(&self, epoch_starting_height: u64) -> anyhow::Result<u64> { + Ok(self + .sct_server + .epoch_by_height( + pb::sct::v1::EpochByHeightRequest { + height: epoch_starting_height, + } + .into_request(), + ) + .await? + .into_inner() + .epoch + .expect(&format!( + "epoch at height {} should be present", + epoch_starting_height + )) + .index) + } + + /// Translate the protobuf for a BSOD by populating the correct data and emptying the + /// deprecated field. 
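+ /// + /// For example (illustrative values): a BSOD recorded at `height = 1234` in + /// an epoch with index `5` that began at `epoch_starting_height = 1230` gets + /// `sct_position_prefix = u64::from(Position::from((5u16, 4u16, 0)))`, and the + /// deprecated `epoch_starting_height` field is reset to its default.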
+ #[allow(deprecated)] + async fn translate_bsod( + &self, + bsod: pb::dex::v1::BatchSwapOutputData, + ) -> anyhow::Result<pb::dex::v1::BatchSwapOutputData> { + let sct_position_prefix: u64 = { + let epoch = self + .epoch_height_to_index(bsod.epoch_starting_height) + .await?; + Position::from(( + u16::try_from(epoch).expect("epoch should fit in 16 bits"), + u16::try_from(bsod.height - bsod.epoch_starting_height) + .expect("block index should fit in 16 bits"), + 0, + )) + .into() + }; + Ok(pb::dex::v1::BatchSwapOutputData { + sct_position_prefix, + epoch_starting_height: Default::default(), + ..bsod + }) + } + + async fn translate_compact_block( + &self, + compact_block: pb::compact_block::v1::CompactBlock, + ) -> anyhow::Result<pb::compact_block::v1::CompactBlock> { + let mut swap_outputs = Vec::with_capacity(compact_block.swap_outputs.len()); + for bsod in compact_block.swap_outputs { + swap_outputs.push(self.translate_bsod(bsod).await?); + } + Ok(pb::compact_block::v1::CompactBlock { + swap_outputs, + ..compact_block + }) + } +} + +/// Translate all of the BSODs inside dex storage to the new format. +async fn translate_dex_storage( + ctx: Context, + delta: &mut StateDelta<Snapshot>, +) -> anyhow::Result<()> { + let mut stream = delta.prefix_raw("dex/output/"); + while let Some(r) = stream.next().await { + let (key, bsod_bytes) = r?; + let bsod = pb::dex::v1::BatchSwapOutputData::decode(bsod_bytes.as_slice())?; + let bsod = ctx.translate_bsod(bsod).await?; + delta.put_proto(key, bsod); + } + Ok(()) +} + +/// Translate all of the compact block storage to hold the new BSOD data inside the compact blocks. +async fn translate_compact_block_storage( + ctx: Context, + delta: &mut StateDelta<Snapshot>, +) -> anyhow::Result<()> { + let mut stream = delta.nonverifiable_prefix_raw("compactblock/".as_bytes()); + while let Some(r) = stream.next().await { + let (key, compactblock_bytes) = r?; + let block = pb::compact_block::v1::CompactBlock::decode(compactblock_bytes.as_slice())?; + let block = ctx.translate_compact_block(block).await?; + delta.nonverifiable_put_raw(key, block.encode_to_vec()); + } + Ok(()) +} + +/// Run the full migration, given an export path and a start time for genesis. +pub async fn migrate( + path_to_export: PathBuf, + genesis_start: Option<tendermint::time::Time>, +) -> anyhow::Result<()> { + let rocksdb_dir = path_to_export.join("rocksdb"); + let storage = Storage::load(rocksdb_dir.clone(), SUBSTORE_PREFIXES.to_vec()).await?; + let export_state = storage.latest_snapshot(); + let root_hash = export_state.root_hash().await.expect("can get root hash"); + let pre_upgrade_root_hash: RootHash = root_hash.into(); + let pre_upgrade_height = export_state + .get_block_height() + .await + .expect("can get block height"); + let post_upgrade_height = pre_upgrade_height.wrapping_add(1); + + let mut delta = StateDelta::new(export_state); + let (migration_duration, post_upgrade_root_hash) = { + let start_time = std::time::SystemTime::now(); + let ctx = Context::new(storage.clone()); + + // Translate inside dex storage. + translate_dex_storage(ctx.clone(), &mut delta).await?; + // Translate inside compact block storage. 
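+ // (BSODs live both under the dex substore and, denormalized, in each + // compact block's `swap_outputs`, so both copies must be rewritten.)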
+ translate_compact_block_storage(ctx.clone(), &mut delta).await?; + + delta.put_block_height(0u64); + let post_upgrade_root_hash = storage.commit_in_place(delta).await?; + tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash"); + + (start_time.elapsed().unwrap(), post_upgrade_root_hash) + }; + + storage.release().await; + let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?; + let migrated_state = storage.latest_snapshot(); + + // The migration is complete; now we need to generate a genesis file. To do this, we need + // to look up a validator view from the chain, and specify the post-upgrade app hash and + // initial height. + let chain_id = migrated_state.get_chain_id().await?; + let app_state = penumbra_app::genesis::Content { + chain_id, + ..Default::default() + }; + let mut genesis = TestnetConfig::make_genesis(app_state.clone()).expect("can make genesis"); + genesis.app_hash = post_upgrade_root_hash + .0 + .to_vec() + .try_into() + .expect("infallible conversion"); + genesis.initial_height = post_upgrade_height as i64; + genesis.genesis_time = genesis_start.unwrap_or_else(|| { + let now = tendermint::time::Time::now(); + tracing::info!(%now, "no genesis time provided, detecting a testing setup"); + now + }); + let checkpoint = post_upgrade_root_hash.0.to_vec(); + let genesis = TestnetConfig::make_checkpoint(genesis, Some(checkpoint)); + + let genesis_json = serde_json::to_string(&genesis).expect("can serialize genesis"); + tracing::info!("genesis: {}", genesis_json); + let genesis_path = path_to_export.join("genesis.json"); + std::fs::write(genesis_path, genesis_json).expect("can write genesis"); + + let validator_state_path = path_to_export.join("priv_validator_state.json"); + let fresh_validator_state = crate::testnet::generate::TestnetValidator::initial_state(); + std::fs::write(validator_state_path, fresh_validator_state).expect("can write validator state"); + + tracing::info!( + pre_upgrade_height, + post_upgrade_height, + ?pre_upgrade_root_hash, + ?post_upgrade_root_hash, + duration = migration_duration.as_secs(), + "successful migration!" + ); + + Ok(()) +} diff --git a/crates/bin/pd/src/migrate/testnet74.rs b/crates/bin/pd/src/migrate/testnet74.rs new file mode 100644 index 0000000000..85b384e8d3 --- /dev/null +++ b/crates/bin/pd/src/migrate/testnet74.rs @@ -0,0 +1,165 @@ +//! Contains functions related to the migration script of Testnet74 + +use anyhow; +use cnidarium::{EscapedByteSlice, Snapshot, StateDelta, StateRead, StateWrite, Storage}; +use futures::StreamExt as _; +use jmt::RootHash; +use penumbra_app::{app::StateReadExt as _, SUBSTORE_PREFIXES}; +use penumbra_dex::SwapExecution; +use penumbra_num::Amount; +use penumbra_proto::{penumbra::core::component as pb, StateReadProto, StateWriteProto}; +use penumbra_sct::component::clock::{EpochManager, EpochRead}; +use std::path::PathBuf; + +use crate::testnet::generate::TestnetConfig; + +/// Updates arb execution output amounts to include the input amount instead +/// of reporting only profit (see #3790). 
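+/// +/// Illustrative example: an arb execution previously recorded with input `100` +/// and output `5` (profit only) is rewritten to output `105 = input + profit`, +/// denominated in the input's asset id.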
diff --git a/crates/bin/pd/src/migrate/testnet74.rs b/crates/bin/pd/src/migrate/testnet74.rs
new file mode 100644
index 0000000000..85b384e8d3
--- /dev/null
+++ b/crates/bin/pd/src/migrate/testnet74.rs
@@ -0,0 +1,165 @@
+//! Contains functions related to the migration script of Testnet74.
+
+use anyhow;
+use cnidarium::{EscapedByteSlice, Snapshot, StateDelta, StateRead, StateWrite, Storage};
+use futures::StreamExt as _;
+use jmt::RootHash;
+use penumbra_app::{app::StateReadExt as _, SUBSTORE_PREFIXES};
+use penumbra_dex::SwapExecution;
+use penumbra_num::Amount;
+use penumbra_proto::{penumbra::core::component as pb, StateReadProto, StateWriteProto};
+use penumbra_sct::component::clock::{EpochManager, EpochRead};
+use std::path::PathBuf;
+
+use crate::testnet::generate::TestnetConfig;
+
+/// Updates arb execution output amounts to include the input amount instead
+/// of reporting only profit (see #3790).
+async fn fix_arb_execution_outputs(delta: &mut StateDelta<Snapshot>) -> anyhow::Result<()> {
+    let mut stream = delta.prefix_proto("dex/arb_execution/");
+    while let Some(r) = stream.next().await {
+        let (key, swap_ex_proto): (String, pb::dex::v1::SwapExecution) = r?;
+        let mut swap_ex: SwapExecution = swap_ex_proto.try_into()?;
+        swap_ex.output = swap_ex
+            .input
+            .asset_id
+            .value(swap_ex.output.amount + swap_ex.input.amount);
+        delta.put(key, swap_ex);
+    }
+    Ok(())
+}
+
+/// Update the ordering of liquidity position indices to return in descending order (see #4189).
+///
+/// Lookups for liquidity positions based on starting asset were ordered backwards,
+/// returning the positions with the least liquidity first. This migration
+/// needs to modify the keys stored under the JMT `dex/ra/` prefix key to reverse
+/// the ordering of the existing data.
+async fn update_lp_index_order(delta: &mut StateDelta<Snapshot>) -> anyhow::Result<()> {
+    let prefix_key = "dex/ra/".as_bytes();
+    tracing::trace!(prefix_key = ?EscapedByteSlice(&prefix_key), "updating liquidity position indices");
+    let mut liquidity_stream = delta.nonverifiable_prefix_raw(&prefix_key).boxed();
+
+    while let Some(r) = liquidity_stream.next().await {
+        let (old_key, asset_id): (Vec<u8>, Vec<u8>) = r?;
+        tracing::info!(?old_key, asset_id = ?EscapedByteSlice(&asset_id), "migrating asset liquidity");
+
+        // Construct the new key:
+        let mut new_key = [0u8; 55];
+        new_key[0..7].copy_from_slice(b"dex/ra/");
+        // The "from" asset ID remains the same in both keys.
+        new_key[7..32 + 7].copy_from_slice(&old_key[7..32 + 7]);
+        // Use the complement of the amount to ensure that the keys are ordered in descending order.
+        let a_from_b = Amount::from_be_bytes(old_key[32 + 7..32 + 7 + 16].try_into()?);
+        new_key[32 + 7..32 + 7 + 16].copy_from_slice(&(!a_from_b).to_be_bytes());
+
+        // Delete the old incorrectly ordered key:
+        delta.nonverifiable_delete(old_key.clone());
+
+        // Store the correctly formatted new key:
+        delta.nonverifiable_put_raw(new_key.to_vec(), asset_id);
+        tracing::info!(
+            new_key = ?EscapedByteSlice(&new_key),
+            ?old_key,
+            "updated liquidity index"
+        );
+    }
+
+    Ok(())
+}
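The complement trick above works because, for unsigned integers, bitwise NOT exactly reverses numeric order, and big-endian byte strings compare the same way as the integers they encode. A self-contained check:

```rust
// Storing `!amount` big-endian makes larger amounts sort lexicographically
// first, which is what a descending liquidity index needs.
fn descending_key(amount: u128) -> [u8; 16] {
    (!amount).to_be_bytes()
}

fn main() {
    assert!(descending_key(1_000) < descending_key(1));
    assert!(descending_key(u128::MAX) < descending_key(0));
}
```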
+
+/// Run the full migration, given an export path and a start time for genesis.
+///
+/// This migration script is responsible for:
+///
+/// - Updating the ordering of liquidity position indices to return in descending order (see #4189)
+/// - Updating arb execution output amounts to include the input amount instead of reporting only profit (see #3790)
+///
+/// Affected JMT key prefixes:
+///
+/// - `dex/ra/`
+/// - `dex/arb_execution/`
+pub async fn migrate(
+    path_to_export: PathBuf,
+    genesis_start: Option<tendermint::time::Time>,
+) -> anyhow::Result<()> {
+    // Setup:
+    let rocksdb_dir = path_to_export.join("rocksdb");
+    let storage = Storage::load(rocksdb_dir.clone(), SUBSTORE_PREFIXES.to_vec()).await?;
+    let export_state = storage.latest_snapshot();
+    let root_hash = export_state.root_hash().await.expect("can get root hash");
+    let pre_upgrade_root_hash: RootHash = root_hash.into();
+    let pre_upgrade_height = export_state
+        .get_block_height()
+        .await
+        .expect("can get block height");
+    let post_upgrade_height = pre_upgrade_height.wrapping_add(1);
+
+    // We initialize a `StateDelta` and start by reaching into the JMT for all entries matching
+    // the affected prefixes. Then, we write each updated entry back to the nv-storage.
+    let mut delta = StateDelta::new(export_state);
+    let (migration_duration, post_upgrade_root_hash) = {
+        let start_time = std::time::SystemTime::now();
+
+        // Update LP index order.
+        update_lp_index_order(&mut delta).await?;
+
+        // Fix the arb execution output amounts.
+        fix_arb_execution_outputs(&mut delta).await?;
+
+        delta.put_block_height(0u64);
+        let post_upgrade_root_hash = storage.commit_in_place(delta).await?;
+        tracing::info!(?post_upgrade_root_hash, "post-upgrade root hash");
+
+        (start_time.elapsed().unwrap(), post_upgrade_root_hash)
+    };
+
+    storage.release().await;
+    let storage = Storage::load(rocksdb_dir, SUBSTORE_PREFIXES.to_vec()).await?;
+    let migrated_state = storage.latest_snapshot();
+
+    // The migration is complete; now we need to generate a genesis file. To do this, we need
+    // to look up a validator view from the chain, and specify the post-upgrade app hash and
+    // initial height.
+    let chain_id = migrated_state.get_chain_id().await?;
+    let app_state = penumbra_app::genesis::Content {
+        chain_id,
+        ..Default::default()
+    };
+    let mut genesis = TestnetConfig::make_genesis(app_state.clone()).expect("can make genesis");
+    genesis.app_hash = post_upgrade_root_hash
+        .0
+        .to_vec()
+        .try_into()
+        .expect("infallible conversion");
+    genesis.initial_height = post_upgrade_height as i64;
+    genesis.genesis_time = genesis_start.unwrap_or_else(|| {
+        let now = tendermint::time::Time::now();
+        tracing::info!(%now, "no genesis time provided, assuming a testing setup");
+        now
+    });
+    let checkpoint = post_upgrade_root_hash.0.to_vec();
+    let genesis = TestnetConfig::make_checkpoint(genesis, Some(checkpoint));
+
+    let genesis_json = serde_json::to_string(&genesis).expect("can serialize genesis");
+    tracing::info!("genesis: {}", genesis_json);
+    let genesis_path = path_to_export.join("genesis.json");
+    std::fs::write(genesis_path, genesis_json).expect("can write genesis");
+
+    let validator_state_path = path_to_export.join("priv_validator_state.json");
+    let fresh_validator_state = crate::testnet::generate::TestnetValidator::initial_state();
+    std::fs::write(validator_state_path, fresh_validator_state).expect("can write validator state");
+
+    tracing::info!(
+        pre_upgrade_height,
+        post_upgrade_height,
+        ?pre_upgrade_root_hash,
+        ?post_upgrade_root_hash,
+        duration = migration_duration.as_secs(),
+        "successful migration!"
+    );
+
+    Ok(())
+}
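A hypothetical invocation, for orientation only; the actual wiring lives in `pd`'s migrate command, and the module path and export directory below are assumptions:

```rust
// Hypothetical caller: run the Testnet74 migration over an exported node
// state directory. `None` for the genesis time falls back to "now", per the
// log message above.
async fn run_testnet74_migration() -> anyhow::Result<()> {
    let export_dir = std::path::PathBuf::from("/home/penumbra/exported-state");
    crate::migrate::testnet74::migrate(export_dir, None).await
}
```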
diff --git a/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs b/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs
index 67d781308d..d50e1bd881 100644
Binary files a/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs and b/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs differ
diff --git a/crates/core/app/src/action_handler/transaction.rs b/crates/core/app/src/action_handler/transaction.rs
index 859ff816c8..9219a8350b 100644
--- a/crates/core/app/src/action_handler/transaction.rs
+++ b/crates/core/app/src/action_handler/transaction.rs
@@ -110,6 +110,8 @@ impl AppActionHandler for Transaction {
 
 #[cfg(test)]
 mod tests {
+    use std::ops::Deref;
+
     use anyhow::Result;
     use penumbra_asset::{Value, STAKING_TOKEN_ASSET_ID};
     use penumbra_fee::Fee;
@@ -163,10 +165,14 @@ mod tests {
         actions: vec![
             SpendPlan::new(&mut OsRng, note, auth_path.position()).into(),
             SpendPlan::new(&mut OsRng, note2, auth_path2.position()).into(),
-            OutputPlan::new(&mut OsRng, value, *test_keys::ADDRESS_1).into(),
+            OutputPlan::new(&mut OsRng, value, test_keys::ADDRESS_1.deref().clone()).into(),
         ],
         detection_data: Some(DetectionDataPlan {
-            clue_plans: vec![CluePlan::new(&mut OsRng, *test_keys::ADDRESS_1, 1)],
+            clue_plans: vec![CluePlan::new(
+                &mut OsRng,
+                test_keys::ADDRESS_1.deref().clone(),
+                1,
+            )],
         }),
         memo: None,
     };
@@ -228,7 +234,7 @@ mod tests {
         },
         actions: vec![
             SpendPlan::new(&mut OsRng, note, auth_path.position()).into(),
-            OutputPlan::new(&mut OsRng, value, *test_keys::ADDRESS_1).into(),
+            OutputPlan::new(&mut OsRng, value, test_keys::ADDRESS_1.deref().clone()).into(),
         ],
         detection_data: None,
         memo: None,
diff --git a/crates/core/app/src/app/mod.rs b/crates/core/app/src/app/mod.rs
index f918e1c642..150047225b 100644
--- a/crates/core/app/src/app/mod.rs
+++ b/crates/core/app/src/app/mod.rs
@@ -634,7 +634,7 @@ impl App {
 ///
 /// Increment this manually after fixing the root cause for a chain halt: updated nodes will then be
 /// able to proceed past the block height of the halt.
-const TOTAL_HALT_COUNT: u64 = 1; +const TOTAL_HALT_COUNT: u64 = 2; #[async_trait] pub trait StateReadExt: StateRead { diff --git a/crates/core/app/tests/app_can_define_and_delegate_to_a_validator.rs b/crates/core/app/tests/app_can_define_and_delegate_to_a_validator.rs index a3b26cfa52..b783bf2db4 100644 --- a/crates/core/app/tests/app_can_define_and_delegate_to_a_validator.rs +++ b/crates/core/app/tests/app_can_define_and_delegate_to_a_validator.rs @@ -1,5 +1,5 @@ use { - self::common::{BuilderExt, TestNodeExt}, + self::common::{BuilderExt, TestNodeExt, ValidatorDataReadExt}, anyhow::anyhow, cnidarium::TempStorage, decaf377_rdsa::{SigningKey, SpendAuth, VerificationKey}, @@ -16,6 +16,7 @@ use { GovernanceKey, IdentityKey, }, rand_core::OsRng, + std::ops::Deref, tap::Tap, tracing::{error_span, info, Instrument}, }; @@ -251,13 +252,16 @@ async fn app_can_define_and_delegate_to_a_validator() -> anyhow::Result<()> { let output = OutputPlan::new( &mut rand_core::OsRng, delegate.delegation_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), delegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), @@ -410,14 +414,17 @@ async fn app_can_define_and_delegate_to_a_validator() -> anyhow::Result<()> { let output = OutputPlan::new( &mut rand_core::OsRng, undelegate.unbonded_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), undelegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), diff --git a/crates/core/app/tests/app_can_disable_community_pool_spends.rs b/crates/core/app/tests/app_can_disable_community_pool_spends.rs index bac8e9a281..3648b06bba 100644 --- a/crates/core/app/tests/app_can_disable_community_pool_spends.rs +++ b/crates/core/app/tests/app_can_disable_community_pool_spends.rs @@ -1,4 +1,5 @@ use { + self::common::ValidatorDataReadExt, anyhow::anyhow, cnidarium::TempStorage, decaf377_rdsa::VerificationKey, @@ -26,13 +27,13 @@ use { DomainType, }, penumbra_shielded_pool::{genesis::Allocation, OutputPlan, SpendPlan}, - penumbra_stake::{component::validator_handler::ValidatorDataRead, DelegationToken}, + penumbra_stake::DelegationToken, penumbra_transaction::{ memo::MemoPlaintext, plan::MemoPlan, ActionPlan, TransactionParameters, TransactionPlan, }, rand::Rng, rand_core::OsRng, - std::collections::BTreeMap, + std::{collections::BTreeMap, ops::Deref}, tap::{Tap, TapFallible}, tracing::{error_span, info, Instrument}, }; @@ -203,7 +204,7 @@ async fn app_can_disable_community_pool_spends() -> anyhow::Result<()> { CommunityPoolSpend { value }.into(), CommunityPoolOutput { 
value, - address: *test_keys::ADDRESS_0, + address: test_keys::ADDRESS_0.deref().clone(), } .into(), ], @@ -232,12 +233,17 @@ async fn app_can_disable_community_pool_spends() -> anyhow::Result<()> { actions: vec![ proposal, // Next, create a new output of the exact same amount. - OutputPlan::new(&mut OsRng, proposal_nft_value, *test_keys::ADDRESS_0).into(), + OutputPlan::new( + &mut OsRng, + proposal_nft_value, + test_keys::ADDRESS_0.deref().clone(), + ) + .into(), ], // Now fill out the remaining parts of the transaction needed for verification: memo: Some(MemoPlan::new( &mut OsRng, - MemoPlaintext::blank_memo(*test_keys::ADDRESS_0), + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), )?), detection_data: None, transaction_parameters: TransactionParameters { diff --git a/crates/core/app/tests/app_can_propose_community_pool_spends.rs b/crates/core/app/tests/app_can_propose_community_pool_spends.rs index 80d43d450e..fe308b9761 100644 --- a/crates/core/app/tests/app_can_propose_community_pool_spends.rs +++ b/crates/core/app/tests/app_can_propose_community_pool_spends.rs @@ -1,4 +1,5 @@ use { + self::common::ValidatorDataReadExt, anyhow::anyhow, cnidarium::TempStorage, decaf377_rdsa::VerificationKey, @@ -26,13 +27,13 @@ use { DomainType, }, penumbra_shielded_pool::{genesis::Allocation, OutputPlan, SpendPlan}, - penumbra_stake::{component::validator_handler::ValidatorDataRead, DelegationToken}, + penumbra_stake::DelegationToken, penumbra_transaction::{ memo::MemoPlaintext, plan::MemoPlan, ActionPlan, TransactionParameters, TransactionPlan, }, rand::Rng, rand_core::OsRng, - std::collections::BTreeMap, + std::{collections::BTreeMap, ops::Deref}, tap::{Tap, TapFallible}, tracing::{error_span, info, Instrument}, }; @@ -197,7 +198,7 @@ async fn app_can_propose_community_pool_spends() -> anyhow::Result<()> { CommunityPoolSpend { value }.into(), CommunityPoolOutput { value, - address: *test_keys::ADDRESS_0, + address: test_keys::ADDRESS_0.deref().clone(), } .into(), ], @@ -226,12 +227,17 @@ async fn app_can_propose_community_pool_spends() -> anyhow::Result<()> { actions: vec![ proposal, // Next, create a new output of the exact same amount. - OutputPlan::new(&mut OsRng, proposal_nft_value, *test_keys::ADDRESS_0).into(), + OutputPlan::new( + &mut OsRng, + proposal_nft_value, + test_keys::ADDRESS_0.deref().clone(), + ) + .into(), ], // Now fill out the remaining parts of the transaction needed for verification: memo: Some(MemoPlan::new( &mut OsRng, - MemoPlaintext::blank_memo(*test_keys::ADDRESS_0), + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), )?), detection_data: None, transaction_parameters: TransactionParameters { diff --git a/crates/core/app/tests/app_can_spend_notes_and_detect_outputs.rs b/crates/core/app/tests/app_can_spend_notes_and_detect_outputs.rs index d4bf8577d1..547525cca6 100644 --- a/crates/core/app/tests/app_can_spend_notes_and_detect_outputs.rs +++ b/crates/core/app/tests/app_can_spend_notes_and_detect_outputs.rs @@ -13,6 +13,7 @@ use { memo::MemoPlaintext, plan::MemoPlan, TransactionParameters, TransactionPlan, }, rand_core::OsRng, + std::ops::Deref, tap::{Tap, TapFallible}, tracing::info, }; @@ -63,12 +64,17 @@ async fn app_can_spend_notes_and_detect_outputs() -> anyhow::Result<()> { ) .into(), // Next, create a new output of the exact same amount. 
- OutputPlan::new(&mut OsRng, input_note.value(), *test_keys::ADDRESS_1).into(), + OutputPlan::new( + &mut OsRng, + input_note.value(), + test_keys::ADDRESS_1.deref().clone(), + ) + .into(), ], // Now fill out the remaining parts of the transaction needed for verification: memo: Some(MemoPlan::new( &mut OsRng, - MemoPlaintext::blank_memo(*test_keys::ADDRESS_0), + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), )?), detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { diff --git a/crates/core/app/tests/app_can_undelegate_from_a_validator.rs b/crates/core/app/tests/app_can_undelegate_from_a_validator.rs index 1f7f2c7573..12365df179 100644 --- a/crates/core/app/tests/app_can_undelegate_from_a_validator.rs +++ b/crates/core/app/tests/app_can_undelegate_from_a_validator.rs @@ -1,5 +1,5 @@ use { - self::common::{BuilderExt, TestNodeExt}, + self::common::{BuilderExt, TestNodeExt, ValidatorDataReadExt}, anyhow::anyhow, ark_ff::UniformRand, cnidarium::TempStorage, @@ -18,6 +18,7 @@ use { memo::MemoPlaintext, plan::MemoPlan, TransactionParameters, TransactionPlan, }, rand_core::OsRng, + std::ops::Deref, tap::Tap, tracing::{error_span, info, Instrument}, }; @@ -132,13 +133,16 @@ async fn app_can_undelegate_from_a_validator() -> anyhow::Result<()> { let output = OutputPlan::new( &mut rand_core::OsRng, delegate.delegation_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), delegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), @@ -230,13 +234,16 @@ async fn app_can_undelegate_from_a_validator() -> anyhow::Result<()> { let output = OutputPlan::new( &mut rand_core::OsRng, undelegate.unbonded_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), undelegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), @@ -317,8 +324,11 @@ async fn app_can_undelegate_from_a_validator() -> anyhow::Result<()> { let mut plan = TransactionPlan { actions: vec![claim.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), diff --git a/crates/core/app/tests/app_rejects_validator_definitions_with_invalid_auth_sigs.rs 
b/crates/core/app/tests/app_rejects_validator_definitions_with_invalid_auth_sigs.rs index 679a4048cb..a245d03488 100644 --- a/crates/core/app/tests/app_rejects_validator_definitions_with_invalid_auth_sigs.rs +++ b/crates/core/app/tests/app_rejects_validator_definitions_with_invalid_auth_sigs.rs @@ -1,5 +1,5 @@ use { - self::common::BuilderExt, + self::common::{BuilderExt, ValidatorDataReadExt}, cnidarium::TempStorage, decaf377_rdsa::{SigningKey, SpendAuth, VerificationKey}, penumbra_app::{genesis::AppState, server::consensus::Consensus}, @@ -7,10 +7,7 @@ use { penumbra_mock_client::MockClient, penumbra_mock_consensus::TestNode, penumbra_proto::DomainType, - penumbra_stake::{ - component::validator_handler::ValidatorDataRead as _, validator::Validator, FundingStreams, - GovernanceKey, IdentityKey, - }, + penumbra_stake::{validator::Validator, FundingStreams, GovernanceKey, IdentityKey}, rand_core::OsRng, tap::Tap, tracing::{error_span, info, Instrument}, diff --git a/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs index 126888c365..e9f93ba232 100644 --- a/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs +++ b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_missing_blocks.rs @@ -1,7 +1,5 @@ -mod common; - use { - self::common::BuilderExt, + self::common::{BuilderExt, ValidatorDataReadExt}, anyhow::Context, cnidarium::TempStorage, penumbra_app::{genesis::AppState, server::consensus::Consensus}, @@ -11,6 +9,8 @@ use { tracing::{error_span, trace, Instrument}, }; +mod common; + #[tokio::test] async fn app_tracks_uptime_for_genesis_validator_missing_blocks() -> anyhow::Result<()> { // Install a test logger, acquire some temporary storage, and start the test node. 
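These per-file import changes all lean on Cargo's integration-test layout: `tests/common/mod.rs` is compiled into each test binary that declares `mod common;`, so every test file imports `ValidatorDataReadExt` separately. The trait itself (defined in `validator_read_ext.rs` below) uses a blanket impl; a self-contained sketch of that pattern:

```rust
// Extension-trait pattern in miniature: a blanket impl gives every
// implementor of the base trait the extra (test-only) helpers for free.
trait ValidatorRead {
    fn validator_count(&self) -> usize;
}

trait ValidatorReadExt: ValidatorRead {
    fn has_validators(&self) -> bool {
        self.validator_count() > 0
    }
}

// All `ValidatorRead`s implement `ValidatorReadExt`.
impl<T: ValidatorRead + ?Sized> ValidatorReadExt for T {}

struct MockState(usize);

impl ValidatorRead for MockState {
    fn validator_count(&self) -> usize {
        self.0
    }
}

fn main() {
    assert!(MockState(1).has_validators());
}
```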
diff --git a/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_signing_blocks.rs b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_signing_blocks.rs index 8c9be9a2d2..4f20881b8e 100644 --- a/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_signing_blocks.rs +++ b/crates/core/app/tests/app_tracks_uptime_for_genesis_validator_signing_blocks.rs @@ -1,5 +1,5 @@ use { - self::common::BuilderExt, + self::common::{BuilderExt, ValidatorDataReadExt}, anyhow::Context, cnidarium::TempStorage, penumbra_app::{genesis::AppState, server::consensus::Consensus}, diff --git a/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs b/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs index 8a4553e786..2c3f228f0c 100644 --- a/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs +++ b/crates/core/app/tests/app_tracks_uptime_for_validators_only_once_active.rs @@ -1,5 +1,5 @@ use { - self::common::{BuilderExt, TestNodeExt}, + self::common::{BuilderExt, TestNodeExt, ValidatorDataReadExt}, cnidarium::TempStorage, decaf377_rdsa::{SigningKey, SpendAuth, VerificationKey}, penumbra_app::{ @@ -16,6 +16,7 @@ use { FundingStreams, GovernanceKey, IdentityKey, Uptime, }, rand_core::OsRng, + std::ops::Deref, tap::Tap, tracing::{error_span, Instrument}, }; @@ -191,13 +192,16 @@ async fn app_tracks_uptime_for_validators_only_once_active() -> anyhow::Result<( let output = OutputPlan::new( &mut rand_core::OsRng, delegate.delegation_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), delegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), @@ -312,14 +316,17 @@ async fn app_tracks_uptime_for_validators_only_once_active() -> anyhow::Result<( let output = OutputPlan::new( &mut rand_core::OsRng, undelegate.unbonded_value(), - *test_keys::ADDRESS_1, + test_keys::ADDRESS_1.deref().clone(), ); let mut plan = TransactionPlan { actions: vec![spend.into(), output.into(), undelegate.into()], // Now fill out the remaining parts of the transaction needed for verification: - memo: MemoPlan::new(&mut OsRng, MemoPlaintext::blank_memo(*test_keys::ADDRESS_0)) - .map(Some)?, + memo: MemoPlan::new( + &mut OsRng, + MemoPlaintext::blank_memo(test_keys::ADDRESS_0.deref().clone()), + ) + .map(Some)?, detection_data: None, // We'll set this automatically below transaction_parameters: TransactionParameters { chain_id: TestNode::<()>::CHAIN_ID.to_string(), diff --git a/crates/core/app/tests/common/mod.rs b/crates/core/app/tests/common/mod.rs index 1a6e788508..ad71cc246e 100644 --- a/crates/core/app/tests/common/mod.rs +++ b/crates/core/app/tests/common/mod.rs @@ -5,7 +5,7 @@ pub use { self::{ temp_storage_ext::TempStorageExt, test_node_builder_ext::BuilderExt, - test_node_ext::TestNodeExt, + test_node_ext::TestNodeExt, validator_read_ext::ValidatorDataReadExt, }, penumbra_test_subscriber::set_tracing_subscriber, }; @@ -22,3 +22,9 @@ mod temp_storage_ext; /// /// See [`TestNodeExt`]. 
 mod test_node_ext;
+
+/// Helpful additions for reading validator information.
+///
+/// See [`ValidatorDataRead`][penumbra_stake::component::validator_handler::ValidatorDataRead],
+/// and [`ValidatorDataReadExt`].
+mod validator_read_ext;
diff --git a/crates/core/app/tests/common/validator_read_ext.rs b/crates/core/app/tests/common/validator_read_ext.rs
new file mode 100644
index 0000000000..5788bed9d7
--- /dev/null
+++ b/crates/core/app/tests/common/validator_read_ext.rs
@@ -0,0 +1,39 @@
+use {
+    async_trait::async_trait,
+    futures::TryStreamExt,
+    penumbra_proto::StateReadProto,
+    penumbra_stake::{
+        component::validator_handler::ValidatorDataRead, state_key, validator::Validator,
+        IdentityKey,
+    },
+};
+
+/// All [`ValidatorDataRead`]s implement [`ValidatorDataReadExt`].
+impl<T: ValidatorDataRead + ?Sized> ValidatorDataReadExt for T {}
+
+/// Additional extensions to [`ValidatorDataRead`] for use in test cases.
+#[async_trait]
+pub trait ValidatorDataReadExt: ValidatorDataRead {
+    /// Returns a list of **all** known validators' metadata.
+    ///
+    /// This is not included in [`ValidatorDataRead`] because it is liable to become expensive
+    /// over time as more validators are defined. This should only be used in test cases.
+    async fn validator_definitions(&self) -> anyhow::Result<Vec<Validator>> {
+        self.prefix(state_key::validators::definitions::prefix())
+            .map_ok(|(_key, validator)| validator)
+            .try_collect()
+            .await
+    }
+
+    /// Returns a list of **all** known validators' identity keys.
+    ///
+    /// This is not included in [`ValidatorDataRead`] because it is liable to become expensive
+    /// over time as more validators are defined. This should only be used in test cases.
+    async fn validator_identity_keys(&self) -> anyhow::Result<Vec<IdentityKey>> {
+        self.prefix(state_key::validators::definitions::prefix())
+            .map_ok(|(_key, validator)| validator)
+            .map_ok(|validator: Validator| validator.identity_key)
+            .try_collect()
+            .await
+    }
+}
diff --git a/crates/core/app/tests/mock_consensus_can_define_a_genesis_validator.rs b/crates/core/app/tests/mock_consensus_can_define_a_genesis_validator.rs
index e1954bc743..2d478ea52a 100644
--- a/crates/core/app/tests/mock_consensus_can_define_a_genesis_validator.rs
+++ b/crates/core/app/tests/mock_consensus_can_define_a_genesis_validator.rs
@@ -1,5 +1,5 @@
 use {
-    self::common::BuilderExt,
+    self::common::{BuilderExt, ValidatorDataReadExt},
     anyhow::anyhow,
     cnidarium::TempStorage,
     penumbra_app::{genesis::AppState, server::consensus::Consensus},
diff --git a/crates/core/app/tests/swap_and_swap_claim.rs b/crates/core/app/tests/swap_and_swap_claim.rs
index 586e5027f3..aa329da576 100644
--- a/crates/core/app/tests/swap_and_swap_claim.rs
+++ b/crates/core/app/tests/swap_and_swap_claim.rs
@@ -58,7 +58,7 @@ async fn swap_and_swap_claim() -> anyhow::Result<()> {
     let delta_1 = Amount::from(100_000u64);
     let delta_2 = Amount::from(0u64);
     let fee = Fee::default();
-    let claim_address: Address = *test_keys::ADDRESS_0;
+    let claim_address: Address = test_keys::ADDRESS_0.deref().clone();
 
     let plaintext =
         SwapPlaintext::new(&mut rng, trading_pair, delta_1, delta_2, fee, claim_address);
@@ -295,7 +295,7 @@ async fn swap_with_nonzero_fee() -> anyhow::Result<()> {
     let delta_1 = Amount::from(100_000u64);
     let delta_2 = Amount::from(0u64);
     let fee = Fee::from_staking_token_amount(Amount::from(1u64));
-    let claim_address: Address = *test_keys::ADDRESS_0;
+    let claim_address: Address = test_keys::ADDRESS_0.deref().clone();
 
     let plaintext =
         SwapPlaintext::new(&mut rng, trading_pair, delta_1, delta_2, fee, claim_address);
diff --git
a/crates/core/asset/src/asset/denom_metadata.rs b/crates/core/asset/src/asset/denom_metadata.rs index 582a476a98..994e96f622 100644 --- a/crates/core/asset/src/asset/denom_metadata.rs +++ b/crates/core/asset/src/asset/denom_metadata.rs @@ -545,6 +545,8 @@ impl Display for Unit { #[cfg(test)] mod tests { + use std::sync::Arc; + #[test] fn can_parse_metadata_from_chain_registry() { const SOME_COSMOS_JSON: &str = r#" @@ -590,4 +592,31 @@ mod tests { //let json2 = serde_json::to_string_pretty(&_metadata).unwrap(); //println!("{}", json2); } + + #[test] + fn encoding_round_trip_succeeds() { + let metadata = super::Metadata::try_from("upenumbra").unwrap(); + + let proto = super::pb::Metadata::from(metadata.clone()); + + let metadata_2 = super::Metadata::try_from(proto).unwrap(); + + assert_eq!(metadata, metadata_2); + } + + #[test] + #[should_panic] + fn changing_asset_id_without_changing_denom_fails_decoding() { + let mut metadata = super::Metadata::try_from("upenumbra").unwrap(); + + let inner = Arc::get_mut(&mut metadata.inner).unwrap(); + + inner.id = super::Id::from_raw_denom("uusd"); + + let proto = super::pb::Metadata::from(metadata); + + // This should throw an error, because the asset ID and denom are now inconsistent. + + let _domain_type = super::Metadata::try_from(proto).unwrap(); + } } diff --git a/crates/core/component/dex/src/batch_swap_output_data.rs b/crates/core/component/dex/src/batch_swap_output_data.rs index f40a446b46..33afb327f5 100644 --- a/crates/core/component/dex/src/batch_swap_output_data.rs +++ b/crates/core/component/dex/src/batch_swap_output_data.rs @@ -8,6 +8,7 @@ use ark_r1cs_std::{ use ark_relations::r1cs::{ConstraintSystemRef, SynthesisError}; use decaf377::{r1cs::FqVar, Fq}; use penumbra_proto::{penumbra::core::component::dex::v1 as pb, DomainType}; +use penumbra_tct::Position; use serde::{Deserialize, Serialize}; use penumbra_num::fixpoint::{bit_constrain, U128x128, U128x128Var}; @@ -36,8 +37,8 @@ pub struct BatchSwapOutputData { pub height: u64, /// The trading pair associated with the batch swap. pub trading_pair: TradingPair, - /// The starting block height of the epoch for which the batch swap data is valid. - pub epoch_starting_height: u64, + /// The position prefix where this batch swap occurred. The commitment index must be 0. 
+ pub sct_position_prefix: Position, } impl BatchSwapOutputData { @@ -117,19 +118,19 @@ impl ToConstraintField for BatchSwapOutputData { .expect("U128x128 types are Bls12-377 field members"), ); public_inputs.extend( - Fq::from(self.height) + self.trading_pair .to_field_elements() - .expect("Fq types are Bls12-377 field members"), + .expect("trading_pair is a Bls12-377 field member"), ); public_inputs.extend( - self.trading_pair + Fq::from(self.sct_position_prefix.epoch()) .to_field_elements() - .expect("trading_pair is a Bls12-377 field member"), + .expect("Position types are Bls12-377 field members"), ); public_inputs.extend( - Fq::from(self.epoch_starting_height) + Fq::from(self.sct_position_prefix.block()) .to_field_elements() - .expect("Fq types are Bls12-377 field members"), + .expect("Position types are Bls12-377 field members"), ); Some(public_inputs) } @@ -142,9 +143,9 @@ pub struct BatchSwapOutputDataVar { pub lambda_2: U128x128Var, pub unfilled_1: U128x128Var, pub unfilled_2: U128x128Var, - pub height: FqVar, pub trading_pair: TradingPairVar, - pub epoch_starting_height: FqVar, + pub epoch: FqVar, + pub block_within_epoch: FqVar, } impl AllocVar for BatchSwapOutputDataVar { @@ -168,18 +169,23 @@ impl AllocVar for BatchSwapOutputDataVar { let unfilled_1 = U128x128Var::new_variable(cs.clone(), || Ok(unfilled_1_fixpoint), mode)?; let unfilled_2_fixpoint: U128x128 = output_data.unfilled_2.into(); let unfilled_2 = U128x128Var::new_variable(cs.clone(), || Ok(unfilled_2_fixpoint), mode)?; - let height = FqVar::new_variable(cs.clone(), || Ok(Fq::from(output_data.height)), mode)?; - // Check the height is 64 bits - let _ = bit_constrain(height.clone(), 64); let trading_pair = TradingPairVar::new_variable_unchecked( cs.clone(), || Ok(output_data.trading_pair), mode, )?; - let epoch_starting_height = - FqVar::new_variable(cs, || Ok(Fq::from(output_data.epoch_starting_height)), mode)?; - // Check the epoch starting height is 64 bits - let _ = bit_constrain(epoch_starting_height.clone(), 64); + let epoch = FqVar::new_variable( + cs.clone(), + || Ok(Fq::from(output_data.sct_position_prefix.epoch())), + mode, + )?; + bit_constrain(epoch.clone(), 16)?; + let block_within_epoch = FqVar::new_variable( + cs.clone(), + || Ok(Fq::from(output_data.sct_position_prefix.block())), + mode, + )?; + bit_constrain(block_within_epoch.clone(), 16)?; Ok(Self { delta_1, @@ -189,8 +195,8 @@ impl AllocVar for BatchSwapOutputDataVar { unfilled_1, unfilled_2, trading_pair, - height, - epoch_starting_height, + epoch, + block_within_epoch, }) } } @@ -201,6 +207,7 @@ impl DomainType for BatchSwapOutputData { impl From for pb::BatchSwapOutputData { fn from(s: BatchSwapOutputData) -> Self { + #[allow(deprecated)] pb::BatchSwapOutputData { delta_1: Some(s.delta_1.into()), delta_2: Some(s.delta_2.into()), @@ -209,8 +216,12 @@ impl From for pb::BatchSwapOutputData { unfilled_1: Some(s.unfilled_1.into()), unfilled_2: Some(s.unfilled_2.into()), height: s.height, - epoch_starting_height: s.epoch_starting_height, trading_pair: Some(s.trading_pair.into()), + sct_position_prefix: s.sct_position_prefix.into(), + // Deprecated fields we explicitly fill with defaults. + // We could instead use a `..Default::default()` here, but that would silently + // work if we were to add fields to the domain type. 
+            epoch_starting_height: Default::default(),
         }
     }
 }
@@ -276,6 +287,14 @@ impl From<BatchSwapOutputData> for pb::BatchSwapOutputDataResponse {
 impl TryFrom<pb::BatchSwapOutputData> for BatchSwapOutputData {
     type Error = anyhow::Error;
     fn try_from(s: pb::BatchSwapOutputData) -> Result<Self, Self::Error> {
+        let sct_position_prefix = {
+            let prefix = Position::from(s.sct_position_prefix);
+            anyhow::ensure!(
+                prefix.commitment() == 0,
+                "sct_position_prefix.commitment() != 0"
+            );
+            prefix
+        };
         Ok(Self {
             delta_1: s
                 .delta_1
@@ -306,7 +325,7 @@ impl TryFrom<pb::BatchSwapOutputData> for BatchSwapOutputData {
                 .trading_pair
                 .ok_or_else(|| anyhow!("Missing trading_pair"))?
                 .try_into()?,
-            epoch_starting_height: s.epoch_starting_height,
+            sct_position_prefix,
         })
     }
 }
@@ -421,9 +440,9 @@ mod tests {
             lambda_2: Amount::from(1u32),
             unfilled_1: Amount::from(1u32),
             unfilled_2: Amount::from(1u32),
-            height: 1,
+            height: 0,
             trading_pair,
-            epoch_starting_height: 1,
+            sct_position_prefix: 0u64.into(),
         },
     }
 }
@@ -444,7 +463,7 @@ mod tests {
             unfilled_2: Amount::from(50u64),
             height: 0u64,
             trading_pair,
-            epoch_starting_height: 0u64,
+            sct_position_prefix: 0u64.into(),
         };
 
         // Now suppose our user's contribution is:
diff --git a/crates/core/component/dex/src/component/action_handler/position/close.rs b/crates/core/component/dex/src/component/action_handler/position/close.rs
index f6ce7db1fa..9ba244126f 100644
--- a/crates/core/component/dex/src/component/action_handler/position/close.rs
+++ b/crates/core/component/dex/src/component/action_handler/position/close.rs
@@ -24,7 +24,8 @@ impl ActionHandler for PositionClose {
         // during that block's batch swap execution.
         state.queue_close_position(self.position_id);
-        state.record_proto(event::position_close(self));
+        // Record that the close was queued; the position itself is closed later in the block.
+        state.record_proto(event::queue_position_close(self));
 
         Ok(())
     }
diff --git a/crates/core/component/dex/src/component/arb.rs b/crates/core/component/dex/src/component/arb.rs
index b802ca2279..8017d348ca 100644
--- a/crates/core/component/dex/src/component/arb.rs
+++ b/crates/core/component/dex/src/component/arb.rs
@@ -114,7 +114,7 @@ pub trait Arbitrage: StateWrite + Sized {
                 amount: filled_input,
             },
             output: Value {
-                amount: arb_profit,
+                amount: filled_input + arb_profit,
                 asset_id: arb_token,
             },
         };
diff --git a/crates/core/component/dex/src/component/circuit_breaker/value.rs b/crates/core/component/dex/src/component/circuit_breaker/value.rs
index bc5c06a8b1..f911aa8b12 100644
--- a/crates/core/component/dex/src/component/circuit_breaker/value.rs
+++ b/crates/core/component/dex/src/component/circuit_breaker/value.rs
@@ -161,7 +161,7 @@ mod tests {
             unfilled_2: 0u64.into(),
             height: 1,
             trading_pair: pair_1.into_directed_trading_pair().into(),
-            epoch_starting_height: 0,
+            sct_position_prefix: Default::default(),
         },
         None,
         None,
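The `arb.rs` hunk above is the heart of #3790: the recorded `SwapExecution` output is now the full amount out, input plus profit, rather than profit alone. In miniature:

```rust
// Recorded arb output is input + profit, not profit alone (#3790).
fn recorded_arb_output(filled_input: u64, arb_profit: u64) -> u64 {
    filled_input + arb_profit
}

fn main() {
    // Matches the arb-loop test later in this diff:
    // 2_000_000 in with 99_998 profit records 2_099_998 out.
    assert_eq!(recorded_arb_output(2_000_000, 99_998), 2_099_998);
}
```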
@@ -250,7 +250,7 @@
     let routing_params = state.routing_params().await.unwrap();
 
     // This call should panic due to the outflow of gn not being covered by the circuit breaker.
     state
-        .handle_batch_swaps(trading_pair, swap_flow, 0, 0, routing_params)
+        .handle_batch_swaps(trading_pair, swap_flow, 0, routing_params)
         .await
         .expect("unable to process batch swaps");
 }
diff --git a/crates/core/component/dex/src/component/dex.rs b/crates/core/component/dex/src/component/dex.rs
index bdb9ada0c5..0172e34c62 100644
--- a/crates/core/component/dex/src/component/dex.rs
+++ b/crates/core/component/dex/src/component/dex.rs
@@ -7,7 +7,6 @@ use cnidarium_component::Component;
 use penumbra_asset::{asset, Value, STAKING_TOKEN_ASSET_ID};
 use penumbra_num::Amount;
 use penumbra_proto::{StateReadProto, StateWriteProto};
-use penumbra_sct::component::clock::EpochRead;
 use tendermint::v0_37::abci;
 use tracing::instrument;
@@ -56,7 +55,6 @@ impl Component for Dex {
         // 2. For each batch swap during the block, calculate clearing prices and set in the JMT.
-        let current_epoch = state.get_current_epoch().await.expect("epoch is set");
         let routing_params = state.routing_params().await.expect("dex params are set");
 
         for (trading_pair, swap_flows) in state.swap_flows() {
@@ -69,7 +67,6 @@ impl Component for Dex {
                     .height
                     .try_into()
                     .expect("height is part of the end block data"),
-                current_epoch.start_height,
                 // Always include both ends of the target pair as fixed candidates.
                 routing_params
                     .clone()
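The `position_manager.rs` changes that follow document an auto-closing rule for `close_on_fill` positions. A self-contained sketch of just that rule:

```rust
// Close-on-fill in miniature: once execution exhausts either side of the
// reserves, a close_on_fill position flips to Closed on its way to storage.
#[derive(Debug, PartialEq)]
enum State {
    Opened,
    Closed,
}

struct Position {
    state: State,
    close_on_fill: bool,
    r1: u64,
    r2: u64,
}

fn apply_close_on_fill(p: &mut Position) {
    if p.close_on_fill && (p.r1 == 0 || p.r2 == 0) {
        p.state = State::Closed;
    }
}

fn main() {
    let mut p = Position { state: State::Opened, close_on_fill: true, r1: 0, r2: 9 };
    apply_close_on_fill(&mut p);
    assert_eq!(p.state, State::Closed);
}
```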
diff --git a/crates/core/component/dex/src/component/position_manager.rs b/crates/core/component/dex/src/component/position_manager.rs
index cce1925989..cd3e3e4caa 100644
--- a/crates/core/component/dex/src/component/position_manager.rs
+++ b/crates/core/component/dex/src/component/position_manager.rs
@@ -253,22 +253,54 @@ pub trait PositionManager: StateWrite + PositionRead {
     /// Record execution against an opened position.
     ///
+    /// IMPORTANT: This method can mutate its input state.
+    ///
+    /// We return the position that was ultimately written to the state;
+    /// it could differ from the initial input, e.g. if the position is
+    /// auto-closing.
+    ///
+    /// # Context parameter
+    ///
     /// The `context` parameter records the global context of the path in which
     /// the position execution happened. This may be completely different than
     /// the trading pair of the position itself, and is used to link the
     /// micro-scale execution (processed by this method) with the macro-scale
     /// context (a swap or arbitrage).
+    ///
+    /// # Auto-closing positions
+    ///
+    /// Some positions are `close_on_fill`, i.e. they are programmed to close after
+    /// execution exhausts either side of their reserves. This method returns the
+    /// position that was written to the chain state, making it possible for callers
+    /// to inspect any change that has occurred during execution handling.
     #[tracing::instrument(level = "debug", skip_all)]
     async fn position_execution(
         &mut self,
         mut new_state: Position,
         context: DirectedTradingPair,
-    ) -> Result<()> {
+    ) -> Result<Position> {
+        let position_id = new_state.id();
         let prev_state = self
-            .position_by_id(&new_state.id())
+            .position_by_id(&position_id)
             .await?
             .ok_or_else(|| anyhow::anyhow!("withdrew from unknown position {}", new_state.id()))?;
 
+        // Optimization: it's possible that the position's reserves haven't
+        // changed, and that we're about to do a no-op update. This can happen
+        // when saving a frontier, for instance, since the FillRoute code saves
+        // the entire frontier when it finishes.
+        //
+        // If so, skip the write, but more importantly, skip emitting an event,
+        // so tooling doesn't get confused about a no-op execution.
+        if prev_state == new_state {
+            anyhow::ensure!(
+                matches!(&prev_state.state, position::State::Opened | position::State::Closed),
+                "attempted to do a no-op execution against a position with state {:?}, expected Opened or Closed",
+                prev_state.state
+            );
+            return Ok(new_state);
+        }
+
         anyhow::ensure!(
             matches!(&prev_state.state, position::State::Opened),
             "attempted to execute against a position with state {:?}, expected Opened",
@@ -280,33 +312,27 @@
             prev_state.state
         );
 
+        // We have already short-circuited no-op execution updates, so we can emit an execution
+        // event and not worry about duplicates.
+        self.record_proto(event::position_execution(&prev_state, &new_state, context));
+
         // Handle "close-on-fill": automatically flip the position state to "closed" if
         // either of the reserves are zero.
         if new_state.close_on_fill {
             if new_state.reserves.r1 == 0u64.into() || new_state.reserves.r2 == 0u64.into() {
                 tracing::debug!(
-                    id = ?new_state.id(),
+                    ?position_id,
                     r1 = ?new_state.reserves.r1,
                     r2 = ?new_state.reserves.r2,
                     "marking position as closed due to close-on-fill"
                 );
+                new_state.state = position::State::Closed;
+                self.record_proto(event::position_close_by_id(position_id));
             }
         }
 
-        // Optimization: it's possible that the position's reserves haven't
-        // changed, and that we're about to do a no-op update. This can happen
-        // when saving a frontier, for instance, since the FillRoute code saves
-        // the entire frontier when it finishes.
-        //
-        // If so, skip the write, but more importantly, skip emitting an event,
-        // so tooling doesn't get confused about a no-op execution.
-        if prev_state != new_state {
-            self.record_proto(event::position_execution(&prev_state, &new_state, context));
-            self.update_position(Some(prev_state), new_state).await?;
-        }
-
-        Ok(())
+        self.update_position(Some(prev_state), new_state).await
     }
 
     /// Withdraw from a closed position, incrementing its sequence number.
@@ -401,7 +427,7 @@ trait Inner: StateWrite {
         &mut self,
         prev_state: Option<Position>,
         new_state: Position,
-    ) -> Result<()> {
+    ) -> Result<Position> {
         tracing::debug!(?prev_state, ?new_state, "updating position state");
 
         let id = new_state.id();
@@ -417,8 +443,8 @@ trait Inner: StateWrite {
         self.update_trading_pair_position_counter(&prev_state, &new_state, &id)
             .await?;
 
-        self.put(state_key::position_by_id(&id), new_state);
-        Ok(())
+        self.put(state_key::position_by_id(&id), new_state.clone());
+        Ok(new_state)
     }
 
     fn guard_invalid_transitions(
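The signature change from `Result<()>` to `Result<Position>` is the contract that makes the rest of this diff hang together: callers see exactly the state that was persisted, auto-close included. A toy model:

```rust
// Toy model of the new contract: the write returns the state that was
// actually persisted, which may differ from what the caller passed in.
#[derive(Clone, Debug, PartialEq)]
struct Position {
    closed: bool,
    reserves: (u64, u64),
}

fn update_position(store: &mut Vec<Position>, idx: usize, mut new_state: Position) -> Position {
    // Auto-close may rewrite the state on its way to storage.
    if new_state.reserves.0 == 0 || new_state.reserves.1 == 0 {
        new_state.closed = true;
    }
    store[idx] = new_state.clone();
    new_state
}

fn main() {
    let mut store = vec![Position { closed: false, reserves: (5, 5) }];
    let written = update_position(&mut store, 0, Position { closed: false, reserves: (0, 9) });
    assert!(written.closed); // the caller learns about the auto-close
    assert_eq!(store[0], written); // cache and storage can now agree
}
```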
diff --git a/crates/core/component/dex/src/component/position_manager/base_liquidity_index.rs b/crates/core/component/dex/src/component/position_manager/base_liquidity_index.rs
index d286a50753..f2908e3b28 100644
--- a/crates/core/component/dex/src/component/position_manager/base_liquidity_index.rs
+++ b/crates/core/component/dex/src/component/position_manager/base_liquidity_index.rs
@@ -25,6 +25,21 @@ pub(crate) trait AssetByLiquidityIndex: StateWrite {
     /// - An auxiliary index that maps a directed trading pair `A -> B`
     ///   to the aggregate liquidity for B -> A (used in the primary composite key)
     ///
+    /// If we want liquidity rankings for assets adjacent to A, the ranking has to be
+    /// denominated in asset A, since that’s the only way to get commensurability when
+    /// ranking B, C, D, E, etc.
+    ///
+    /// There are then two possible amounts to consider for an asset B: (1) the amount
+    /// of A that can be sold for B, and (2) the amount of A that can be bought with B.
+    ///
+    /// (1), the amount that can be sold (“outbound”), is the wrong thing to use.
+    /// (2), the amount that can be bought, is intuitively the “opposite” of what we
+    /// want, since it’s the reverse direction, but is actually the right thing to use
+    /// as a rough proxy for liquidity.
+    ///
+    /// The reason is that (1) can be easily manipulated without any skin in the game,
+    /// by offering to sell a tiny amount of B for A at an outrageous/infinite price.
+    ///
     /// # Diagram
     ///
     /// Liquidity index:
diff --git a/crates/core/component/dex/src/component/router/fill_route.rs b/crates/core/component/dex/src/component/router/fill_route.rs
index ac35ab746a..eb618b0968 100644
--- a/crates/core/component/dex/src/component/router/fill_route.rs
+++ b/crates/core/component/dex/src/component/router/fill_route.rs
@@ -495,11 +495,17 @@ impl Frontier {
             start: self.pairs.first().expect("pairs is nonempty").start,
             end: self.pairs.last().expect("pairs is nonempty").end,
         };
-        self.state
+        let updated_position = self
+            .state
             .position_execution(self.positions[index].clone(), context)
             .await
             .expect("writing to storage should not fail");
 
+        // We update the frontier cache with the updated state of the position we
+        // want to discard. This protects us from cache incoherency in case we do not
+        // find a suitable replacement for that position.
+        self.positions[index] = updated_position;
+
         loop {
             let pair = &self.pairs[index];
             let next_position_id = match self
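`fill_route` then consumes the returned value directly: folding the written state back into the frontier cache is what keeps cache and storage in lockstep. Schematically:

```rust
// Schematic of the frontier fix: always write the returned (persisted) state
// back into the local cache, even for a position about to be replaced.
#[derive(Clone, Debug, PartialEq)]
struct Position {
    closed: bool,
}

fn position_execution(storage: &mut Position) -> Position {
    storage.closed = true; // e.g. the position auto-closed during execution
    storage.clone()
}

fn main() {
    let mut storage = Position { closed: false };
    let mut frontier = vec![storage.clone()];
    let updated = position_execution(&mut storage);
    frontier[0] = updated; // without this, the cache would go stale
    assert_eq!(frontier[0], storage);
}
```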
diff --git a/crates/core/component/dex/src/component/router/route_and_fill.rs b/crates/core/component/dex/src/component/router/route_and_fill.rs
index b18a786200..41445ae23c 100644
--- a/crates/core/component/dex/src/component/router/route_and_fill.rs
+++ b/crates/core/component/dex/src/component/router/route_and_fill.rs
@@ -5,6 +5,7 @@ use async_trait::async_trait;
 use cnidarium::StateWrite;
 use penumbra_asset::{asset, Value};
 use penumbra_num::Amount;
+use penumbra_sct::component::clock::EpochRead;
 use tracing::instrument;
 
 use crate::{
@@ -23,21 +24,13 @@ use super::fill_route::FillError;
 /// a block's batch swap flows.
 #[async_trait]
 pub trait HandleBatchSwaps: StateWrite + Sized {
-    #[instrument(skip(
-        self,
-        trading_pair,
-        batch_data,
-        block_height,
-        epoch_starting_height,
-        params
-    ))]
+    #[instrument(skip(self, trading_pair, batch_data, block_height, params))]
     async fn handle_batch_swaps(
         self: &mut Arc<Self>,
         trading_pair: TradingPair,
         batch_data: SwapFlow,
-        // TODO: why not read these 2 from the state?
+        // This will be read from the ABCI request.
         block_height: u64,
-        epoch_starting_height: u64,
         params: RoutingParams,
     ) -> Result<()>
     where
@@ -95,9 +88,9 @@ pub trait HandleBatchSwaps: StateWrite + Sized {
             ),
             None => (0u64.into(), delta_2),
         };
+        let epoch = self.get_current_epoch().await.expect("epoch is set");
         let output_data = BatchSwapOutputData {
             height: block_height,
-            epoch_starting_height,
             trading_pair,
             delta_1,
             delta_2,
@@ -105,6 +98,15 @@ pub trait HandleBatchSwaps: StateWrite + Sized {
             lambda_2,
             unfilled_1,
             unfilled_2,
+            sct_position_prefix: (
+                u16::try_from(epoch.index).expect("epoch index should be small enough"),
+                // The block index is determined by looking at how many blocks have elapsed since
+                // the start of the epoch.
+                u16::try_from(block_height - epoch.start_height)
+                    .expect("block index should be small enough"),
+                0,
+            )
+                .into(),
         };
 
         // Fetch the swap execution object that should have been modified during the routing and filling.
diff --git a/crates/core/component/dex/src/component/router/tests.rs b/crates/core/component/dex/src/component/router/tests.rs
index ccb530b738..0c26b602f6 100644
--- a/crates/core/component/dex/src/component/router/tests.rs
+++ b/crates/core/component/dex/src/component/router/tests.rs
@@ -1024,7 +1024,7 @@ async fn best_position_route_and_fill() -> anyhow::Result<()> {
         .unwrap();
     let routing_params = state.routing_params().await.unwrap();
     state
-        .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), 0, routing_params)
+        .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), routing_params)
         .await
         .expect("unable to process batch swaps");
 
@@ -1165,7 +1165,7 @@ async fn multi_hop_route_and_fill() -> anyhow::Result<()> {
         .unwrap();
     let routing_params = state.routing_params().await.unwrap();
     state
-        .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), 0, routing_params)
+        .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), routing_params)
         .await
         .expect("unable to process batch swaps");
 
diff --git a/crates/core/component/dex/src/component/rpc.rs b/crates/core/component/dex/src/component/rpc.rs
index ed3e6914d0..545e4e48cb 100644
--- a/crates/core/component/dex/src/component/rpc.rs
+++ b/crates/core/component/dex/src/component/rpc.rs
@@ -362,8 +362,12 @@ impl QueryService for Server {
                 anyhow::Ok(position)
             }
         })
-        .map_ok(|position| LiquidityPositionsByPriceResponse {
-            data: Some(position.into()),
+        .map_ok(|position| {
+            let id = position.id();
+            LiquidityPositionsByPriceResponse {
+                data: Some(position.into()),
+                id: Some(id.into()),
+            }
         })
         .map_err(|e: anyhow::Error| {
             tonic::Status::internal(format!("error retrieving positions: {:#}", e))
diff --git a/crates/core/component/dex/src/component/tests.rs b/crates/core/component/dex/src/component/tests.rs
index b910c0998b..ae7bb3a527 100644
--- a/crates/core/component/dex/src/component/tests.rs
+++ b/crates/core/component/dex/src/component/tests.rs
@@ -632,7 +632,7 @@ async fn swap_execution_tests() -> anyhow::Result<()> {
         .unwrap();
     let routing_params = state.routing_params().await.unwrap();
     state
-        .handle_batch_swaps(trading_pair, swap_flow, 0, 0, routing_params)
+        .handle_batch_swaps(trading_pair, swap_flow, 0, routing_params)
         .await
         .expect("unable to process batch swaps");
 
@@ -740,7 +740,7 @@ async fn swap_execution_tests() -> anyhow::Result<()> {
         .unwrap();
     let routing_params = state.routing_params().await.unwrap();
     state
-        .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), 0, routing_params)
+        .handle_batch_swaps(trading_pair, swap_flow, 0u32.into(), routing_params)
         .await
         .expect("unable to process batch swaps");
 
@@ -756,8 +756,8 @@ async fn swap_execution_tests() -> anyhow::Result<()> {
             unfilled_1: 0u32.into(),
             unfilled_2: 0u32.into(),
             height: 0,
-            epoch_starting_height: 0,
             trading_pair,
+            sct_position_prefix: Default::default(),
         }
     );
 
@@ -866,6 +866,10 @@ async fn basic_cycle_arb() -> anyhow::Result<()> {
 /// The issue was that we did not treat the spill price as a strict
 /// upper bound, which is necessary to ensure that the arbitrage logic
 /// terminates.
+///
+/// *Arbitrage execution record bug:*
+/// This test also ensures that the created `SwapExecution` has the
+/// correct data. (See #3790).
async fn reproduce_arbitrage_loop_testnet_53() -> anyhow::Result<()> { let _ = tracing_subscriber::fmt::try_init(); let storage = TempStorage::new().await?.apply_minimal_genesis().await?; @@ -953,5 +959,187 @@ async fn reproduce_arbitrage_loop_testnet_53() -> anyhow::Result<()> { tracing::info!("fetching the `ArbExecution`"); let arb_execution = state.arb_execution(0).await?.expect("arb was performed"); tracing::info!(?arb_execution, "fetched arb execution!"); + + // Validate that the arb execution has the correct data: + // Validate the traces. + assert_eq!( + arb_execution.traces, + vec![ + vec![ + penumbra.value(1u32.into()), + test_usd.value(110u32.into()), + Value { + amount: 1099999u64.into(), + asset_id: penumbra.id() + } + ], + vec![ + penumbra.value(1u32.into()), + test_usd.value(100u32.into()), + Value { + amount: 999999u64.into(), + asset_id: penumbra.id() + } + ] + ] + ); + + // Validate the input/output of the arb execution: + assert_eq!( + arb_execution.input, + Value { + amount: 2000000u64.into(), + asset_id: penumbra.id(), + } + ); + assert_eq!( + arb_execution.output, + Value { + amount: 2099998u64.into(), + asset_id: penumbra.id(), + } + ); + + Ok(()) +} + +#[tokio::test] +/// Confirms the ordering of routable assets returns the assets +/// with the most liquidity first, as discovered in https://github.com/penumbra-zone/penumbra/issues/4189 +/// For the purposes of this test, it is important to remember +/// that for a trade routing from A -> *, candidate liquidity is +/// the amount of A purchaseable with the candidate assets, i.e. the amount of +/// A in the reserves for any A <-> * positions. +async fn check_routable_asset_ordering() -> anyhow::Result<()> { + let storage = TempStorage::new().await?.apply_minimal_genesis().await?; + let mut state = Arc::new(StateDelta::new(storage.latest_snapshot())); + let mut state_tx = state.try_begin_transaction().unwrap(); + + let penumbra = asset::Cache::with_known_assets() + .get_unit("penumbra") + .unwrap(); + let test_usd = asset::Cache::with_known_assets() + .get_unit("test_usd") + .unwrap(); + let test_btc = asset::Cache::with_known_assets() + .get_unit("test_btc") + .unwrap(); + let gn = asset::Cache::with_known_assets().get_unit("gn").unwrap(); + let gm = asset::Cache::with_known_assets().get_unit("gm").unwrap(); + + let penumbra_usd = DirectedTradingPair::new(penumbra.id(), test_usd.id()); + + let reserves_1 = Reserves { + // 0 penumbra + r1: 0u64.into(), + // 120,000 test_usd + r2: 120_000u64.into(), + }; + + let position_1 = Position::new( + OsRng, + penumbra_usd, + 0u32, + 1_200_000u64.into(), + 1_000_000u64.into(), + reserves_1, + ); + + state_tx.open_position(position_1).await.unwrap(); + + let penumbra_gn = DirectedTradingPair::new(penumbra.id(), gn.id()); + + let reserves_2 = Reserves { + // 130,000 penumbra + r1: 130_000u64.into(), + // 0 gn + r2: 0u64.into(), + }; + + let position_2 = Position::new( + OsRng, + penumbra_gn, + 0u32, + 1_200_000u64.into(), + 1_000_000u64.into(), + reserves_2, + ); + + state_tx.open_position(position_2).await.unwrap(); + + let penumbra_btc = DirectedTradingPair::new(penumbra.id(), test_btc.id()); + + let reserves_3 = Reserves { + // 100,000 penumbra + r1: 100_000u64.into(), + // 50,000 test_btc + r2: 50_000u64.into(), + }; + + let position_3 = Position::new( + OsRng, + penumbra_btc, + 0u32, + 1_200_000u64.into(), + 1_000_000u64.into(), + reserves_3, + ); + + state_tx.open_position(position_3).await.unwrap(); + + let btc_gm = DirectedTradingPair::new(test_btc.id(), gm.id()); + + let 
reserves_4 = Reserves {
+        // 100,000 test_btc
+        r1: 100_000u64.into(),
+        // 100,000 gm
+        r2: 100_000u64.into(),
+    };
+
+    let position_4 = Position::new(
+        OsRng,
+        btc_gm,
+        0u32,
+        1_200_000u64.into(),
+        1_000_000u64.into(),
+        reserves_4,
+    );
+
+    state_tx.open_position(position_4).await.unwrap();
+    state_tx.apply();
+
+    // Expected: GN reserves > BTC reserves, and USD/gm should not appear.
+
+    // Find routable assets starting at the Penumbra asset.
+    let routable_assets: Vec<_> = state
+        .ordered_routable_assets(&penumbra.id())
+        .collect::<Vec<_>>()
+        .await;
+    let routable_assets = routable_assets
+        .into_iter()
+        .collect::<anyhow::Result<Vec<_>>>()?;
+
+    assert!(
+        routable_assets.len() == 2,
+        "expected 2 routable assets, got {}",
+        routable_assets.len()
+    );
+
+    let first = routable_assets[0];
+    let second = routable_assets[1];
+    assert!(
+        first == gn.id(),
+        "expected GN ({}) to be the first routable asset, got {}",
+        gn.id(),
+        first.clone()
+    );
+
+    assert!(
+        second == test_btc.id(),
+        "expected BTC ({}) to be the second routable asset, got {}",
+        test_btc.id(),
+        second.clone()
+    );
+
     Ok(())
 }
diff --git a/crates/core/component/dex/src/event.rs b/crates/core/component/dex/src/event.rs
index 8aae026868..64df9a8c62 100644
--- a/crates/core/component/dex/src/event.rs
+++ b/crates/core/component/dex/src/event.rs
@@ -40,14 +40,24 @@ pub fn position_open(position: &Position) -> pb::EventPositionOpen {
     }
 }
 
+pub fn position_close_by_id(id: position::Id) -> pb::EventPositionClose {
+    pb::EventPositionClose {
+        position_id: Some(id.into()),
+    }
+}
+
 pub fn position_close(action: &PositionClose) -> pb::EventPositionClose {
-    // TODO: should we have another event triggered by the position manager for when
-    // the position is actually closed?
     pb::EventPositionClose {
         position_id: Some(action.position_id.into()),
     }
 }
 
+pub fn queue_position_close(action: &PositionClose) -> pb::EventQueuePositionClose {
+    pb::EventQueuePositionClose {
+        position_id: Some(action.position_id.into()),
+    }
+}
+
 pub fn position_withdraw(
     position_id: position::Id,
     final_position_state: &Position,
diff --git a/crates/core/component/dex/src/state_key.rs b/crates/core/component/dex/src/state_key.rs
index 6b302fd46e..20a616401b 100644
--- a/crates/core/component/dex/src/state_key.rs
+++ b/crates/core/component/dex/src/state_key.rs
@@ -129,8 +129,9 @@ pub(crate) mod engine {
         pub(crate) fn key(from: &asset::Id, a_from_b: Amount) -> [u8; 55] {
             let mut key = [0u8; 55];
             key[0..7].copy_from_slice(b"dex/ra/");
-            key[7..39].copy_from_slice(&from.to_bytes());
-            key[39..55].copy_from_slice(&a_from_b.to_be_bytes());
+            key[7..32 + 7].copy_from_slice(&from.to_bytes());
+            // Use the complement of the amount to ensure that the keys are ordered in descending order.
+ key[32 + 7..32 + 7 + 16].copy_from_slice(&(!a_from_b).to_be_bytes()); key } diff --git a/crates/core/component/dex/src/swap/plaintext.rs b/crates/core/component/dex/src/swap/plaintext.rs index 8bde2f5db6..45e5ce66b5 100644 --- a/crates/core/component/dex/src/swap/plaintext.rs +++ b/crates/core/component/dex/src/swap/plaintext.rs @@ -70,7 +70,7 @@ impl SwapPlaintext { batch_data.pro_rata_outputs((self.delta_1_i, self.delta_2_i)); let output_1_note = Note::from_parts( - self.claim_address, + self.claim_address.clone(), Value { amount: lambda_1_i, asset_id: self.trading_pair.asset_1(), @@ -80,7 +80,7 @@ impl SwapPlaintext { .expect("claim address is valid"); let output_2_note = Note::from_parts( - self.claim_address, + self.claim_address.clone(), Value { amount: lambda_2_i, asset_id: self.trading_pair.asset_2(), @@ -344,7 +344,7 @@ impl From<&SwapPlaintext> for [u8; SWAP_LEN_BYTES] { bytes[80..96].copy_from_slice(&swap.delta_2_i.to_le_bytes()); bytes[96..112].copy_from_slice(&swap.claim_fee.0.amount.to_le_bytes()); bytes[112..144].copy_from_slice(&swap.claim_fee.0.asset_id.to_bytes()); - let pb_address = pb_keys::Address::from(swap.claim_address); + let pb_address = pb_keys::Address::from(swap.claim_address.clone()); bytes[144..224].copy_from_slice(&pb_address.inner); bytes[224..256].copy_from_slice(&swap.rseed.to_bytes()); bytes diff --git a/crates/core/component/dex/src/swap_claim/proof.rs b/crates/core/component/dex/src/swap_claim/proof.rs index c8698d131c..7d86071e65 100644 --- a/crates/core/component/dex/src/swap_claim/proof.rs +++ b/crates/core/component/dex/src/swap_claim/proof.rs @@ -123,11 +123,16 @@ fn check_satisfaction( anyhow::bail!("claim fee did not match public input"); } - let block: u64 = private.state_commitment_proof.position().block().into(); - let note_commitment_block_height: u64 = public.output_data.epoch_starting_height + block; - if note_commitment_block_height != public.output_data.height { - anyhow::bail!("swap commitment height did not match public input"); - } + anyhow::ensure!( + private.state_commitment_proof.position().block() + == public.output_data.sct_position_prefix.block(), + "scm block did not match batch swap" + ); + anyhow::ensure!( + private.state_commitment_proof.position().epoch() + == public.output_data.sct_position_prefix.epoch(), + "scm epoch did not match batch swap" + ); if private.swap_plaintext.trading_pair != public.output_data.trading_pair { anyhow::bail!("trading pair did not match public input"); @@ -255,12 +260,12 @@ impl ConstraintSynthesizer for SwapClaimCircuit { claimed_fee_var.enforce_equal(&swap_plaintext_var.claim_fee)?; // Validate the swap commitment's height matches the output data's height (i.e. the clearing price height). - let block = position_var.block()?; - let note_commitment_block_height_var = - output_data_var.epoch_starting_height.clone() + block; output_data_var - .height - .enforce_equal(¬e_commitment_block_height_var)?; + .block_within_epoch + .enforce_equal(&position_var.block()?)?; + output_data_var + .epoch + .enforce_equal(&position_var.epoch()?)?; // Validate that the output data's trading pair matches the note commitment's trading pair. 
output_data_var @@ -359,7 +364,7 @@ impl DummyWitness for SwapClaimCircuit { unfilled_2: Amount::from(10u64), height: 0, trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: 0, + sct_position_prefix: Default::default(), }; let note_blinding_1 = Fq::from(1); let note_blinding_2 = Fq::from(1); @@ -642,7 +647,7 @@ mod tests { unfilled_2: test_bsod.unfilled_2, height: height.into(), trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: (epoch_duration * position.epoch()).into(), + sct_position_prefix: Default::default(), }; let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i)); @@ -774,7 +779,7 @@ mod tests { unfilled_2: test_bsod.unfilled_2, height: height.into(), trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: (epoch_duration * position.epoch()).into(), + sct_position_prefix: Default::default() }; let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i)); @@ -874,7 +879,7 @@ mod tests { unfilled_2: test_bsod.unfilled_2, height: height.into(), trading_pair: swap_plaintext.trading_pair, - epoch_starting_height: (epoch_duration * dummy_position.epoch()).into(), + sct_position_prefix: Default::default() }; let (lambda_1, lambda_2) = output_data.pro_rata_outputs((delta_1_i, delta_2_i)); diff --git a/crates/core/component/governance/src/delegator_vote/proof.rs b/crates/core/component/governance/src/delegator_vote/proof.rs index 201afd1ed2..f4a61fe267 100644 --- a/crates/core/component/governance/src/delegator_vote/proof.rs +++ b/crates/core/component/governance/src/delegator_vote/proof.rs @@ -453,7 +453,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -469,7 +469,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("can insert note commitment into SCT"); } @@ -480,7 +480,7 @@ mod tests { // All proposals should have a position commitment index of zero, so we need to end the epoch // and get the position that corresponds to the first commitment in the new epoch. 
sct.end_epoch().expect("should be able to end an epoch"); - let first_note_commitment = Note::from_parts(sender, value_to_send, Rseed([u8::MAX; 32])).expect("can create note").commit(); + let first_note_commitment = Note::from_parts(sender.clone(), value_to_send, Rseed([u8::MAX; 32])).expect("can create note").commit(); sct.insert(tct::Witness::Keep, first_note_commitment).expect("can insert note commitment into SCT"); let start_position = sct.witness(first_note_commitment).expect("can witness note commitment").position(); @@ -529,7 +529,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -545,7 +545,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("can insert note commitment into SCT"); } diff --git a/crates/core/component/shielded-pool/src/component/note_manager.rs b/crates/core/component/shielded-pool/src/component/note_manager.rs index dd221fac73..a4d13ef343 100644 --- a/crates/core/component/shielded-pool/src/component/note_manager.rs +++ b/crates/core/component/shielded-pool/src/component/note_manager.rs @@ -52,7 +52,7 @@ pub trait NoteManager: StateWrite { .as_bytes()[0..32] .try_into()?; - let note = Note::from_parts(*address, value, Rseed(rseed_bytes))?; + let note = Note::from_parts(address.clone(), value, Rseed(rseed_bytes))?; self.add_note_payload(note.payload(), source).await; Ok(()) diff --git a/crates/core/component/shielded-pool/src/note.rs b/crates/core/component/shielded-pool/src/note.rs index 930cb089de..c1575443b6 100644 --- a/crates/core/component/shielded-pool/src/note.rs +++ b/crates/core/component/shielded-pool/src/note.rs @@ -137,7 +137,7 @@ impl Note { Ok(Note { value, rseed, - address, + address: address.clone(), transmission_key_s: Fq::from_bytes(address.transmission_key().0) .map_err(|_| Error::InvalidTransmissionKey)?, }) @@ -155,12 +155,12 @@ impl Note { /// random blinding factor. 
pub fn generate(rng: &mut (impl Rng + CryptoRng), address: &Address, value: Value) -> Self { let rseed = Rseed::generate(rng); - Note::from_parts(*address, value, rseed) + Note::from_parts(address.clone(), value, rseed) .expect("transmission key in address is always valid") } pub fn address(&self) -> Address { - self.address + self.address.clone() } pub fn diversified_generator(&self) -> decaf377::Element { diff --git a/crates/core/component/shielded-pool/src/output/plan.rs b/crates/core/component/shielded-pool/src/output/plan.rs index ef7308294f..86e31817f3 100644 --- a/crates/core/component/shielded-pool/src/output/plan.rs +++ b/crates/core/component/shielded-pool/src/output/plan.rs @@ -68,7 +68,7 @@ impl OutputPlan { } pub fn output_note(&self) -> Note { - Note::from_parts(self.dest_address, self.value, self.rseed) + Note::from_parts(self.dest_address.clone(), self.value, self.rseed) .expect("transmission key in address is always valid") } diff --git a/crates/core/component/shielded-pool/src/spend/proof.rs b/crates/core/component/shielded-pool/src/spend/proof.rs index a818fc8267..8852ad07c6 100644 --- a/crates/core/component/shielded-pool/src/spend/proof.rs +++ b/crates/core/component/shielded-pool/src/spend/proof.rs @@ -429,7 +429,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -445,7 +445,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } @@ -498,7 +498,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -514,7 +514,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } let incorrect_anchor = sct.root(); @@ -641,7 +641,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -657,12 +657,12 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } // 
Insert one more note commitment and witness it. let rseed = Rseed([num_commitments as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); let incorrect_position = sct.witness(dummy_note_commitment).expect("can witness note commitment").position(); @@ -715,7 +715,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -731,7 +731,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } @@ -784,7 +784,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -799,7 +799,7 @@ mod tests { for i in 0..num_commitments { // To avoid duplicate note commitments, we use the `i` counter as the Rseed randomness let rseed = Rseed([i as u8; 32]); - let dummy_note_commitment = Note::from_parts(sender, value_to_send, rseed).expect("can create note").commit(); + let dummy_note_commitment = Note::from_parts(sender.clone(), value_to_send, rseed).expect("can create note").commit(); sct.insert(tct::Witness::Keep, dummy_note_commitment).expect("should be able to insert note commitments into the SCT"); } @@ -853,7 +853,7 @@ mod tests { asset_id: asset::Id(Fq::from(asset_id64)), }; let note = Note::from_parts( - sender, + sender.clone(), value_to_send, Rseed(rseed_randomness), ).expect("should be able to create note"); @@ -1015,7 +1015,7 @@ mod tests { let mut sct = tct::Tree::new(); for _ in 0..5 { - let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Keep, note_commitment).unwrap(); let anchor = sct.root(); let state_commitment_proof = sct.witness(note_commitment).unwrap(); @@ -1043,12 +1043,12 @@ mod tests { sct.end_block().expect("can end block"); for _ in 0..100 { - let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Forget, note_commitment).unwrap(); } for _ in 0..5 { - let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Keep, note_commitment).unwrap(); let anchor = sct.root(); let state_commitment_proof = sct.witness(note_commitment).unwrap(); @@ -1076,12 +1076,12 @@ mod tests { sct.end_epoch().expect("can end epoch"); for _ in 0..100 { - let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Forget, note_commitment).unwrap(); } for _ in 0..5 { - 
let note_commitment = make_random_note_commitment(address); + let note_commitment = make_random_note_commitment(address.clone()); sct.insert(tct::Witness::Keep, note_commitment).unwrap(); let anchor = sct.root(); let state_commitment_proof = sct.witness(note_commitment).unwrap(); diff --git a/crates/core/component/stake/src/component/rpc.rs b/crates/core/component/stake/src/component/rpc.rs index 2a4432024b..ad7a27ad87 100644 --- a/crates/core/component/stake/src/component/rpc.rs +++ b/crates/core/component/stake/src/component/rpc.rs @@ -1,22 +1,23 @@ use std::pin::Pin; -use async_stream::try_stream; use cnidarium::Storage; -use futures::{StreamExt, TryStreamExt}; +use futures::StreamExt; use penumbra_proto::{ core::component::stake::v1::{ query_service_server::QueryService, CurrentValidatorRateRequest, - CurrentValidatorRateResponse, ValidatorInfoRequest, ValidatorInfoResponse, - ValidatorPenaltyRequest, ValidatorPenaltyResponse, ValidatorStatusRequest, - ValidatorStatusResponse, + CurrentValidatorRateResponse, GetValidatorInfoRequest, GetValidatorInfoResponse, + ValidatorInfoRequest, ValidatorInfoResponse, ValidatorPenaltyRequest, + ValidatorPenaltyResponse, ValidatorStatusRequest, ValidatorStatusResponse, + ValidatorUptimeRequest, ValidatorUptimeResponse, }, DomainType, }; +use tap::{TapFallible, TapOptional}; use tonic::Status; -use tracing::instrument; +use tracing::{error_span, instrument, Instrument, Span}; -use super::{validator_handler::ValidatorDataRead, SlashingData}; -use crate::validator; +use super::{validator_handler::ValidatorDataRead, ConsensusIndexRead, SlashingData}; +use crate::validator::{Info, State}; // TODO: Hide this and only expose a Router? pub struct Server { @@ -31,6 +32,38 @@ impl Server { #[tonic::async_trait] impl QueryService for Server { + #[instrument(skip(self, request))] + async fn get_validator_info( + &self, + request: tonic::Request<GetValidatorInfoRequest>, + ) -> Result<tonic::Response<GetValidatorInfoResponse>, tonic::Status> { + let state = self.storage.latest_snapshot(); + let GetValidatorInfoRequest { identity_key } = request.into_inner(); + + // Take the identity key from the inbound request. + let identity_key = identity_key + .ok_or_else(|| Status::invalid_argument("an identity key must be provided"))? + .try_into() + .tap_err(|error| tracing::debug!(?error, "request contained an invalid identity key")) + .map_err(|_| Status::invalid_argument("invalid identity key"))?; + + // Look up the information for the validator with the given identity key. + let info = state + .get_validator_info(&identity_key) + .await + .tap_err(|error| tracing::error!(?error, %identity_key, "failed to get validator info")) + .map_err(|_| Status::invalid_argument("failed to get validator info"))? + .tap_none(|| tracing::debug!(%identity_key, "validator info was not found")) + .ok_or_else(|| Status::not_found("validator info was not found"))?; + + // Construct the outbound response. + let resp = GetValidatorInfoResponse { + validator_info: Some(info.to_proto()), + }; + + Ok(tonic::Response::new(resp)) + } + type ValidatorInfoStream = Pin<Box<dyn futures::Stream<Item = Result<ValidatorInfoResponse, tonic::Status>> + Send>>; @@ -39,38 +72,67 @@ impl QueryService for Server { &self, request: tonic::Request<ValidatorInfoRequest>, ) -> Result<tonic::Response<Self::ValidatorInfoStream>, Status> { - let state = self.storage.latest_snapshot(); + use futures::TryStreamExt; + + // Get the latest snapshot from the backing storage, and determine whether or not the + // response should include inactive validator definitions.
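The `get_validator_info` handler above leans on the `tap` crate to attach tracing to each failure path without disturbing the `?`-chain: `TapFallible::tap_err` peeks at the `Err` branch of a `Result` and `TapOptional::tap_none` peeks at a `None`, each passing the value through unchanged. A self-contained sketch of the same shape; the lookup function and error strings are stand-ins:

```rust
use tap::{TapFallible, TapOptional};

// Stand-in for a state lookup that can fail, then may find nothing.
fn lookup(key: &str) -> Result<Option<u64>, String> {
    match key {
        "known" => Ok(Some(42)),
        "missing" => Ok(None),
        _ => Err("storage error".to_string()),
    }
}

fn handler(key: &str) -> Result<u64, String> {
    lookup(key)
        // Log the error, then keep propagating it.
        .tap_err(|error| eprintln!("lookup failed: {error}"))
        .map_err(|_| "internal error".to_string())?
        // Log the absence, then turn it into a not-found error.
        .tap_none(|| eprintln!("no value for {key}"))
        .ok_or_else(|| "not found".to_string())
}

fn main() {
    assert_eq!(handler("known"), Ok(42));
    assert!(handler("missing").is_err());
    assert!(handler("nope").is_err());
}
```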
+ let snapshot = self.storage.latest_snapshot(); + let ValidatorInfoRequest { show_inactive } = request.into_inner(); + + // Returns `true` if we should include a validator in the outbound response. + let filter_inactive = move |info: &Info| { + let should = match info.status.state { + State::Active => true, + _ if show_inactive => true, // Include other validators if the request asked us to. + _ => false, // Otherwise, skip this entry. + }; + futures::future::ready(should) + }; - let validators = state - .validator_definitions() // TODO(erwan): think through a UX for defined validators. Then we can remove `validator_list` entirely. - .await - .map_err(|e| tonic::Status::unavailable(format!("error listing validators: {e}")))?; + // Converts information about a validator into a RPC response. + let to_resp = |info: Info| { + let validator_info = Some(info.to_proto()); + ValidatorInfoResponse { validator_info } + }; + + // Creates a span that follows from the current tracing context. + let make_span = |identity_key| -> Span { + let span = error_span!("fetching validator information", %identity_key); + let current = Span::current(); + span.follows_from(current); + span + }; - let show_inactive = request.get_ref().show_inactive; - let s = try_stream! { - for v in validators { - let info = state.get_validator_info(&v.identity_key) + // Get a stream of identity keys corresponding to validators in the consensus set. + let consensus_set = snapshot + .consensus_set_stream() + .map_err(|e| format!("error getting consensus set: {e}")) + .map_err(Status::unavailable)?; + + // Adapt the stream of identity keys into a stream of validator information. + // Define a span indicating that the spawned future follows from the current context. + let validators = async_stream::try_stream! { + for await identity_key in consensus_set { + let identity_key = identity_key?; + let span = make_span(identity_key); + yield snapshot + .get_validator_info(&identity_key) + .instrument(span) .await? .expect("known validator must be present"); - // Slashed and inactive validators are not shown by default. - if !show_inactive && info.status.state != validator::State::Active { - continue; - } - yield info.to_proto(); } }; - Ok(tonic::Response::new( - s.map_ok(|info| ValidatorInfoResponse { - validator_info: Some(info), - }) - .map_err(|e: anyhow::Error| { - tonic::Status::unavailable(format!("error getting validator info: {e}")) - }) - // TODO: how do we instrument a Stream - //.instrument(Span::current()) - .boxed(), - )) + // Construct the outbound response. + let stream = validators + .try_filter(filter_inactive) + .map_ok(to_resp) + .map_err(|e: anyhow::Error| format!("error getting validator info: {e}")) + .map_err(Status::unavailable) + .into_stream() + .boxed(); + + Ok(tonic::Response::new(stream)) } #[instrument(skip(self, request))] @@ -146,4 +208,30 @@ impl QueryService for Server { None => Err(Status::not_found("current validator rate not found")), } } + + #[instrument(skip(self, request))] + async fn validator_uptime( + &self, + request: tonic::Request, + ) -> Result, Status> { + let state = self.storage.latest_snapshot(); + let identity_key = request + .into_inner() + .identity_key + .ok_or_else(|| tonic::Status::invalid_argument("empty message"))? 
+ .try_into() + .map_err(|_| tonic::Status::invalid_argument("invalid identity key"))?; + + let uptime_data = state + .get_validator_uptime(&identity_key) + .await + .map_err(|e| tonic::Status::internal(e.to_string()))?; + + match uptime_data { + Some(u) => Ok(tonic::Response::new(ValidatorUptimeResponse { + uptime: Some(u.into()), + })), + None => Err(Status::not_found("validator uptime not found")), + } + } } diff --git a/crates/core/component/stake/src/component/validator_handler/validator_manager.rs b/crates/core/component/stake/src/component/validator_handler/validator_manager.rs index ed68ab8538..caf1b78c67 100644 --- a/crates/core/component/stake/src/component/validator_handler/validator_manager.rs +++ b/crates/core/component/stake/src/component/validator_handler/validator_manager.rs @@ -8,9 +8,15 @@ use { }, StateReadExt as _, StateWriteExt as _, }, + event, rate::{BaseRateData, RateData}, state_key, - validator::{self, BondingState::*, State, State::*, Validator}, + validator::{ + self, + BondingState::*, + State::{self, *}, + Validator, + }, DelegationToken, IdentityKey, Penalty, Uptime, }, anyhow::{ensure, Result}, @@ -19,8 +25,10 @@ use { penumbra_asset::asset, penumbra_num::Amount, penumbra_proto::StateWriteProto, - penumbra_sct::component::clock::{EpochManager, EpochRead}, - penumbra_sct::component::StateReadExt as _, + penumbra_sct::component::{ + clock::{EpochManager, EpochRead}, + StateReadExt as _, + }, penumbra_shielded_pool::component::AssetRegistry, std::collections::BTreeMap, tendermint::abci::types::Misbehavior, @@ -83,6 +91,8 @@ pub trait ValidatorManager: StateWrite { /// Execute a legal state transition, updating the validator records and /// implementing the necessary side effects. /// + /// Returns a `(old_state, new_state)` tuple, corresponding to the executed transition. + /// /// # Errors /// This method errors on illegal state transitions, but will otherwise try to do what /// you ask it to do. It is the caller's responsibility to ensure that the state transitions @@ -94,7 +104,7 @@ pub trait ValidatorManager: StateWrite { &mut self, identity_key: &IdentityKey, new_state: validator::State, - ) -> Result<()> { + ) -> Result<(State, State)> { let old_state = self .get_validator_state(identity_key) .await? 
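The `validator_info` rewrite above replaces a collect-then-stream loop with a fully streaming pipeline: an `async_stream::try_stream!` block yields items as they are fetched, and `TryStreamExt::try_filter`/`map_ok` shape the results without ever unwrapping errors. A reduced sketch of that pipeline, with integers standing in for validator info and a parity check standing in for the active-state filter:

```rust
use async_stream::try_stream;
use futures::TryStreamExt;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Stand-in for the consensus-set stream: each item may itself be an error.
    let ids = futures::stream::iter(vec![Ok::<u64, anyhow::Error>(1), Ok(2), Ok(3), Ok(4)]);

    // Adapt the stream of ids into a stream of "info", propagating errors as
    // the try_stream! block in the diff does for validator lookups.
    let infos = try_stream! {
        for await id in ids {
            let id = id?;
            // Stand-in for an async state lookup.
            let info = id * 10;
            yield info;
        }
    };

    // Filter and map without leaving the Result world, then drain the stream.
    let out: Vec<String> = infos
        .try_filter(|info| futures::future::ready(info % 20 == 0))
        .map_ok(|info| format!("info-{info}"))
        .try_collect()
        .await?;

    assert_eq!(out, vec!["info-20".to_string(), "info-40".to_string()]);
    Ok(())
}
```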
@@ -118,7 +128,7 @@ pub trait ValidatorManager: StateWrite { identity_key: &IdentityKey, old_state: validator::State, new_state: validator::State, - ) -> Result<()> { + ) -> Result<(State, State)> { let validator_state_path = state_key::validators::state::by_id(identity_key); let current_height = self.get_block_height().await?; @@ -294,7 +304,7 @@ pub trait ValidatorManager: StateWrite { Self::state_machine_metrics(old_state, new_state); - Ok(()) + Ok((old_state, new_state)) } #[instrument(skip(self))] @@ -649,8 +659,20 @@ pub trait ValidatorManager: StateWrite { ) })?; - self.set_validator_state(&validator.identity_key, validator::State::Tombstoned) - .await + let (old_state, new_state) = self + .set_validator_state(&validator.identity_key, validator::State::Tombstoned) + .await?; + + if let (Inactive | Jailed | Active, Tombstoned) = (old_state, new_state) { + let current_height = self.get_block_height().await?; + self.record_proto(event::tombstone_validator( + current_height, + validator.identity_key.clone(), + evidence, + )); + } + + Ok(()) } fn state_machine_metrics(old_state: validator::State, new_state: validator::State) { diff --git a/crates/core/component/stake/src/component/validator_handler/validator_store.rs b/crates/core/component/stake/src/component/validator_handler/validator_store.rs index 3f30895d12..151571d47b 100644 --- a/crates/core/component/stake/src/component/validator_handler/validator_store.rs +++ b/crates/core/component/stake/src/component/validator_handler/validator_store.rs @@ -8,7 +8,7 @@ use crate::{ use anyhow::Result; use async_trait::async_trait; use cnidarium::{StateRead, StateWrite}; -use futures::{Future, FutureExt, TryStreamExt}; +use futures::{Future, FutureExt}; use penumbra_num::Amount; use penumbra_proto::{state::future::DomainFuture, DomainType, StateReadProto, StateWriteProto}; use std::pin::Pin; @@ -227,23 +227,6 @@ pub trait ValidatorDataRead: StateRead { .map_ok(|opt: Option| opt.map(|v: Validator| v.consensus_key)) .boxed() } - - /// Returns a list of **all** known validators metadata. - async fn validator_definitions(&self) -> Result> { - self.prefix(state_key::validators::definitions::prefix()) - .map_ok(|(_key, validator)| validator) - .try_collect() - .await - } - - /// Returns a list of **all** known validators identity keys. 
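Returning the `(old_state, new_state)` pair from `set_validator_state` lets the caller emit events only for transitions that actually moved the state machine, as the `(Inactive | Jailed | Active, Tombstoned)` match above does. A compact sketch of the pattern, with a toy state machine in place of the validator lifecycle:

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum State {
    Inactive,
    Active,
    Jailed,
    Tombstoned,
}
use State::*;

/// Apply a transition and hand both endpoints back to the caller,
/// mirroring the `Result<(State, State)>` return in the diff.
fn set_state(current: &mut State, new: State) -> (State, State) {
    let old = *current;
    *current = new;
    (old, new)
}

fn main() {
    let mut state = Jailed;
    let transition = set_state(&mut state, Tombstoned);

    // Only a genuine liveness-ending transition records the event;
    // e.g. Tombstoned -> Tombstoned would not match this pattern.
    if let (Inactive | Jailed | Active, Tombstoned) = transition {
        println!("record tombstone event for transition {transition:?}");
    }
}
```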
- async fn validator_identity_keys(&self) -> Result> { - self.prefix(state_key::validators::definitions::prefix()) - .map_ok(|(_key, validator)| validator) - .map_ok(|validator: Validator| validator.identity_key) - .try_collect() - .await - } } impl ValidatorDataRead for T {} diff --git a/crates/core/component/stake/src/event.rs b/crates/core/component/stake/src/event.rs index 069c249b47..f6530f016c 100644 --- a/crates/core/component/stake/src/event.rs +++ b/crates/core/component/stake/src/event.rs @@ -1,5 +1,6 @@ -use crate::{Delegate, Undelegate}; -use tendermint::abci::{Event, EventAttributeIndexExt}; +use crate::{Delegate, IdentityKey, Undelegate}; +use penumbra_proto::core::component::stake::v1 as pb; +use tendermint::abci::{types::Misbehavior, Event, EventAttributeIndexExt}; pub fn delegate(delegate: &Delegate) -> Event { Event::new( @@ -20,3 +21,17 @@ pub fn undelegate(undelegate: &Undelegate) -> Event { ], ) } + +pub fn tombstone_validator( + current_height: u64, + identity_key: IdentityKey, + evidence: &Misbehavior, +) -> pb::EventTombstoneValidator { + pb::EventTombstoneValidator { + evidence_height: evidence.height.value(), + current_height, + identity_key: Some(identity_key.into()), + address: evidence.validator.address.to_vec(), + voting_power: evidence.validator.power.value(), + } +} diff --git a/crates/core/component/stake/src/funding_stream.rs b/crates/core/component/stake/src/funding_stream.rs index 4a114d0968..67a63c4dd8 100644 --- a/crates/core/component/stake/src/funding_stream.rs +++ b/crates/core/component/stake/src/funding_stream.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; /// A destination for a portion of a validator's commission of staking rewards. #[allow(clippy::large_enum_variant)] -#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)] #[serde(try_from = "pb::FundingStream", into = "pb::FundingStream")] pub enum FundingStream { ToAddress { @@ -25,7 +25,7 @@ pub enum FundingStream { } #[allow(clippy::large_enum_variant)] -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum Recipient { Address(Address), CommunityPool, @@ -41,7 +41,7 @@ impl FundingStream { pub fn recipient(&self) -> Recipient { match self { - FundingStream::ToAddress { address, .. } => Recipient::Address(*address), + FundingStream::ToAddress { address, .. } => Recipient::Address(address.clone()), FundingStream::ToCommunityPool { .. } => Recipient::CommunityPool, } } diff --git a/crates/core/component/stake/src/uptime.rs b/crates/core/component/stake/src/uptime.rs index e44ab52f62..c52eaeadba 100644 --- a/crates/core/component/stake/src/uptime.rs +++ b/crates/core/component/stake/src/uptime.rs @@ -78,10 +78,35 @@ impl Uptime { self.signatures.iter_zeros().len() } + /// Enumerates the missed blocks over the window in terms of absolute block height. 
+ pub fn missed_blocks(&self) -> impl Iterator<Item = u64> + '_ { + // The height of the next block to be recorded (not yet recorded): + let current_height = self.as_of_block_height; + // The length of the window of blocks being recorded: + let window_len = self.signatures.len(); + // The earliest height of a block that has been recorded: + let earliest_height = current_height.saturating_sub(window_len as u64 - 1); + // The range of block heights that have been recorded: + let all_heights = earliest_height..=current_height; + // Filter out the heights that were signed: + all_heights.filter_map(move |height| { + // Index the bit vector as the ring buffer that it is, and invert the bit corresponding + // to this height to find out whether it was missed: + let index = (height as usize) % window_len; + let signed = self.signatures[index]; + Some(height).filter(|_| !signed) + }) + } + /// Returns the block height up to which this tracker has recorded. pub fn as_of_height(&self) -> u64 { self.as_of_block_height } + + /// Returns the size of the window of blocks being recorded. + pub fn missed_blocks_window(&self) -> usize { + self.signatures.len() + } } impl DomainType for Uptime { @@ -121,6 +146,9 @@ impl TryFrom<pb::Uptime> for Uptime { mod tests { use super::*; + use proptest::prelude::*; + use std::collections::VecDeque; + #[test] fn counts_missed_blocks() { let window = 128; @@ -142,6 +170,64 @@ mod tests { assert!(uptime.mark_height_as_signed(0, true).is_err()); } + /// Basic check that if we miss block 1, we report that we missed block 1. + #[test] + fn enumerate_missed_first_block() { + let window = 128; + let mut uptime = Uptime::new(0, window); + + // Mark the first block as missed + uptime.mark_height_as_signed(1, false).unwrap(); + let missed_blocks: Vec<_> = uptime.missed_blocks().collect(); + + // Check that exactly the first block is missed + assert_eq!(missed_blocks, vec![1]); + } + + proptest! { + /// Ensure that the `Uptime` struct simulates a fixed size queue of (height, signed) tuples, + /// and that the `missed_blocks` iterator returns the correct missed blocks. + #[test] + fn enumerate_uptime_simulates_bounded_queue( + (window_len, signed_blocks) in + (1..=16usize).prop_flat_map(move |window_len| { + proptest::collection::vec(proptest::bool::ANY, 0..window_len * 2) + .prop_map(move |signed_blocks| (window_len, signed_blocks)) + }) + ) { + // We're going to simulate the `Uptime` struct with a VecDeque of (height, signed) + // tuples whose length we will keep bounded by the window length. + let mut uptime = Uptime::new(0, window_len); + let mut simulated_uptime = VecDeque::new(); + + // For each (height, signed) tuple in our generated sequence, mark the height as signed + // or not signed.
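`missed_blocks` above treats the signature bitvector as a ring buffer indexed by absolute height mod window length. The sketch below isolates that indexing logic; a plain `Vec<bool>` stands in for the `bitvec` storage so the example stays dependency-free:

```rust
/// Minimal stand-in for the uptime tracker: `signatures[h % len]` records
/// whether block `h` was signed, and `as_of` is the last recorded height.
struct Uptime {
    signatures: Vec<bool>,
    as_of: u64,
}

impl Uptime {
    /// Enumerate missed blocks as absolute heights, oldest first.
    fn missed_blocks(&self) -> impl Iterator<Item = u64> + '_ {
        let window = self.signatures.len() as u64;
        // The earliest height still inside the window:
        let earliest = self.as_of.saturating_sub(window - 1);
        (earliest..=self.as_of)
            .filter(move |height| !self.signatures[(height % window) as usize])
    }
}

fn main() {
    // Window of 4, heights 5..=8 recorded; 6 and 8 were missed.
    let mut signatures = vec![false; 4];
    for (height, signed) in [(5u64, true), (6, false), (7, true), (8, false)] {
        signatures[(height % 4) as usize] = signed;
    }
    let uptime = Uptime { signatures, as_of: 8 };
    assert_eq!(uptime.missed_blocks().collect::<Vec<_>>(), vec![6, 8]);
}
```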
+ for (height, signed) in signed_blocks.into_iter().enumerate() { + // Convert the height to a u64 and add 1 because the `Uptime` struct starts out with + // an internal height of 0: + let height = height as u64 + 1; + // Mark it using the real `Uptime` struct: + uptime.mark_height_as_signed(height, signed).unwrap(); + // Mark it using our simulated `VecDeque`, taking care to keep its length bounded by + // the window length: + simulated_uptime.push_back((height, signed)); + if simulated_uptime.len() > window_len { + simulated_uptime.pop_front(); + } + } + + // Compare the missed blocks from the real `Uptime` struct with the simulated `VecDeque`: + let missed_blocks: Vec<_> = uptime.missed_blocks().collect(); + + // Retain only the heights from the simulated `VecDeque` that were not signed: + simulated_uptime.retain(|(_, signed)| !signed); + let simulated_missed_blocks: Vec<_> = + simulated_uptime.into_iter().map(|(height, _)| height).collect(); + + prop_assert_eq!(missed_blocks, simulated_missed_blocks); + } + } + #[test] fn proto_round_trip() { // make a weird size window diff --git a/crates/core/keys/src/address.rs b/crates/core/keys/src/address.rs index 616ec7a482..e6dc61e918 100644 --- a/crates/core/keys/src/address.rs +++ b/crates/core/keys/src/address.rs @@ -1,4 +1,9 @@ -use std::io::{Cursor, Read, Write}; +//! [Payment address][Address] facilities. + +use std::{ + io::{Cursor, Read, Write}, + sync::OnceLock, +}; use anyhow::Context; use ark_serialize::CanonicalDeserialize; @@ -16,29 +21,70 @@ pub use view::AddressView; use crate::{fmd, ka, keys::Diversifier}; +/// The length of an [`Address`] in bytes. pub const ADDRESS_LEN_BYTES: usize = 80; + /// Number of bits in the address short form divided by the number of bits per Bech32m character pub const ADDRESS_NUM_CHARS_SHORT_FORM: usize = 24; /// A valid payment address. -#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Eq, Serialize, Deserialize)] #[serde(try_from = "pb::Address", into = "pb::Address")] pub struct Address { + /// The address diversifier. d: Diversifier, - /// cached copy of the diversified base - g_d: decaf377::Element, + /// A cached copy of the diversified base. + g_d: OnceLock, + /// The public key for this payment address. + /// /// extra invariant: the bytes in pk_d should be the canonical encoding of an /// s value (whether or not it is a valid decaf377 encoding) /// this ensures we can use a PaymentAddress to form a note commitment, /// which involves hashing s as a field element. pk_d: ka::Public, - /// transmission key s value + /// The transmission key s value. transmission_key_s: Fq, + /// The clue key for this payment address. ck_d: fmd::ClueKey, } +impl std::cmp::PartialEq for Address { + fn eq( + &self, + rhs @ Self { + d: rhs_d, + g_d: rhs_g_d, + pk_d: rhs_pk_d, + transmission_key_s: rhs_transmission_key_s, + ck_d: rhs_ck_d, + }: &Self, + ) -> bool { + let lhs @ Self { + d: lhs_d, + g_d: lhs_g_d, + pk_d: lhs_pk_d, + transmission_key_s: lhs_transmission_key_s, + ck_d: lhs_ck_d, + } = self; + + // When a `OnceLock` value is compared, it will only call `get()`, refraining from + // initializing the value. To make sure that an address that *hasn't* yet accessed its + // diversified base is considered equal to an address that *has*, compute the base points + // if they have not already been generated. + lhs.diversified_generator(); + rhs.diversified_generator(); + + // Compare all of the fields. 
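Swapping the eagerly computed `g_d: decaf377::Element` for a `OnceLock` is what costs `Address` its `Copy` impl (and is why call sites throughout this diff switch to `.clone()`), but it makes construction and deserialization cheap: the diversified base is derived on first use and cached afterward. The manual `PartialEq` then has to force both sides' caches before comparing, as the comment above explains. A small sketch of that shape, with an integer hash standing in for the expensive base-point derivation:

```rust
use std::sync::OnceLock;

#[derive(Clone)] // OnceLock<T> is not Copy, so this struct cannot be Copy either.
struct Addr {
    d: String,
    // Lazily derived from `d`, then cached.
    g_d: OnceLock<u64>,
}

impl Addr {
    fn new(d: &str) -> Self {
        Addr {
            d: d.to_string(),
            g_d: OnceLock::new(),
        }
    }

    /// Compute the "diversified base" on first access; afterwards, return the cache.
    fn g_d(&self) -> &u64 {
        self.g_d.get_or_init(|| {
            // Stand-in for the expensive base-point derivation.
            self.d.bytes().map(u64::from).sum()
        })
    }
}

impl PartialEq for Addr {
    fn eq(&self, other: &Self) -> bool {
        // Force both caches so a lazily-initialized address compares equal
        // to one whose base has already been computed.
        self.g_d();
        other.g_d();
        self.d == other.d && self.g_d.get() == other.g_d.get()
    }
}

fn main() {
    let a = Addr::new("penumbra");
    let b = Addr::new("penumbra");
    let _ = a.g_d(); // fill a's cache only...
    assert!(a == b); // ...eq fills b's before comparing.
}
```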
lhs_d.eq(rhs_d) + && lhs_g_d.eq(rhs_g_d) + && lhs_pk_d.eq(rhs_pk_d) + && lhs_transmission_key_s.eq(rhs_transmission_key_s) + && lhs_ck_d.eq(rhs_ck_d) + } +} + impl std::cmp::PartialOrd for Address { fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { Some(self.to_vec().cmp(&other.to_vec())) @@ -69,7 +115,7 @@ impl Address { // don't need an error type here, caller will probably .expect anyways Some(Self { d, - g_d: d.diversified_generator(), + g_d: OnceLock::new(), pk_d, ck_d, transmission_key_s, @@ -79,26 +125,36 @@ } } + /// Returns a reference to the address diversifier. pub fn diversifier(&self) -> &Diversifier { &self.d } + /// Returns a reference to the diversified base. + /// + /// This method computes the diversified base if it has not been computed yet. This value is + /// cached after it has been computed once. pub fn diversified_generator(&self) -> &decaf377::Element { - &self.g_d + self.g_d + .get_or_init(|| self.diversifier().diversified_generator()) } + /// Returns a reference to the transmission key. pub fn transmission_key(&self) -> &ka::Public { &self.pk_d } + /// Returns a reference to the clue key. pub fn clue_key(&self) -> &fmd::ClueKey { &self.ck_d } + /// Returns a reference to the transmission key `s` value. pub fn transmission_key_s(&self) -> &Fq { &self.transmission_key_s } + /// Converts this address to a vector of bytes. pub fn to_vec(&self) -> Vec<u8> { let mut bytes = std::io::Cursor::new(Vec::new()); bytes @@ -114,7 +170,7 @@ f4jumble(bytes.get_ref()).expect("can jumble") } - /// A randomized dummy address. + /// Generates a randomized dummy address. pub fn dummy<R: CryptoRng + Rng>(rng: &mut R) -> Self { loop { let mut diversifier_bytes = [0u8; 16]; @@ -151,7 +207,7 @@ /// Compat (bech32 non-m) address format pub fn compat_encoding(&self) -> String { - let proto_address = pb::Address::from(*self); + let proto_address = pb::Address::from(self); bech32str::encode( &proto_address.inner, bech32str::compat_address::BECH32_PREFIX, @@ -166,6 +222,12 @@ impl DomainType for Address { impl From<Address>
for pb::Address { fn from(a: Address) -> Self { + Self::from(&a) + } +} + +impl From<&Address> for pb::Address { + fn from(a: &Address) -> Self { pb::Address { inner: a.to_vec(), // Always produce encodings without the alt format. @@ -193,7 +255,7 @@ impl TryFrom for Address { impl std::fmt::Display for Address { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let proto_address = pb::Address::from(*self); + let proto_address = pb::Address::from(self); f.write_str(&bech32str::encode( &proto_address.inner, bech32str::address::BECH32_PREFIX, @@ -286,6 +348,18 @@ impl TryFrom<&[u8]> for Address { } } +/// Assert the addresses are both [`Send`] and [`Sync`]. +// NB: allow dead code, because this block only contains compile-time assertions. +#[allow(dead_code)] +mod assert_address_is_send_and_sync { + fn is_send() {} + fn is_sync() {} + fn f() { + is_send::(); + is_sync::(); + } +} + #[cfg(test)] mod tests { use std::str::FromStr; @@ -316,7 +390,7 @@ mod tests { alt_bech32m: bech32m_addr, } .encode_to_vec(); - let proto_addr_direct: pb::Address = dest.into(); + let proto_addr_direct: pb::Address = dest.clone().into(); let addr_from_proto: Address = proto_addr_direct .try_into() .expect("can convert from proto back to address"); diff --git a/crates/core/keys/src/address/r1cs.rs b/crates/core/keys/src/address/r1cs.rs index 635b72ade3..069f53abc1 100644 --- a/crates/core/keys/src/address/r1cs.rs +++ b/crates/core/keys/src/address/r1cs.rs @@ -36,7 +36,7 @@ impl AllocVar for AddressVar { ) -> Result { let ns = cs.into(); let cs = ns.cs(); - let address: Address = *f()?.borrow(); + let address: Address = f()?.borrow().to_owned(); let diversified_generator: ElementVar = AllocVar::::new_variable( cs.clone(), diff --git a/crates/core/keys/src/address/view.rs b/crates/core/keys/src/address/view.rs index 3dcf9c28c2..fd1cf6db0a 100644 --- a/crates/core/keys/src/address/view.rs +++ b/crates/core/keys/src/address/view.rs @@ -11,7 +11,7 @@ use super::Address; /// /// This type allows working with addresses and address indexes without knowing /// the corresponding FVK. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(try_from = "pb::AddressView", into = "pb::AddressView")] pub enum AddressView { Opaque { @@ -27,8 +27,8 @@ pub enum AddressView { impl AddressView { pub fn address(&self) -> Address { match self { - AddressView::Opaque { address } => *address, - AddressView::Decoded { address, .. } => *address, + AddressView::Opaque { address } => address.clone(), + AddressView::Decoded { address, .. 
} => address.clone(), } } } @@ -121,49 +121,57 @@ mod tests { let addr2_1 = fvk2.payment_address(1.into()).0; assert_eq!( - fvk1.view_address(addr1_0), + fvk1.view_address(addr1_0.clone()), AddressView::Decoded { - address: addr1_0, + address: addr1_0.clone(), index: 0.into(), wallet_id: fvk1.wallet_id(), } ); assert_eq!( - fvk2.view_address(addr1_0), - AddressView::Opaque { address: addr1_0 } + fvk2.view_address(addr1_0.clone()), + AddressView::Opaque { + address: addr1_0.clone() + } ); assert_eq!( - fvk1.view_address(addr1_1), + fvk1.view_address(addr1_1.clone()), AddressView::Decoded { - address: addr1_1, + address: addr1_1.clone(), index: 1.into(), wallet_id: fvk1.wallet_id(), } ); assert_eq!( - fvk2.view_address(addr1_1), - AddressView::Opaque { address: addr1_1 } + fvk2.view_address(addr1_1.clone()), + AddressView::Opaque { + address: addr1_1.clone() + } ); assert_eq!( - fvk1.view_address(addr2_0), - AddressView::Opaque { address: addr2_0 } + fvk1.view_address(addr2_0.clone()), + AddressView::Opaque { + address: addr2_0.clone() + } ); assert_eq!( - fvk2.view_address(addr2_0), + fvk2.view_address(addr2_0.clone()), AddressView::Decoded { - address: addr2_0, + address: addr2_0.clone(), index: 0.into(), wallet_id: fvk2.wallet_id(), } ); assert_eq!( - fvk1.view_address(addr2_1), - AddressView::Opaque { address: addr2_1 } + fvk1.view_address(addr2_1.clone()), + AddressView::Opaque { + address: addr2_1.clone() + } ); assert_eq!( - fvk2.view_address(addr2_1), + fvk2.view_address(addr2_1.clone()), AddressView::Decoded { - address: addr2_1, + address: addr2_1.clone(), index: 1.into(), wallet_id: fvk2.wallet_id(), } diff --git a/crates/core/num/src/amount.rs b/crates/core/num/src/amount.rs index d7797853a1..0ab8fdc352 100644 --- a/crates/core/num/src/amount.rs +++ b/crates/core/num/src/amount.rs @@ -82,6 +82,14 @@ impl Amount { } } +impl ops::Not for Amount { + type Output = Self; + + fn not(self) -> Self::Output { + Self { inner: !self.inner } + } +} + #[derive(Clone)] pub struct AmountVar { pub amount: FqVar, diff --git a/crates/core/transaction/src/memo.rs b/crates/core/transaction/src/memo.rs index 1f76604784..484655247a 100644 --- a/crates/core/transaction/src/memo.rs +++ b/crates/core/transaction/src/memo.rs @@ -66,7 +66,7 @@ impl MemoPlaintext { } pub fn return_address(&self) -> Address { - self.return_address + self.return_address.clone() } pub fn text(&self) -> &str { @@ -284,7 +284,7 @@ mod tests { // On the sender side, we have to encrypt the memo to put into the transaction-level, // and also the memo key to put on the action-level (output). let memo = MemoPlaintext { - return_address: dest, + return_address: dest.clone(), text: String::from("Hi"), }; let memo_key = PayloadKey::random_key(&mut OsRng); @@ -331,7 +331,7 @@ mod tests { // On the sender side, we have to encrypt the memo to put into the transaction-level, // and also the memo key to put on the action-level (output). - let memo = MemoPlaintext::new(dest, "Hello, friend".into())?; + let memo = MemoPlaintext::new(dest.clone(), "Hello, friend".into())?; let memo_key = PayloadKey::random_key(&mut OsRng); let ciphertext = MemoCiphertext::encrypt(memo_key.clone(), &memo).expect("can encrypt memo"); diff --git a/crates/core/transaction/src/plan.rs b/crates/core/transaction/src/plan.rs index dbbb70931e..3f36c7f42c 100644 --- a/crates/core/transaction/src/plan.rs +++ b/crates/core/transaction/src/plan.rs @@ -318,7 +318,9 @@ impl TransactionPlan { /// Convenience method to get all the destination addresses for each `OutputPlan`s. 
pub fn dest_addresses(&self) -> Vec<Address>
{ - self.output_plans().map(|plan| plan.dest_address).collect() + self.output_plans() + .map(|plan| plan.dest_address.clone()) + .collect() } /// Convenience method to get the number of `OutputPlan`s in this transaction. @@ -492,7 +494,7 @@ mod tests { .unwrap() .id(), }), - addr, + addr.clone(), ); let mut rng = OsRng; diff --git a/crates/crypto/proof-params/src/gen/swapclaim_id.rs b/crates/crypto/proof-params/src/gen/swapclaim_id.rs index 0293098666..e90d28aa7c 100644 --- a/crates/crypto/proof-params/src/gen/swapclaim_id.rs +++ b/crates/crypto/proof-params/src/gen/swapclaim_id.rs @@ -1,3 +1,3 @@ -pub const PROVING_KEY_ID: &'static str = "groth16pk1vs60etmlvwfzmn2ve0ljz0vfkzjlrhjpue5svm5ry6l076qukjcsw566rp"; -pub const VERIFICATION_KEY_ID: &'static str = "groth16vk18qjn0kxmypk8gmfc6zhjukhyxk0agmunfnhpxmf3yxq266q6sgaqwe94rc"; +pub const PROVING_KEY_ID: &'static str = "groth16pk1pfpj2hullzpeqzzyfqw85q03zz8mthht07zd3vkc562lfe776xgsvu3mfy"; +pub const VERIFICATION_KEY_ID: &'static str = "groth16vk1qyhwaxh5kq6lk2tm6fnxctynqqf7vt5j64u92zm8d8pndy7yap4qsyw855"; diff --git a/crates/crypto/proof-params/src/gen/swapclaim_pk.bin b/crates/crypto/proof-params/src/gen/swapclaim_pk.bin index 96b1d164b6..a401b19bc9 100644 --- a/crates/crypto/proof-params/src/gen/swapclaim_pk.bin +++ b/crates/crypto/proof-params/src/gen/swapclaim_pk.bin @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8501f1dad9ac85d80c6421b17b838f8c6478431babfa836d3d33b5398fa6b6ad -size 26003952 +oid sha256:1190707f9815bf0135169547b888716d6731cdbe1bc4ea2fbd22655a03fe56cd +size 25957872 diff --git a/crates/crypto/proof-params/src/gen/swapclaim_vk.param b/crates/crypto/proof-params/src/gen/swapclaim_vk.param index 4bd2b584a5..72be8023cd 100644 Binary files a/crates/crypto/proof-params/src/gen/swapclaim_vk.param and b/crates/crypto/proof-params/src/gen/swapclaim_vk.param differ diff --git a/crates/proto/src/gen/penumbra.core.component.dex.v1.rs b/crates/proto/src/gen/penumbra.core.component.dex.v1.rs index 644a2c7922..27b5b16480 100644 --- a/crates/proto/src/gen/penumbra.core.component.dex.v1.rs +++ b/crates/proto/src/gen/penumbra.core.component.dex.v1.rs @@ -508,8 +508,12 @@ pub struct BatchSwapOutputData { #[prost(message, optional, tag = "8")] pub trading_pair: ::core::option::Option<TradingPair>, /// The starting block height of the epoch for which the batch swap data is valid. + #[deprecated] #[prost(uint64, tag = "9")] pub epoch_starting_height: u64, + /// The prefix (epoch, block) of the position where this batch swap occurred. + #[prost(uint64, tag = "10")] + pub sct_position_prefix: u64, } impl ::prost::Name for BatchSwapOutputData { const NAME: &'static str = "BatchSwapOutputData"; @@ -1168,6 +1172,8 @@ impl ::prost::Name for LiquidityPositionsByPriceRequest { pub struct LiquidityPositionsByPriceResponse { #[prost(message, optional, tag = "1")] pub data: ::core::option::Option<Position>, + #[prost(message, optional, tag = "2")] + pub id: ::core::option::Option<PositionId>, } impl ::prost::Name for LiquidityPositionsByPriceResponse { const NAME: &'static str = "LiquidityPositionsByPriceResponse"; @@ -1395,6 +1401,20 @@ impl ::prost::Name for EventPositionClose { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EventQueuePositionClose { + /// The ID of the position queued for closure.
+ #[prost(message, optional, tag = "1")] + pub position_id: ::core::option::Option, +} +impl ::prost::Name for EventQueuePositionClose { + const NAME: &'static str = "EventQueuePositionClose"; + const PACKAGE: &'static str = "penumbra.core.component.dex.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.dex.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct EventPositionWithdraw { /// The ID of the withdrawn position. #[prost(message, optional, tag = "1")] diff --git a/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs b/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs index b6efda7985..9a6d02eee0 100644 --- a/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs +++ b/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs @@ -614,6 +614,9 @@ impl serde::Serialize for BatchSwapOutputData { if self.epoch_starting_height != 0 { len += 1; } + if self.sct_position_prefix != 0 { + len += 1; + } let mut struct_ser = serializer.serialize_struct("penumbra.core.component.dex.v1.BatchSwapOutputData", len)?; if let Some(v) = self.delta_1.as_ref() { struct_ser.serialize_field("delta1", v)?; @@ -644,6 +647,10 @@ impl serde::Serialize for BatchSwapOutputData { #[allow(clippy::needless_borrow)] struct_ser.serialize_field("epochStartingHeight", ToString::to_string(&self.epoch_starting_height).as_str())?; } + if self.sct_position_prefix != 0 { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("sctPositionPrefix", ToString::to_string(&self.sct_position_prefix).as_str())?; + } struct_ser.end() } } @@ -671,6 +678,8 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { "tradingPair", "epoch_starting_height", "epochStartingHeight", + "sct_position_prefix", + "sctPositionPrefix", ]; #[allow(clippy::enum_variant_names)] @@ -684,6 +693,7 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { Height, TradingPair, EpochStartingHeight, + SctPositionPrefix, __SkipField__, } impl<'de> serde::Deserialize<'de> for GeneratedField { @@ -715,6 +725,7 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { "height" => Ok(GeneratedField::Height), "tradingPair" | "trading_pair" => Ok(GeneratedField::TradingPair), "epochStartingHeight" | "epoch_starting_height" => Ok(GeneratedField::EpochStartingHeight), + "sctPositionPrefix" | "sct_position_prefix" => Ok(GeneratedField::SctPositionPrefix), _ => Ok(GeneratedField::__SkipField__), } } @@ -743,6 +754,7 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { let mut height__ = None; let mut trading_pair__ = None; let mut epoch_starting_height__ = None; + let mut sct_position_prefix__ = None; while let Some(k) = map_.next_key()? 
{ match k { GeneratedField::Delta1 => { @@ -803,6 +815,14 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) ; } + GeneratedField::SctPositionPrefix => { + if sct_position_prefix__.is_some() { + return Err(serde::de::Error::duplicate_field("sctPositionPrefix")); + } + sct_position_prefix__ = + Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) + ; + } GeneratedField::__SkipField__ => { let _ = map_.next_value::()?; } @@ -818,6 +838,7 @@ impl<'de> serde::Deserialize<'de> for BatchSwapOutputData { height: height__.unwrap_or_default(), trading_pair: trading_pair__, epoch_starting_height: epoch_starting_height__.unwrap_or_default(), + sct_position_prefix: sct_position_prefix__.unwrap_or_default(), }) } } @@ -2188,6 +2209,102 @@ impl<'de> serde::Deserialize<'de> for EventPositionWithdraw { deserializer.deserialize_struct("penumbra.core.component.dex.v1.EventPositionWithdraw", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for EventQueuePositionClose { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.position_id.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.dex.v1.EventQueuePositionClose", len)?; + if let Some(v) = self.position_id.as_ref() { + struct_ser.serialize_field("positionId", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for EventQueuePositionClose { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "position_id", + "positionId", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + PositionId, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "positionId" | "position_id" => Ok(GeneratedField::PositionId), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = EventQueuePositionClose; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.dex.v1.EventQueuePositionClose") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut position_id__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::PositionId => { + if position_id__.is_some() { + return Err(serde::de::Error::duplicate_field("positionId")); + } + position_id__ = map_.next_value()?; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::()?; + } + } + } + Ok(EventQueuePositionClose { + position_id: position_id__, + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.dex.v1.EventQueuePositionClose", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for EventSwap { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result @@ -3356,10 +3473,16 @@ impl serde::Serialize for LiquidityPositionsByPriceResponse { if self.data.is_some() { len += 1; } + if self.id.is_some() { + len += 1; + } let mut struct_ser = serializer.serialize_struct("penumbra.core.component.dex.v1.LiquidityPositionsByPriceResponse", len)?; if let Some(v) = self.data.as_ref() { struct_ser.serialize_field("data", v)?; } + if let Some(v) = self.id.as_ref() { + struct_ser.serialize_field("id", v)?; + } struct_ser.end() } } @@ -3371,11 +3494,13 @@ impl<'de> serde::Deserialize<'de> for LiquidityPositionsByPriceResponse { { const FIELDS: &[&str] = &[ "data", + "id", ]; #[allow(clippy::enum_variant_names)] enum GeneratedField { Data, + Id, __SkipField__, } impl<'de> serde::Deserialize<'de> for GeneratedField { @@ -3399,6 +3524,7 @@ impl<'de> serde::Deserialize<'de> for LiquidityPositionsByPriceResponse { { match value { "data" => Ok(GeneratedField::Data), + "id" => Ok(GeneratedField::Id), _ => Ok(GeneratedField::__SkipField__), } } @@ -3419,6 +3545,7 @@ impl<'de> serde::Deserialize<'de> for LiquidityPositionsByPriceResponse { V: serde::de::MapAccess<'de>, { let mut data__ = None; + let mut id__ = None; while let Some(k) = map_.next_key()? { match k { GeneratedField::Data => { @@ -3427,6 +3554,12 @@ impl<'de> serde::Deserialize<'de> for LiquidityPositionsByPriceResponse { } data__ = map_.next_value()?; } + GeneratedField::Id => { + if id__.is_some() { + return Err(serde::de::Error::duplicate_field("id")); + } + id__ = map_.next_value()?; + } GeneratedField::__SkipField__ => { let _ = map_.next_value::()?; } @@ -3434,6 +3567,7 @@ impl<'de> serde::Deserialize<'de> for LiquidityPositionsByPriceResponse { } Ok(LiquidityPositionsByPriceResponse { data: data__, + id: id__, }) } } diff --git a/crates/proto/src/gen/penumbra.core.component.stake.v1.rs b/crates/proto/src/gen/penumbra.core.component.stake.v1.rs index ea032a9fc1..80d1430b8f 100644 --- a/crates/proto/src/gen/penumbra.core.component.stake.v1.rs +++ b/crates/proto/src/gen/penumbra.core.component.stake.v1.rs @@ -592,6 +592,34 @@ impl ::prost::Name for Penalty { ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) } } +/// Requests information about a specific validator. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetValidatorInfoRequest { + /// The identity key of the validator. 
+ #[prost(message, optional, tag = "2")] + pub identity_key: ::core::option::Option, +} +impl ::prost::Name for GetValidatorInfoRequest { + const NAME: &'static str = "GetValidatorInfoRequest"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetValidatorInfoResponse { + #[prost(message, optional, tag = "1")] + pub validator_info: ::core::option::Option, +} +impl ::prost::Name for GetValidatorInfoResponse { + const NAME: &'static str = "GetValidatorInfoResponse"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} /// Requests information on the chain's validators. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -703,6 +731,32 @@ impl ::prost::Name for CurrentValidatorRateResponse { ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorUptimeRequest { + #[prost(message, optional, tag = "2")] + pub identity_key: ::core::option::Option, +} +impl ::prost::Name for ValidatorUptimeRequest { + const NAME: &'static str = "ValidatorUptimeRequest"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorUptimeResponse { + #[prost(message, optional, tag = "1")] + pub uptime: ::core::option::Option, +} +impl ::prost::Name for ValidatorUptimeResponse { + const NAME: &'static str = "ValidatorUptimeResponse"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} /// Staking configuration data. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -763,6 +817,32 @@ impl ::prost::Name for GenesisContent { ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventTombstoneValidator { + /// The height at which the offense occurred. + #[prost(uint64, tag = "1")] + pub evidence_height: u64, + /// The height at which the evidence was processed. + #[prost(uint64, tag = "2")] + pub current_height: u64, + /// The validator identity key. + #[prost(message, optional, tag = "4")] + pub identity_key: ::core::option::Option, + /// The validator's Comet address. + #[prost(bytes = "vec", tag = "5")] + pub address: ::prost::alloc::vec::Vec, + /// The voting power for the validator. 
+ #[prost(uint64, tag = "6")] + pub voting_power: u64, +} +impl ::prost::Name for EventTombstoneValidator { + const NAME: &'static str = "EventTombstoneValidator"; + const PACKAGE: &'static str = "penumbra.core.component.stake.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.stake.v1.{}", Self::NAME) + } +} /// Generated client implementations. #[cfg(feature = "rpc")] pub mod query_service_client { @@ -850,6 +930,37 @@ pub mod query_service_client { self.inner = self.inner.max_encoding_message_size(limit); self } + /// Queries for information about a specific validator. + pub async fn get_validator_info( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/penumbra.core.component.stake.v1.QueryService/GetValidatorInfo", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "penumbra.core.component.stake.v1.QueryService", + "GetValidatorInfo", + ), + ); + self.inner.unary(req, path, codec).await + } /// Queries the current validator set, with filtering. pub async fn validator_info( &mut self, @@ -971,6 +1082,36 @@ pub mod query_service_client { ); self.inner.unary(req, path, codec).await } + pub async fn validator_uptime( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/penumbra.core.component.stake.v1.QueryService/ValidatorUptime", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "penumbra.core.component.stake.v1.QueryService", + "ValidatorUptime", + ), + ); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -981,6 +1122,14 @@ pub mod query_service_server { /// Generated trait containing gRPC methods that should be implemented for use with QueryServiceServer. #[async_trait] pub trait QueryService: Send + Sync + 'static { + /// Queries for information about a specific validator. + async fn get_validator_info( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; /// Server streaming response type for the ValidatorInfo method. type ValidatorInfoStream: tonic::codegen::tokio_stream::Stream< Item = std::result::Result, @@ -1016,6 +1165,13 @@ pub mod query_service_server { tonic::Response, tonic::Status, >; + async fn validator_uptime( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// Query operations for the staking component. 
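For completeness, here is roughly how the two new unary RPCs look from the client side, using the generated `QueryServiceClient`. This is a sketch: the endpoint URL is a placeholder, it assumes the proto crate's `rpc` feature (which enables the tonic client) is on, and `identity_key` is a previously obtained proto `IdentityKey`:

```rust
use penumbra_proto::core::component::stake::v1::{
    query_service_client::QueryServiceClient, GetValidatorInfoRequest, ValidatorUptimeRequest,
};
use penumbra_proto::core::keys::v1::IdentityKey;

async fn query_validator(identity_key: IdentityKey) -> anyhow::Result<()> {
    // Placeholder endpoint; point this at a real pd gRPC endpoint.
    let mut client = QueryServiceClient::connect("http://localhost:8080").await?;

    // Unary lookup of a single validator's info.
    let info = client
        .get_validator_info(GetValidatorInfoRequest {
            identity_key: Some(identity_key.clone()),
        })
        .await?
        .into_inner()
        .validator_info;
    println!("validator info: {info:?}");

    // Unary lookup of the same validator's uptime tracker.
    let uptime = client
        .validator_uptime(ValidatorUptimeRequest {
            identity_key: Some(identity_key),
        })
        .await?
        .into_inner()
        .uptime;
    println!("uptime: {uptime:?}");
    Ok(())
}
```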
     #[derive(Debug)]
@@ -1097,6 +1253,53 @@ pub mod query_service_server {
         fn call(&mut self, req: http::Request<B>) -> Self::Future {
             let inner = self.inner.clone();
             match req.uri().path() {
+                "/penumbra.core.component.stake.v1.QueryService/GetValidatorInfo" => {
+                    #[allow(non_camel_case_types)]
+                    struct GetValidatorInfoSvc<T: QueryService>(pub Arc<T>);
+                    impl<
+                        T: QueryService,
+                    > tonic::server::UnaryService<super::GetValidatorInfoRequest>
+                    for GetValidatorInfoSvc<T> {
+                        type Response = super::GetValidatorInfoResponse;
+                        type Future = BoxFuture<
+                            tonic::Response<Self::Response>,
+                            tonic::Status,
+                        >;
+                        fn call(
+                            &mut self,
+                            request: tonic::Request<super::GetValidatorInfoRequest>,
+                        ) -> Self::Future {
+                            let inner = Arc::clone(&self.0);
+                            let fut = async move {
+                                <T as QueryService>::get_validator_info(&inner, request)
+                                    .await
+                            };
+                            Box::pin(fut)
+                        }
+                    }
+                    let accept_compression_encodings = self.accept_compression_encodings;
+                    let send_compression_encodings = self.send_compression_encodings;
+                    let max_decoding_message_size = self.max_decoding_message_size;
+                    let max_encoding_message_size = self.max_encoding_message_size;
+                    let inner = self.inner.clone();
+                    let fut = async move {
+                        let inner = inner.0;
+                        let method = GetValidatorInfoSvc(inner);
+                        let codec = tonic::codec::ProstCodec::default();
+                        let mut grpc = tonic::server::Grpc::new(codec)
+                            .apply_compression_config(
+                                accept_compression_encodings,
+                                send_compression_encodings,
+                            )
+                            .apply_max_message_size_config(
+                                max_decoding_message_size,
+                                max_encoding_message_size,
+                            );
+                        let res = grpc.unary(method, req).await;
+                        Ok(res)
+                    };
+                    Box::pin(fut)
+                }
                 "/penumbra.core.component.stake.v1.QueryService/ValidatorInfo" => {
                     #[allow(non_camel_case_types)]
                     struct ValidatorInfoSvc<T: QueryService>(pub Arc<T>);
@@ -1284,6 +1487,52 @@ pub mod query_service_server {
                     };
                     Box::pin(fut)
                 }
+                "/penumbra.core.component.stake.v1.QueryService/ValidatorUptime" => {
+                    #[allow(non_camel_case_types)]
+                    struct ValidatorUptimeSvc<T: QueryService>(pub Arc<T>);
+                    impl<
+                        T: QueryService,
+                    > tonic::server::UnaryService<super::ValidatorUptimeRequest>
+                    for ValidatorUptimeSvc<T> {
+                        type Response = super::ValidatorUptimeResponse;
+                        type Future = BoxFuture<
+                            tonic::Response<Self::Response>,
+                            tonic::Status,
+                        >;
+                        fn call(
+                            &mut self,
+                            request: tonic::Request<super::ValidatorUptimeRequest>,
+                        ) -> Self::Future {
+                            let inner = Arc::clone(&self.0);
+                            let fut = async move {
+                                <T as QueryService>::validator_uptime(&inner, request).await
+                            };
+                            Box::pin(fut)
+                        }
+                    }
+                    let accept_compression_encodings = self.accept_compression_encodings;
+                    let send_compression_encodings = self.send_compression_encodings;
+                    let max_decoding_message_size = self.max_decoding_message_size;
+                    let max_encoding_message_size = self.max_encoding_message_size;
+                    let inner = self.inner.clone();
+                    let fut = async move {
+                        let inner = inner.0;
+                        let method = ValidatorUptimeSvc(inner);
+                        let codec = tonic::codec::ProstCodec::default();
+                        let mut grpc = tonic::server::Grpc::new(codec)
+                            .apply_compression_config(
+                                accept_compression_encodings,
+                                send_compression_encodings,
+                            )
+                            .apply_max_message_size_config(
+                                max_decoding_message_size,
+                                max_encoding_message_size,
+                            );
+                        let res = grpc.unary(method, req).await;
+                        Ok(res)
+                    };
+                    Box::pin(fut)
+                }
                 _ => {
                     Box::pin(async move {
                         Ok(
diff --git a/crates/proto/src/gen/penumbra.core.component.stake.v1.serde.rs b/crates/proto/src/gen/penumbra.core.component.stake.v1.serde.rs
index 639ecfbd62..b4b6f3d3bd 100644
--- a/crates/proto/src/gen/penumbra.core.component.stake.v1.serde.rs
+++ b/crates/proto/src/gen/penumbra.core.component.stake.v1.serde.rs
@@ -901,6 +901,185 @@ impl<'de> serde::Deserialize<'de> for DelegationChanges {
         deserializer.deserialize_struct("penumbra.core.component.stake.v1.DelegationChanges", FIELDS, GeneratedVisitor)
     }
 }
+impl serde::Serialize for EventTombstoneValidator {
+    #[allow(deprecated)]
+    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        use serde::ser::SerializeStruct;
+        let mut len = 0;
+        if self.evidence_height != 0 {
+            len += 1;
+        }
+        if self.current_height != 0 {
+            len += 1;
+        }
+        if self.identity_key.is_some() {
+            len += 1;
+        }
+        if !self.address.is_empty() {
+            len += 1;
+        }
+        if self.voting_power != 0 {
+            len += 1;
+        }
+        let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.EventTombstoneValidator", len)?;
+        if self.evidence_height != 0 {
+            #[allow(clippy::needless_borrow)]
+            struct_ser.serialize_field("evidenceHeight", ToString::to_string(&self.evidence_height).as_str())?;
+        }
+        if self.current_height != 0 {
+            #[allow(clippy::needless_borrow)]
+            struct_ser.serialize_field("currentHeight", ToString::to_string(&self.current_height).as_str())?;
+        }
+        if let Some(v) = self.identity_key.as_ref() {
+            struct_ser.serialize_field("identityKey", v)?;
+        }
+        if !self.address.is_empty() {
+            #[allow(clippy::needless_borrow)]
+            struct_ser.serialize_field("address", pbjson::private::base64::encode(&self.address).as_str())?;
+        }
+        if self.voting_power != 0 {
+            #[allow(clippy::needless_borrow)]
+            struct_ser.serialize_field("votingPower", ToString::to_string(&self.voting_power).as_str())?;
+        }
+        struct_ser.end()
+    }
+}
+impl<'de> serde::Deserialize<'de> for EventTombstoneValidator {
+    #[allow(deprecated)]
+    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        const FIELDS: &[&str] = &[
+            "evidence_height",
+            "evidenceHeight",
+            "current_height",
+            "currentHeight",
+            "identity_key",
+            "identityKey",
+            "address",
+            "voting_power",
+            "votingPower",
+        ];
+
+        #[allow(clippy::enum_variant_names)]
+        enum GeneratedField {
+            EvidenceHeight,
+            CurrentHeight,
+            IdentityKey,
+            Address,
+            VotingPower,
+            __SkipField__,
+        }
+        impl<'de> serde::Deserialize<'de> for GeneratedField {
+            fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error>
+            where
+                D: serde::Deserializer<'de>,
+            {
+                struct GeneratedVisitor;
+
+                impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+                    type Value = GeneratedField;
+
+                    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                        write!(formatter, "expected one of: {:?}", &FIELDS)
+                    }
+
+                    #[allow(unused_variables)]
+                    fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E>
+                    where
+                        E: serde::de::Error,
+                    {
+                        match value {
+                            "evidenceHeight" | "evidence_height" => Ok(GeneratedField::EvidenceHeight),
+                            "currentHeight" | "current_height" => Ok(GeneratedField::CurrentHeight),
+                            "identityKey" | "identity_key" => Ok(GeneratedField::IdentityKey),
+                            "address" => Ok(GeneratedField::Address),
+                            "votingPower" | "voting_power" => Ok(GeneratedField::VotingPower),
+                            _ => Ok(GeneratedField::__SkipField__),
+                        }
+                    }
+                }
+                deserializer.deserialize_identifier(GeneratedVisitor)
+            }
+        }
+        struct GeneratedVisitor;
+        impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+            type Value = EventTombstoneValidator;
+
+            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                formatter.write_str("struct penumbra.core.component.stake.v1.EventTombstoneValidator")
+            }
+
+            fn visit_map<V>(self, mut map_: V) -> std::result::Result<EventTombstoneValidator, V::Error>
+                where
+                    V: serde::de::MapAccess<'de>,
+            {
+                let mut evidence_height__ = None;
+                let mut current_height__ = None;
+                let mut identity_key__ = None;
+                let mut address__ = None;
+                let mut voting_power__ = None;
+                while let Some(k) = map_.next_key()? {
+                    match k {
+                        GeneratedField::EvidenceHeight => {
+                            if evidence_height__.is_some() {
+                                return Err(serde::de::Error::duplicate_field("evidenceHeight"));
+                            }
+                            evidence_height__ =
+                                Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0)
+                            ;
+                        }
+                        GeneratedField::CurrentHeight => {
+                            if current_height__.is_some() {
+                                return Err(serde::de::Error::duplicate_field("currentHeight"));
+                            }
+                            current_height__ =
+                                Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0)
+                            ;
+                        }
+                        GeneratedField::IdentityKey => {
+                            if identity_key__.is_some() {
+                                return Err(serde::de::Error::duplicate_field("identityKey"));
+                            }
+                            identity_key__ = map_.next_value()?;
+                        }
+                        GeneratedField::Address => {
+                            if address__.is_some() {
+                                return Err(serde::de::Error::duplicate_field("address"));
+                            }
+                            address__ =
+                                Some(map_.next_value::<::pbjson::private::BytesDeserialize<_>>()?.0)
+                            ;
+                        }
+                        GeneratedField::VotingPower => {
+                            if voting_power__.is_some() {
+                                return Err(serde::de::Error::duplicate_field("votingPower"));
+                            }
+                            voting_power__ =
+                                Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0)
+                            ;
+                        }
+                        GeneratedField::__SkipField__ => {
+                            let _ = map_.next_value::<serde::de::IgnoredAny>()?;
+                        }
+                    }
+                }
+                Ok(EventTombstoneValidator {
+                    evidence_height: evidence_height__.unwrap_or_default(),
+                    current_height: current_height__.unwrap_or_default(),
+                    identity_key: identity_key__,
+                    address: address__.unwrap_or_default(),
+                    voting_power: voting_power__.unwrap_or_default(),
+                })
+            }
+        }
+        deserializer.deserialize_struct("penumbra.core.component.stake.v1.EventTombstoneValidator", FIELDS, GeneratedVisitor)
+    }
+}
 impl serde::Serialize for FundingStream {
     #[allow(deprecated)]
     fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
@@ -1342,6 +1521,198 @@ impl<'de> serde::Deserialize<'de> for GenesisContent {
         deserializer.deserialize_struct("penumbra.core.component.stake.v1.GenesisContent", FIELDS, GeneratedVisitor)
     }
 }
+impl serde::Serialize for GetValidatorInfoRequest {
+    #[allow(deprecated)]
+    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        use serde::ser::SerializeStruct;
+        let mut len = 0;
+        if self.identity_key.is_some() {
+            len += 1;
+        }
+        let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.GetValidatorInfoRequest", len)?;
+        if let Some(v) = self.identity_key.as_ref() {
+            struct_ser.serialize_field("identityKey", v)?;
+        }
+        struct_ser.end()
+    }
+}
+impl<'de> serde::Deserialize<'de> for GetValidatorInfoRequest {
+    #[allow(deprecated)]
+    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        const FIELDS: &[&str] = &[
+            "identity_key",
+            "identityKey",
+        ];
+
+        #[allow(clippy::enum_variant_names)]
+        enum GeneratedField {
+            IdentityKey,
+            __SkipField__,
+        }
+        impl<'de> serde::Deserialize<'de> for GeneratedField {
+            fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error>
+            where
+                D: serde::Deserializer<'de>,
+            {
+                struct GeneratedVisitor;
+
+                impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+                    type Value = GeneratedField;
+
+                    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                        write!(formatter, "expected one of: {:?}", &FIELDS)
+                    }
+
+                    #[allow(unused_variables)]
+                    fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E>
+                    where
+                        E: serde::de::Error,
+                    {
+                        match value {
+                            "identityKey" | "identity_key" => Ok(GeneratedField::IdentityKey),
+                            _ => Ok(GeneratedField::__SkipField__),
+                        }
+                    }
+                }
+                deserializer.deserialize_identifier(GeneratedVisitor)
+            }
+        }
+        struct GeneratedVisitor;
+        impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+            type Value = GetValidatorInfoRequest;
+
+            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                formatter.write_str("struct penumbra.core.component.stake.v1.GetValidatorInfoRequest")
+            }
+
+            fn visit_map<V>(self, mut map_: V) -> std::result::Result<GetValidatorInfoRequest, V::Error>
+                where
+                    V: serde::de::MapAccess<'de>,
+            {
+                let mut identity_key__ = None;
+                while let Some(k) = map_.next_key()? {
+                    match k {
+                        GeneratedField::IdentityKey => {
+                            if identity_key__.is_some() {
+                                return Err(serde::de::Error::duplicate_field("identityKey"));
+                            }
+                            identity_key__ = map_.next_value()?;
+                        }
+                        GeneratedField::__SkipField__ => {
+                            let _ = map_.next_value::<serde::de::IgnoredAny>()?;
+                        }
+                    }
+                }
+                Ok(GetValidatorInfoRequest {
+                    identity_key: identity_key__,
+                })
+            }
+        }
+        deserializer.deserialize_struct("penumbra.core.component.stake.v1.GetValidatorInfoRequest", FIELDS, GeneratedVisitor)
+    }
+}
+impl serde::Serialize for GetValidatorInfoResponse {
+    #[allow(deprecated)]
+    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        use serde::ser::SerializeStruct;
+        let mut len = 0;
+        if self.validator_info.is_some() {
+            len += 1;
+        }
+        let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.GetValidatorInfoResponse", len)?;
+        if let Some(v) = self.validator_info.as_ref() {
+            struct_ser.serialize_field("validatorInfo", v)?;
+        }
+        struct_ser.end()
+    }
+}
+impl<'de> serde::Deserialize<'de> for GetValidatorInfoResponse {
+    #[allow(deprecated)]
+    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        const FIELDS: &[&str] = &[
+            "validator_info",
+            "validatorInfo",
+        ];
+
+        #[allow(clippy::enum_variant_names)]
+        enum GeneratedField {
+            ValidatorInfo,
+            __SkipField__,
+        }
+        impl<'de> serde::Deserialize<'de> for GeneratedField {
+            fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error>
+            where
+                D: serde::Deserializer<'de>,
+            {
+                struct GeneratedVisitor;
+
+                impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+                    type Value = GeneratedField;
+
+                    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                        write!(formatter, "expected one of: {:?}", &FIELDS)
+                    }
+
+                    #[allow(unused_variables)]
+                    fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E>
+                    where
+                        E: serde::de::Error,
+                    {
+                        match value {
+                            "validatorInfo" | "validator_info" => Ok(GeneratedField::ValidatorInfo),
+                            _ => Ok(GeneratedField::__SkipField__),
+                        }
+                    }
+                }
+                deserializer.deserialize_identifier(GeneratedVisitor)
+            }
+        }
+        struct GeneratedVisitor;
+        impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+            type Value = GetValidatorInfoResponse;
+
+            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                formatter.write_str("struct penumbra.core.component.stake.v1.GetValidatorInfoResponse")
+            }
+
+            fn visit_map<V>(self, mut map_: V) -> std::result::Result<GetValidatorInfoResponse, V::Error>
+                where
+                    V: serde::de::MapAccess<'de>,
+            {
+                let mut validator_info__ = None;
+                while let Some(k) = map_.next_key()? {
+                    match k {
+                        GeneratedField::ValidatorInfo => {
+                            if validator_info__.is_some() {
+                                return Err(serde::de::Error::duplicate_field("validatorInfo"));
+                            }
+                            validator_info__ = map_.next_value()?;
+                        }
+                        GeneratedField::__SkipField__ => {
+                            let _ = map_.next_value::<serde::de::IgnoredAny>()?;
+                        }
+                    }
+                }
+                Ok(GetValidatorInfoResponse {
+                    validator_info: validator_info__,
+                })
+            }
+        }
+        deserializer.deserialize_struct("penumbra.core.component.stake.v1.GetValidatorInfoResponse", FIELDS, GeneratedVisitor)
+    }
+}
 impl serde::Serialize for Penalty {
     #[allow(deprecated)]
     fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
@@ -4222,6 +4593,197 @@ impl<'de> serde::Deserialize<'de> for ValidatorStatusResponse {
         deserializer.deserialize_struct("penumbra.core.component.stake.v1.ValidatorStatusResponse", FIELDS, GeneratedVisitor)
     }
 }
+impl serde::Serialize for ValidatorUptimeRequest {
+    #[allow(deprecated)]
+    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        use serde::ser::SerializeStruct;
+        let mut len = 0;
+        if self.identity_key.is_some() {
+            len += 1;
+        }
+        let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.ValidatorUptimeRequest", len)?;
+        if let Some(v) = self.identity_key.as_ref() {
+            struct_ser.serialize_field("identityKey", v)?;
+        }
+        struct_ser.end()
+    }
+}
+impl<'de> serde::Deserialize<'de> for ValidatorUptimeRequest {
+    #[allow(deprecated)]
+    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        const FIELDS: &[&str] = &[
+            "identity_key",
+            "identityKey",
+        ];
+
+        #[allow(clippy::enum_variant_names)]
+        enum GeneratedField {
+            IdentityKey,
+            __SkipField__,
+        }
+        impl<'de> serde::Deserialize<'de> for GeneratedField {
+            fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error>
+            where
+                D: serde::Deserializer<'de>,
+            {
+                struct GeneratedVisitor;
+
+                impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+                    type Value = GeneratedField;
+
+                    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                        write!(formatter, "expected one of: {:?}", &FIELDS)
+                    }
+
+                    #[allow(unused_variables)]
+                    fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E>
+                    where
+                        E: serde::de::Error,
+                    {
+                        match value {
+                            "identityKey" | "identity_key" => Ok(GeneratedField::IdentityKey),
+                            _ => Ok(GeneratedField::__SkipField__),
+                        }
+                    }
+                }
+                deserializer.deserialize_identifier(GeneratedVisitor)
+            }
+        }
+        struct GeneratedVisitor;
+        impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+            type Value = ValidatorUptimeRequest;
+
+            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                formatter.write_str("struct penumbra.core.component.stake.v1.ValidatorUptimeRequest")
+            }
+
+            fn visit_map<V>(self, mut map_: V) -> std::result::Result<ValidatorUptimeRequest, V::Error>
+                where
+                    V: serde::de::MapAccess<'de>,
+            {
+                let mut identity_key__ = None;
+                while let Some(k) = map_.next_key()? {
+                    match k {
+                        GeneratedField::IdentityKey => {
+                            if identity_key__.is_some() {
+                                return Err(serde::de::Error::duplicate_field("identityKey"));
+                            }
+                            identity_key__ = map_.next_value()?;
+                        }
+                        GeneratedField::__SkipField__ => {
+                            let _ = map_.next_value::<serde::de::IgnoredAny>()?;
+                        }
+                    }
+                }
+                Ok(ValidatorUptimeRequest {
+                    identity_key: identity_key__,
+                })
+            }
+        }
+        deserializer.deserialize_struct("penumbra.core.component.stake.v1.ValidatorUptimeRequest", FIELDS, GeneratedVisitor)
+    }
+}
+impl serde::Serialize for ValidatorUptimeResponse {
+    #[allow(deprecated)]
+    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        use serde::ser::SerializeStruct;
+        let mut len = 0;
+        if self.uptime.is_some() {
+            len += 1;
+        }
+        let mut struct_ser = serializer.serialize_struct("penumbra.core.component.stake.v1.ValidatorUptimeResponse", len)?;
+        if let Some(v) = self.uptime.as_ref() {
+            struct_ser.serialize_field("uptime", v)?;
+        }
+        struct_ser.end()
+    }
+}
+impl<'de> serde::Deserialize<'de> for ValidatorUptimeResponse {
+    #[allow(deprecated)]
+    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        const FIELDS: &[&str] = &[
+            "uptime",
+        ];
+
+        #[allow(clippy::enum_variant_names)]
+        enum GeneratedField {
+            Uptime,
+            __SkipField__,
+        }
+        impl<'de> serde::Deserialize<'de> for GeneratedField {
+            fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error>
+            where
+                D: serde::Deserializer<'de>,
+            {
+                struct GeneratedVisitor;
+
+                impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+                    type Value = GeneratedField;
+
+                    fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                        write!(formatter, "expected one of: {:?}", &FIELDS)
+                    }
+
+                    #[allow(unused_variables)]
+                    fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E>
+                    where
+                        E: serde::de::Error,
+                    {
+                        match value {
+                            "uptime" => Ok(GeneratedField::Uptime),
+                            _ => Ok(GeneratedField::__SkipField__),
+                        }
+                    }
+                }
+                deserializer.deserialize_identifier(GeneratedVisitor)
+            }
+        }
+        struct GeneratedVisitor;
+        impl<'de> serde::de::Visitor<'de> for GeneratedVisitor {
+            type Value = ValidatorUptimeResponse;
+
+            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                formatter.write_str("struct penumbra.core.component.stake.v1.ValidatorUptimeResponse")
+            }
+
+            fn visit_map<V>(self, mut map_: V) -> std::result::Result<ValidatorUptimeResponse, V::Error>
+                where
+                    V: serde::de::MapAccess<'de>,
+            {
+                let mut uptime__ = None;
+                while let Some(k) = map_.next_key()? {
+                    match k {
+                        GeneratedField::Uptime => {
+                            if uptime__.is_some() {
+                                return Err(serde::de::Error::duplicate_field("uptime"));
+                            }
+                            uptime__ = map_.next_value()?;
+                        }
+                        GeneratedField::__SkipField__ => {
+                            let _ = map_.next_value::<serde::de::IgnoredAny>()?;
+                        }
+                    }
+                }
+                Ok(ValidatorUptimeResponse {
+                    uptime: uptime__,
+                })
+            }
+        }
+        deserializer.deserialize_struct("penumbra.core.component.stake.v1.ValidatorUptimeResponse", FIELDS, GeneratedVisitor)
+    }
+}
 impl serde::Serialize for ZkUndelegateClaimProof {
     #[allow(deprecated)]
     fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
diff --git a/crates/proto/src/gen/proto_descriptor.bin.no_lfs b/crates/proto/src/gen/proto_descriptor.bin.no_lfs
index 9643941d17..4057b3c842 100644
Binary files a/crates/proto/src/gen/proto_descriptor.bin.no_lfs and b/crates/proto/src/gen/proto_descriptor.bin.no_lfs differ
diff --git a/crates/view/src/service.rs b/crates/view/src/service.rs
index afdcce9fc4..327f90707b 100644
--- a/crates/view/src/service.rs
+++ b/crates/view/src/service.rs
@@ -926,12 +926,12 @@ impl ViewService for ViewServer {
             match action_view {
                 ActionView::Spend(SpendView::Visible { note, .. }) => {
                     let address = note.address();
-                    address_views.insert(address, fvk.view_address(address));
+                    address_views.insert(address.clone(), fvk.view_address(address));
                     asset_ids.insert(note.asset_id());
                 }
                 ActionView::Output(OutputView::Visible { note, .. }) => {
                     let address = note.address();
-                    address_views.insert(address, fvk.view_address(address));
+                    address_views.insert(address.clone(), fvk.view_address(address.clone()));
                     asset_ids.insert(note.asset_id());
 
                     // Also add an AddressView for the return address in the memo.
@@ -941,8 +941,8 @@ impl ViewService for ViewServer {
                     address_views.insert(memo.return_address(), fvk.view_address(address));
                 }
                 ActionView::Swap(SwapView::Visible { swap_plaintext, .. }) => {
-                    let address = swap_plaintext.claim_address;
-                    address_views.insert(address, fvk.view_address(address));
+                    let address = swap_plaintext.claim_address.clone();
+                    address_views.insert(address.clone(), fvk.view_address(address));
                     asset_ids.insert(swap_plaintext.trading_pair.asset_1());
                     asset_ids.insert(swap_plaintext.trading_pair.asset_2());
                 }
@@ -951,13 +951,13 @@ impl ViewService for ViewServer {
                 }) => {
                     // Both will be sent to the same address so this only needs to be added once
                     let address = output_1.address();
-                    address_views.insert(address, fvk.view_address(address));
+                    address_views.insert(address.clone(), fvk.view_address(address));
                     asset_ids.insert(output_1.asset_id());
                     asset_ids.insert(output_2.asset_id());
                 }
                 ActionView::DelegatorVote(DelegatorVoteView::Visible { note, .. }) => {
                     let address = note.address();
-                    address_views.insert(address, fvk.view_address(address));
+                    address_views.insert(address.clone(), fvk.view_address(address));
                     asset_ids.insert(note.asset_id());
                 }
                 _ => {}
diff --git a/crates/wallet/src/plan.rs b/crates/wallet/src/plan.rs
index dd993ccb5b..2d438dced0 100644
--- a/crates/wallet/src/plan.rs
+++ b/crates/wallet/src/plan.rs
@@ -106,7 +106,7 @@ where
     let mut planner = Planner::new(rng);
     planner.fee(fee);
     for value in values.iter().cloned() {
-        planner.output(value, dest_address);
+        planner.output(value, dest_address.clone());
     }
     let source_address = view.address_by_index(source_address_index).await?;
     planner
diff --git a/deployments/containerfiles/Dockerfile b/deployments/containerfiles/Dockerfile
index 2bc06f2222..951be11f1e 100644
--- a/deployments/containerfiles/Dockerfile
+++ b/deployments/containerfiles/Dockerfile
@@ -1,6 +1,6 @@
-# N.B. the RUST_VERSION should match MSRV in crates/bin/pd/Cargo.toml
-ARG RUST_VERSION=1.75.0
-FROM docker.io/rust:${RUST_VERSION}-slim-bookworm AS build-env
+# We use the latest stable version of the official Rust container image,
+# delegating to the `rust-toolchain.toml` file to pin a specific Rust toolchain.
+FROM docker.io/rust:1-slim-bookworm AS build-env
 
 # Install build dependencies. These packages should match what's recommended on
 # https://guide.penumbra.zone/main/pcli/install.html
@@ -13,7 +13,8 @@ RUN apt-get update && apt-get install -y \
 WORKDIR /usr/src/penumbra
 # Add rust dependency lockfiles first, to cache downloads.
-COPY Cargo.lock Cargo.toml .
+COPY Cargo.lock Cargo.toml rust-toolchain.toml .
+
 # If any rust code changed, the cache will break on copying `crates/`.
 # Ideally we'd copy in all Cargo.toml files first, fetch, then copy crates.
 COPY crates ./crates
@@ -21,9 +22,10 @@ COPY crates ./crates
 COPY assets ./assets
 # Copy in summonerd contribution orchestrator.
 COPY tools ./tools
+# Download all workspace dependencies specified in Cargo.toml.
 RUN cargo fetch
-COPY . .
 # Build Penumbra binaries
+COPY . .
 RUN cargo build --release
 
 # Runtime image.
diff --git a/docs/guide/src/dev/build.md b/docs/guide/src/dev/build.md
index 8b47aa7fa9..87d24a53bc 100644
--- a/docs/guide/src/dev/build.md
+++ b/docs/guide/src/dev/build.md
@@ -13,6 +13,8 @@ of the Rust compiler, installation instructions for which you can find
 `cargo` is available in your `$PATH`!
 You can verify the rust compiler version by running `rustc --version` which
 should indicate version 1.75 or later.
+The project uses a `rust-toolchain.toml` file, which ensures that your version of Rust
+stays current enough to build the project from source.
 
 ### Installing build prerequisites
diff --git a/docs/guide/src/node/pd/install.md b/docs/guide/src/node/pd/install.md
index 69d69ff617..d54c5d3ae1 100644
--- a/docs/guide/src/node/pd/install.md
+++ b/docs/guide/src/node/pd/install.md
@@ -4,18 +4,19 @@ Download prebuilt binaries from the [Penumbra releases page on Github](https://g
 Make sure to use the most recent version available, as the version of `pd`
 must match the software currently running on the network.
 
-Make sure to choose the correct platform for your machine. After downloading the `.tar.xz` file,
+Make sure to choose the correct platform for your machine. After downloading the `.tar.gz` file,
 extract it, and copy its contents to your `$PATH`. For example:
 
 ```
-curl -sSfL -O https://github.com/penumbra-zone/penumbra/releases/download/{{ #include ../../penumbra_version.md }}/pd-x86_64-unknown-linux-gnu.tar.xz
-unxz pd-x86_64-unknown-linux-gnu.tar.xz
-tar -xf pd-x86_64-unknown-linux-gnu.tar
+curl -sSfL -O https://github.com/penumbra-zone/penumbra/releases/download/{{ #include ../../penumbra_version.md }}/pd-x86_64-unknown-linux-gnu.tar.gz
+tar -xf pd-x86_64-unknown-linux-gnu.tar.gz
 sudo mv pd-x86_64-unknown-linux-gnu/pd /usr/local/bin/
 
 # confirm the pd binary is installed by running:
 pd --version
 ```
+
+There's also a one-liner install script available on the release page, which will install `pd` to `$HOME/.cargo/bin/`.
 
 As of v0.64.1 (released 2023-12-12), we build Linux binaries on Ubuntu 22.04. If these binaries
 don't work for you out of the box, you'll need to [build from source](../../dev/build.md),
 or use the container images.
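The generated serde impls in `penumbra.core.component.stake.v1.serde.rs` above follow the proto3 JSON mapping: 64-bit integers are serialized as decimal strings, `bytes` fields as base64, field names as lowerCamelCase, and fields at their default values are omitted entirely. A minimal sketch of what that looks like for the new `EventTombstoneValidator` event, assuming `penumbra-proto` is built with its serde implementations compiled in (an assumption, not something this diff configures):

```rust
// Sketch only: round-trips the new event through serde_json to show the
// proto3 JSON conventions encoded by the generated impls above.
use penumbra_proto::core::component::stake::v1::EventTombstoneValidator;

fn main() -> anyhow::Result<()> {
    let event = EventTombstoneValidator {
        evidence_height: 100,
        current_height: 102,
        identity_key: None, // unset messages are omitted from the JSON entirely
        address: vec![0xde, 0xad, 0xbe, 0xef],
        voting_power: 1_000_000,
    };
    // Expected shape, per the Serialize impl: camelCase keys, u64s as strings,
    // bytes as base64:
    // {"evidenceHeight":"100","currentHeight":"102","address":"3q2+7w==","votingPower":"1000000"}
    let json = serde_json::to_string(&event)?;
    let back: EventTombstoneValidator = serde_json::from_str(&json)?;
    assert_eq!(event, back);
    println!("{json}");
    Ok(())
}
```

Serializing `u64` as a string is deliberate in the proto3 JSON spec, since JavaScript's `Number` cannot represent the full 64-bit range.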
diff --git a/docs/guide/src/pcli/install.md b/docs/guide/src/pcli/install.md
index bacbfbef69..d5de82316f 100644
--- a/docs/guide/src/pcli/install.md
+++ b/docs/guide/src/pcli/install.md
@@ -4,19 +4,17 @@ Download prebuilt binaries from the [Penumbra releases page on Github](https://g
 Make sure to use the most recent version available, as the version of `pcli`
 must match the software currently running on the network.
 
-Make sure choose the correct platform for your machine. After downloading the `.tar.xz` file,
-extract it, and copy its contents to your `$PATH`. For example:
+Make sure to choose the correct platform for your machine. Or, you can use a one-liner install script:
 
 ```
-curl -sSfL -O https://github.com/penumbra-zone/penumbra/releases/download/{{ #include ../penumbra_version.md }}/pcli-x86_64-unknown-linux-gnu.tar.xz
-unxz pcli-x86_64-unknown-linux-gnu.tar.xz
-tar -xf pcli-x86_64-unknown-linux-gnu.tar
-sudo mv pcli-x86_64-unknown-linux-gnu/pcli /usr/local/bin/
+curl --proto '=https' --tlsv1.2 -LsSf https://github.com/penumbra-zone/penumbra/releases/download/{{ #include ../penumbra_version.md }}/pcli-installer.sh | sh
 
 # confirm the pcli binary is installed by running:
 pcli --version
 ```
+
+The installer script will place the binary in `$HOME/.cargo/bin/`.
 
 If you see an error message containing `GLIBC`, then your system is not compatible
 with the precompiled binaries. See details below.
diff --git a/docs/guide/src/penumbra_version.md b/docs/guide/src/penumbra_version.md
index 6c58abe755..33aa2b96e4 100644
--- a/docs/guide/src/penumbra_version.md
+++ b/docs/guide/src/penumbra_version.md
@@ -1 +1 @@
-v0.73.0
+v0.73.1
diff --git a/flake.lock b/flake.lock
index b552a9252b..e6c493870d 100644
--- a/flake.lock
+++ b/flake.lock
@@ -7,11 +7,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1711681752,
-      "narHash": "sha256-LEg6/dmEFxx6Ygti5DO9MOhGNpyB7zdxdWtzv/FCTXk=",
+      "lastModified": 1713979152,
+      "narHash": "sha256-apdecPuh8SOQnkEET/kW/UcfjCRb8JbV5BKjoH+DcP4=",
       "owner": "ipetkov",
       "repo": "crane",
-      "rev": "ada0fb4dcce4561acb1eb17c59b7306d9d4a95f3",
+      "rev": "a5eca68a2cf11adb32787fc141cddd29ac8eb79c",
       "type": "github"
     },
     "original": {
@@ -25,11 +25,11 @@
       "systems": "systems"
     },
     "locked": {
-      "lastModified": 1705309234,
-      "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
+      "lastModified": 1710146030,
+      "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
       "owner": "numtide",
       "repo": "flake-utils",
-      "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
+      "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
       "type": "github"
     },
     "original": {
@@ -40,11 +40,11 @@
   },
   "nixpkgs": {
     "locked": {
-      "lastModified": 1707268954,
-      "narHash": "sha256-2en1kvde3cJVc3ZnTy8QeD2oKcseLFjYPLKhIGDanQ0=",
+      "lastModified": 1714076141,
+      "narHash": "sha256-Drmja/f5MRHZCskS6mvzFqxEaZMeciScCTFxWVLqWEY=",
      "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "f8e2ebd66d097614d51a56a755450d4ae1632df1",
+      "rev": "7bb2ccd8cdc44c91edba16c48d2c8f331fb3d856",
       "type": "github"
     },
     "original": {
@@ -72,11 +72,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1712024007,
-      "narHash": "sha256-52cf+mHZJbSaDFdsBj6vN1hH52AXsMgEpS/ajzc9yQE=",
+      "lastModified": 1714097613,
+      "narHash": "sha256-044xbpBszupqN3nl/CGOCJtTQ4O6Aca81mJpX45i8/I=",
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "d45d957dc3c48792af7ce58eec5d84407655e8fa",
+      "rev": "2a42c742ab04b61d9b2f1edf392842cf9f27ebfd",
       "type": "github"
     },
     "original": {
diff --git a/flake.nix b/flake.nix
index 101682614e..0ee500380b 100644
--- a/flake.nix
+++ b/flake.nix
@@ -23,10 +23,6 @@ let
 
   # Define versions of Penumbra and CometBFT
   penumbraRelease = null; # Use the local working copy
-  # penumbraRelease = { # Use a specific release
-  #   version = "0.71.0";
-  #   sha256 = "sha256-2mpyBEt44UlXm6hahJG9sHGxj6nzh7z9lnj/vLtAAzs=";
-  # };
   cometBftRelease = {
     version = "0.37.5";
     sha256 = "sha256-wNVHsifieAtZgedavCEJLgG0kRDqUhG4Lk5ciTPoNzI=";
@@ -107,7 +103,7 @@
           devShells.default = craneLib.devShell {
             inherit LIBCLANG_PATH;
             inputsFrom = [ penumbra ];
-            packages = [ cargo-watch ];
+            packages = [ cargo-watch cargo-nextest protobuf ];
             shellHook = ''
               export LIBCLANG_PATH=${LIBCLANG_PATH}
               export RUST_SRC_PATH=${pkgs.rustPlatform.rustLibSrc} # Required for rust-analyzer
diff --git a/proto/penumbra/penumbra/core/component/dex/v1/dex.proto b/proto/penumbra/penumbra/core/component/dex/v1/dex.proto
index 53fd9d9167..08d486f403 100644
--- a/proto/penumbra/penumbra/core/component/dex/v1/dex.proto
+++ b/proto/penumbra/penumbra/core/component/dex/v1/dex.proto
@@ -248,7 +248,9 @@ message BatchSwapOutputData {
   // The trading pair associated with the batch swap.
   TradingPair trading_pair = 8;
   // The starting block height of the epoch for which the batch swap data is valid.
-  uint64 epoch_starting_height = 9;
+  uint64 epoch_starting_height = 9 [deprecated = true];
+  // The prefix (epoch, block) of the position where this batch swap occurred.
+  uint64 sct_position_prefix = 10;
 }
 
 // The trading function for a specific pair.
@@ -571,6 +573,7 @@ message LiquidityPositionsByPriceRequest {
 
 message LiquidityPositionsByPriceResponse {
   core.component.dex.v1.Position data = 1;
+  core.component.dex.v1.PositionId id = 2;
 }
 
 message SpreadRequest {
@@ -651,6 +654,11 @@ message EventPositionClose {
   PositionId position_id = 1;
 }
 
+message EventQueuePositionClose {
+  // The ID of the position that is queued for closure.
+  PositionId position_id = 1;
+}
+
 message EventPositionWithdraw {
   // The ID of the withdrawn position.
   PositionId position_id = 1;
diff --git a/proto/penumbra/penumbra/core/component/stake/v1/stake.proto b/proto/penumbra/penumbra/core/component/stake/v1/stake.proto
index 7260c58cb7..deaeac0537 100644
--- a/proto/penumbra/penumbra/core/component/stake/v1/stake.proto
+++ b/proto/penumbra/penumbra/core/component/stake/v1/stake.proto
@@ -238,11 +238,24 @@ message Penalty {
 
 // Query operations for the staking component.
 service QueryService {
+  // Queries for information about a specific validator.
+  rpc GetValidatorInfo(GetValidatorInfoRequest) returns (GetValidatorInfoResponse);
   // Queries the current validator set, with filtering.
   rpc ValidatorInfo(ValidatorInfoRequest) returns (stream ValidatorInfoResponse);
   rpc ValidatorStatus(ValidatorStatusRequest) returns (ValidatorStatusResponse);
   rpc ValidatorPenalty(ValidatorPenaltyRequest) returns (ValidatorPenaltyResponse);
   rpc CurrentValidatorRate(CurrentValidatorRateRequest) returns (CurrentValidatorRateResponse);
+  rpc ValidatorUptime(ValidatorUptimeRequest) returns (ValidatorUptimeResponse);
+}
+
+// Requests information about a specific validator.
+message GetValidatorInfoRequest {
+  // The identity key of the validator.
+  core.keys.v1.IdentityKey identity_key = 2;
+}
+
+message GetValidatorInfoResponse {
+  core.component.stake.v1.ValidatorInfo validator_info = 1;
 }
 
 // Requests information on the chain's validators.
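The stake.proto change above adds a unary `GetValidatorInfo` RPC alongside the existing streaming `ValidatorInfo` query, so callers can fetch a single validator without filtering a stream. A hedged sketch of calling it through the generated tonic client, assuming the crate's `rpc` feature, a tokio runtime, and a node serving gRPC at the example URL (all assumptions, not shown in this diff):

```rust
// Sketch: query a single validator over gRPC using the generated client.
use penumbra_proto::core::component::stake::v1::{
    query_service_client::QueryServiceClient, GetValidatorInfoRequest,
};
use tonic::transport::Channel;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // The URL is an example placeholder; point it at a real pd gRPC endpoint.
    let channel = Channel::from_static("http://localhost:8080").connect().await?;
    let mut client = QueryServiceClient::new(channel);
    let response = client
        .get_validator_info(GetValidatorInfoRequest {
            // The proto IdentityKey of the validator to look up; `None` here
            // only keeps the sketch compiling.
            identity_key: None,
        })
        .await?
        .into_inner();
    println!("validator info: {:?}", response.validator_info);
    Ok(())
}
```

Note that both new request messages put `identity_key` at field tag 2 and leave tag 1 unused.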
@@ -282,6 +295,14 @@ message CurrentValidatorRateResponse {
   core.component.stake.v1.RateData data = 1;
 }
 
+message ValidatorUptimeRequest {
+  core.keys.v1.IdentityKey identity_key = 2;
+}
+
+message ValidatorUptimeResponse {
+  Uptime uptime = 1;
+}
+
 // Staking configuration data.
 message StakeParameters {
   // The number of epochs that an unbonding note is locked for before being released.
@@ -311,3 +332,16 @@ message GenesisContent {
   // The list of validators present at genesis.
   repeated stake.v1.Validator validators = 2;
 }
+
+message EventTombstoneValidator {
+  // The height at which the offense occurred.
+  uint64 evidence_height = 1;
+  // The height at which the evidence was processed.
+  uint64 current_height = 2;
+  // The validator identity key.
+  keys.v1.IdentityKey identity_key = 4;
+  // The validator's Comet address.
+  bytes address = 5;
+  // The voting power for the validator.
+  uint64 voting_power = 6;
+}
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 292fe499e3..3ff2a27f7a 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,2 +1,8 @@
 [toolchain]
-channel = "stable"
+# We set a specific version of rust so that CI workflows use the same
+# version that development environments do.
+channel = "1.75"
+components = [ "rustfmt" ]
+# Include the wasm target, so CI tests can check that wasm32 build targets still work,
+# avoiding downstream breakage in the `penumbra-wasm` crate in the web repo.
+targets = [ "wasm32-unknown-unknown" ]
diff --git a/tools/summonerd/src/participant.rs b/tools/summonerd/src/participant.rs
index 81bc38e1df..f0b0035505 100644
--- a/tools/summonerd/src/participant.rs
+++ b/tools/summonerd/src/participant.rs
@@ -43,7 +43,7 @@ impl Participant {
     }
 
     pub fn address(&self) -> Address {
-        self.address
+        self.address.clone()
     }
 
     pub fn is_live(&self) -> bool {
diff --git a/tools/summonerd/src/queue.rs b/tools/summonerd/src/queue.rs
index 21f8bc03ab..10aaa313f1 100644
--- a/tools/summonerd/src/queue.rs
+++ b/tools/summonerd/src/queue.rs
@@ -151,7 +151,7 @@ impl ParticipantQueue {
         for (i, (participant, bid)) in participants.iter().enumerate() {
             let address = participant.address();
             match filter {
-                Some(f) if f != address => continue,
+                Some(ref f) if *f != address => continue,
                 _ => {}
             }
             // Ignore failures (besides logging), let pruning happen later.
diff --git a/tools/summonerd/src/server.rs b/tools/summonerd/src/server.rs
index 83db48222f..f1b892cda9 100644
--- a/tools/summonerd/src/server.rs
+++ b/tools/summonerd/src/server.rs
@@ -97,7 +97,7 @@ impl server::CeremonyCoordinatorService for CoordinatorService {
             }
         };
         tracing::info!(?amount, ?address, "bid");
-        let (participant, response_rx) = Participant::new(address, streaming);
+        let (participant, response_rx) = Participant::new(address.clone(), streaming);
         self.queue.push(participant, amount).await;
         self.queue
             .inform_one(address)
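The `.clone()` and `ref`-binding changes in `crates/view`, `crates/wallet`, and `tools/summonerd` all point at the same underlying change: the `Address` type appears to no longer be `Copy` (an inference from the diff, not stated in it), so a value used twice needs an explicit clone, and match guards must bind by reference to avoid moving the scrutinee. A minimal sketch of the pattern with a stand-in type, not the real `penumbra_keys::Address`:

```rust
use std::collections::HashMap;

// Stand-in for an address type that is Clone but not Copy (hypothetical).
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct Address(Vec<u8>);

// Stand-in for fvk.view_address(address), which takes the address by value.
fn view_address(address: Address) -> String {
    format!("{address:?}")
}

fn main() {
    let address = Address(vec![1, 2, 3]);
    // The first use must clone, because the call below moves `address`:
    let view = view_address(address.clone());
    let mut address_views: HashMap<Address, String> = HashMap::new();
    address_views.insert(address, view);

    // Matching an Option<Address> filter without consuming it:
    let filter: Option<Address> = Some(Address(vec![1, 2, 3]));
    for candidate in address_views.keys() {
        match filter {
            // `ref` binds by reference, so `filter` survives each iteration.
            Some(ref f) if f != candidate => continue,
            _ => println!("matched {candidate:?}"),
        }
    }
}
```

The `Some(ref f) if *f != address` form in `tools/summonerd/src/queue.rs` is the same idea: binding by reference keeps `filter` intact across loop iterations.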